/* xref: /linux/mm/slab.h (revision 1f9f78b1b376f82cdd8ed73cc0abdb74d0453d43) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided here in its own definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;/* The original size of the object */
        unsigned int size;      /* The aligned/padded/added on size */
        unsigned int align;     /* Alignment as calculated */
        slab_flags_t flags;     /* Active flags on the slab */
        unsigned int useroffset;/* Usercopy region offset */
        unsigned int usersize;  /* Usercopy region size */
        const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;

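/*
 * Illustrative example (not part of the original header): early boot code
 * that must not use the slab before it is bootstrapped can gate on this
 * state, roughly the way slab_is_available() in mm/slab_common.c does.
 * The variables below are made up for the example:
 *
 *      if (slab_state >= UP)
 *              buf = kmalloc(size, GFP_NOWAIT);
 *      else
 *              buf = memblock_alloc(size, SMP_CACHE_BYTES);
 */
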
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name[NR_KMALLOC_TYPES];
        unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
                        slab_flags_t flags, unsigned int useroffset,
                        unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        unsigned int size, slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name)
{
        return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT)

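/*
 * Illustrative sketch of how these masks are expected to be used on the
 * cache creation path (cf. kmem_cache_create_usercopy() in mm/slab_common.c):
 * flags outside SLAB_FLAGS_PERMITTED are rejected, and the remainder is
 * masked down to what the current configuration actually supports:
 *
 *      if (flags & ~SLAB_FLAGS_PERMITTED)
 *              return NULL;
 *      flags &= CACHE_CREATE_MASK;
 */
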
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

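/*
 * Illustrative use of the public bulk API declared in <linux/slab.h>
 * (kmem_cache_alloc_bulk()/kmem_cache_free_bulk()), which may fall back to
 * the generic helpers above; the array and its size are made up for the
 * example:
 *
 *      void *objs[16];
 *
 *      if (kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *              kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 */
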
static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
        VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
        if (static_branch_unlikely(&slub_debug_enabled))
                return s->flags & flags;
#endif
        return false;
}

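/*
 * Example from this file: cache_from_obj() below uses the helper so the
 * virt_to_cache() lookup is only paid for when freelist hardening or
 * consistency checking is enabled:
 *
 *      if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
 *          !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *              return s;
 */
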
#ifdef CONFIG_MEMCG_KMEM
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
                                 gfp_t gfp, bool new_page);

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
        kfree(page_objcgs(page));
        page->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
        /*
         * For each accounted object there is an extra space which is used
         * to store obj_cgroup membership. Charge it too.
         */
        return s->size + sizeof(struct obj_cgroup *);
}

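/*
 * Worked example (assuming a 64-bit build, where a pointer is 8 bytes):
 * for a cache with s->size of 64 bytes, each accounted object is charged
 * obj_full_size() == 64 + 8 == 72 bytes, covering the object itself plus
 * its slot in the page's obj_cgroup vector.
 */
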
/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        struct obj_cgroup *objcg;

        if (!memcg_kmem_enabled())
                return true;

        if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
                return true;

        objcg = get_obj_cgroup_from_current();
        if (!objcg)
                return true;

        if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
                obj_cgroup_put(objcg);
                return false;
        }

        *objcgp = objcg;
        return true;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
                                   struct pglist_data *pgdat,
                                   enum node_stat_item idx, int nr)
{
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = obj_cgroup_memcg(objcg);
        lruvec = mem_cgroup_lruvec(memcg, pgdat);
        mod_memcg_lruvec_state(lruvec, idx, nr);
        rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
        struct page *page;
        unsigned long off;
        size_t i;

        if (!memcg_kmem_enabled() || !objcg)
                return;

        flags &= ~__GFP_ACCOUNT;
        for (i = 0; i < size; i++) {
                if (likely(p[i])) {
                        page = virt_to_head_page(p[i]);

                        if (!page_objcgs(page) &&
                            memcg_alloc_page_obj_cgroups(page, s, flags,
                                                         false)) {
                                obj_cgroup_uncharge(objcg, obj_full_size(s));
                                continue;
                        }

                        off = obj_to_index(s, page, p[i]);
                        obj_cgroup_get(objcg);
                        page_objcgs(page)[off] = objcg;
                        mod_objcg_state(objcg, page_pgdat(page),
                                        cache_vmstat_idx(s), obj_full_size(s));
                } else {
                        obj_cgroup_uncharge(objcg, obj_full_size(s));
                }
        }
        obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
                                        void **p, int objects)
{
        struct kmem_cache *s;
        struct obj_cgroup **objcgs;
        struct obj_cgroup *objcg;
        struct page *page;
        unsigned int off;
        int i;

        if (!memcg_kmem_enabled())
                return;

        for (i = 0; i < objects; i++) {
                if (unlikely(!p[i]))
                        continue;

                page = virt_to_head_page(p[i]);
                objcgs = page_objcgs(page);
                if (!objcgs)
                        continue;

                if (!s_orig)
                        s = page->slab_cache;
                else
                        s = s_orig;

                off = obj_to_index(s, page, p[i]);
                objcg = objcgs[off];
                if (!objcg)
                        continue;

                objcgs[off] = NULL;
                obj_cgroup_uncharge(objcg, obj_full_size(s));
                mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
                                -obj_full_size(s));
                obj_cgroup_put(objcg);
        }
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
        return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
                                               struct kmem_cache *s, gfp_t gfp,
                                               bool new_page)
{
        return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
                                        void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct page *page;

        page = virt_to_head_page(obj);
        if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
                                        __func__))
                return NULL;
        return page->slab_cache;
}

static __always_inline void account_slab_page(struct page *page, int order,
                                              struct kmem_cache *s,
                                              gfp_t gfp)
{
        if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
                memcg_alloc_page_obj_cgroups(page, s, gfp, true);

        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
                                                struct kmem_cache *s)
{
        if (memcg_kmem_enabled())
                memcg_free_page_obj_cgroups(page);

        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;

        if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
            !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
                return s;

        cachep = virt_to_cache(x);
        if (WARN(cachep && cachep != s,
                 "%s: Wrong slab cache. %s but object is from %s\n",
                 __func__, s->name, cachep->name))
                print_tracking(cachep, x);
        return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we need to store the freelist pointer back there or track
         * user information, we can only use the space before that
         * information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc. for the allocation.
         */
        return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     struct obj_cgroup **objcgp,
                                                     size_t size, gfp_t flags)
{
        flags &= gfp_allowed_mask;

        might_alloc(flags);

        if (should_failslab(s, flags))
                return NULL;

        if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
                return NULL;

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                        struct obj_cgroup *objcg, gfp_t flags,
                                        size_t size, void **p, bool init)
{
        size_t i;

        flags &= gfp_allowed_mask;

        /*
         * As memory initialization might be integrated into KASAN,
         * kasan_slab_alloc and initialization memset must be
         * kept together to avoid discrepancies in behavior.
         *
         * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
         */
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags, init);
                if (p[i] && init && !kasan_has_integrated_init())
                        memset(p[i], 0, s->object_size);
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
        }

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                 if ((__n = get_node(__s, __node)))

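/*
 * Illustrative use of the iterator (a sketch; the local variables are made
 * up, and nr_partial is the SLUB per-node partial-list length):
 *
 *      struct kmem_cache_node *n;
 *      unsigned long nr = 0;
 *      int node;
 *
 *      for_each_kmem_cache_node(s, node, n)
 *              nr += n->nr_partial;
 */
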
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                        unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
                                &init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
                                &init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}

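/*
 * Example of the resulting behavior (an illustration, assuming the
 * init_on_alloc static key is enabled via CONFIG_INIT_ON_ALLOC_DEFAULT_ON
 * or the init_on_alloc=1 boot parameter): for a cache without a constructor
 * and without SLAB_TYPESAFE_BY_RCU or SLAB_POISON,
 *
 *      p = kmem_cache_alloc(s, GFP_KERNEL);
 *
 * returns zeroed memory even though __GFP_ZERO was not passed; caches with
 * a ctor keep their constructed contents instead.
 */
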
#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
        void *kp_ptr;
        struct page *kp_page;
        void *kp_objp;
        unsigned long kp_data_offset;
        struct kmem_cache *kp_slab_cache;
        void *kp_ret;
        void *kp_stack[KS_ADDRS_COUNT];
};
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
#endif

#endif /* MM_SLAB_H */