// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Google */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

#include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */

/* Open-coded iterator: BPF programs see only the opaque struct below,
 * while the kernel accesses the same storage through the _kern view.
 */
struct bpf_iter_kmem_cache {
	__u64 __opaque[1];
} __attribute__((aligned(8)));

struct bpf_iter_kmem_cache_kern {
	struct kmem_cache *pos;
} __attribute__((aligned(8)));

#define KMEM_CACHE_POS_START  ((void *)1L)

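/*
 * The three kfuncs below form the open-coded variant: a BPF program
 * creates the iterator on its own stack and drives it directly.  A
 * minimal sketch of the BPF side, assuming vmlinux.h, the usual __ksym
 * declarations, a sleepable program type for which these kfuncs are
 * registered, and an illustrative program name:
 *
 *	extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __ksym;
 *	extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __ksym;
 *	extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __ksym;
 *
 *	SEC("syscall")
 *	int dump_kmem_caches(void *ctx)
 *	{
 *		struct bpf_iter_kmem_cache it;
 *		struct kmem_cache *s;
 *
 *		bpf_iter_kmem_cache_new(&it);
 *		while ((s = bpf_iter_kmem_cache_next(&it)))
 *			bpf_printk("%s: object size %u", s->name, s->object_size);
 *		bpf_iter_kmem_cache_destroy(&it);
 *		return 0;
 *	}
 */
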
__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;

	BUILD_BUG_ON(sizeof(*kit) > sizeof(*it));
	BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it));

	kit->pos = KMEM_CACHE_POS_START;
	return 0;
}

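/*
 * Advance the iterator.  Under slab_mutex, pin the next cache by taking
 * a reference, then drop the reference held on the previous one.  If
 * that leaves prev->refcount == 1, the owner destroyed the cache while
 * we had it pinned, so finish the destruction here; kmem_cache_destroy()
 * acquires slab_mutex itself, hence it is called only after unlocking.
 */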
__bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
	struct kmem_cache *prev = kit->pos;
	struct kmem_cache *next;
	bool destroy = false;

	if (!prev)
		return NULL;

	mutex_lock(&slab_mutex);

	if (list_empty(&slab_caches)) {
		mutex_unlock(&slab_mutex);
		return NULL;
	}

	if (prev == KMEM_CACHE_POS_START)
		next = list_first_entry(&slab_caches, struct kmem_cache, list);
	else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev)
		next = NULL;
	else
		next = list_next_entry(prev, list);

	/* boot_caches have negative refcount, don't touch them */
	if (next && next->refcount > 0)
		next->refcount++;

	/* Skip kmem_cache_destroy() for active entries */
	if (prev && prev != KMEM_CACHE_POS_START) {
		if (prev->refcount > 1)
			prev->refcount--;
		else if (prev->refcount == 1)
			destroy = true;
	}

	mutex_unlock(&slab_mutex);

	if (destroy)
		kmem_cache_destroy(prev);

	kit->pos = next;
	return next;
}

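/*
 * Release the iterator.  Drop the reference held on the current
 * position, finishing a pending kmem_cache_destroy() if ours was the
 * last reference (same handoff as in bpf_iter_kmem_cache_next()).
 */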
__bpf_kfunc void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
	struct kmem_cache *s = kit->pos;
	bool destroy = false;

	if (s == NULL || s == KMEM_CACHE_POS_START)
		return;

	mutex_lock(&slab_mutex);

	/* Skip kmem_cache_destroy() for active entries */
	if (s->refcount > 1)
		s->refcount--;
	else if (s->refcount == 1)
		destroy = true;

	mutex_unlock(&slab_mutex);

	if (destroy)
		kmem_cache_destroy(s);
}

__bpf_kfunc_end_defs();

struct bpf_iter__kmem_cache {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct kmem_cache *, s);
};
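
/*
 * A program attached to the seq_file-based iterator receives the context
 * above.  A minimal sketch of such a program, assuming vmlinux.h,
 * bpf_helpers.h and an illustrative program name:
 *
 *	SEC("iter/kmem_cache")
 *	int dump(struct bpf_iter__kmem_cache *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct kmem_cache *s = ctx->s;
 *
 *		if (!s)		// NULL once at the end, see seq_stop()
 *			return 0;
 *		BPF_SEQ_PRINTF(seq, "%s %u\n", s->name, s->object_size);
 *		return 0;
 *	}
 */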

union kmem_cache_iter_priv {
	struct bpf_iter_kmem_cache it;
	struct bpf_iter_kmem_cache_kern kit;
};

static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
{
	loff_t cnt = 0;
	bool found = false;
	struct kmem_cache *s;
	union kmem_cache_iter_priv *p = seq->private;

	mutex_lock(&slab_mutex);

	/* Find the entry at the given position in the slab_caches list
	 * instead of holding a reference to the last visited entry across
	 * slab_mutex releases.  This may miss an entry if one is deleted
	 * while the lock is dropped, but that should be rare and there is
	 * not much we can do about it.
	 */
	list_for_each_entry(s, &slab_caches, list) {
		if (cnt == *pos) {
			/* Make sure this entry remains in the list by getting
			 * a new reference count.  Note that boot_cache entries
			 * have a negative refcount, so don't touch them.
			 */
			if (s->refcount > 0)
				s->refcount++;
			found = true;
			break;
		}
		cnt++;
	}
	mutex_unlock(&slab_mutex);

	if (!found)
		s = NULL;

	p->kit.pos = s;
	return s;
}

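/*
 * seq_stop() doubles as the end-of-iteration notification: the program
 * is invoked one last time with a NULL cache, but only when the list
 * was fully traversed (v == NULL), not on an intermediate stop.
 */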
static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__kmem_cache ctx = {
		.meta = &meta,
		.s = v,
	};
	union kmem_cache_iter_priv *p = seq->private;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, true);
	if (prog && !ctx.s)
		bpf_iter_run_prog(prog, &ctx);

	bpf_iter_kmem_cache_destroy(&p->it);
}

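/* Advance by reusing the open-coded iterator, which also handles the
 * refcount handoff between the previous and the next cache.
 */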
static void *kmem_cache_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	union kmem_cache_iter_priv *p = seq->private;

	++*pos;

	return bpf_iter_kmem_cache_next(&p->it);
}

static int kmem_cache_iter_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__kmem_cache ctx = {
		.meta = &meta,
		.s = v,
	};
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	if (prog)
		ret = bpf_iter_run_prog(prog, &ctx);

	return ret;
}

static const struct seq_operations kmem_cache_iter_seq_ops = {
	.start  = kmem_cache_iter_seq_start,
	.next   = kmem_cache_iter_seq_next,
	.stop   = kmem_cache_iter_seq_stop,
	.show   = kmem_cache_iter_seq_show,
};

BTF_ID_LIST_GLOBAL_SINGLE(bpf_kmem_cache_btf_id, struct, kmem_cache)

static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
	.seq_ops		= &kmem_cache_iter_seq_ops,
	.seq_priv_size		= sizeof(union kmem_cache_iter_priv),
};

static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
					    struct seq_file *seq)
{
	seq_puts(seq, "kmem_cache iter\n");
}

DEFINE_BPF_ITER_FUNC(kmem_cache, struct bpf_iter_meta *meta,
		     struct kmem_cache *s)

static struct bpf_iter_reg bpf_kmem_cache_reg_info = {
	.target			= "kmem_cache",
	.feature		= BPF_ITER_RESCHED,
	.show_fdinfo		= bpf_iter_kmem_cache_show_fdinfo,
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__kmem_cache, s),
		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
	},
	.seq_info		= &kmem_cache_iter_seq_info,
};

static int __init bpf_kmem_cache_iter_init(void)
{
	bpf_kmem_cache_reg_info.ctx_arg_info[0].btf_id = bpf_kmem_cache_btf_id[0];
	return bpf_iter_reg_target(&bpf_kmem_cache_reg_info);
}

late_initcall(bpf_kmem_cache_iter_init);
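
/*
 * Userspace drives the seq_file iterator through libbpf; a sketch with
 * error handling elided (skeleton and program names are illustrative):
 *
 *	struct bpf_link *link;
 *	char buf[4096];
 *	int iter_fd;
 *	ssize_t n;
 *
 *	link = bpf_program__attach_iter(skel->progs.dump, NULL);
 *	iter_fd = bpf_iter_create(bpf_link__fd(link));
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 *	close(iter_fd);
 *	bpf_link__destroy(link);
 */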