xref: /linux/kernel/bpf/kmem_cache_iter.c (revision 4971266e1595f76be3f844c834c1f9357a97dbde)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Google */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

#include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */

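/* Context passed to a BPF iterator program for each kmem_cache entry; it
 * matches the arguments declared in DEFINE_BPF_ITER_FUNC() below.
 */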
struct bpf_iter__kmem_cache {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct kmem_cache *, s);
};

static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
{
	loff_t cnt = 0;
	bool found = false;
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);

	/* Find an entry at the given position in the slab_caches list instead
	 * of keeping a reference (of the last visited entry, if any) out of
	 * slab_mutex. It might miss something if one is deleted in the middle
	 * while it releases the lock.  But it should be rare and there's not
	 * much we can do about it.
	 */
	list_for_each_entry(s, &slab_caches, list) {
		if (cnt == *pos) {
			/* Make sure this entry remains in the list by getting
			 * a new reference count.  Note that boot_cache entries
			 * have a negative refcount, so don't touch them.
			 */
			if (s->refcount > 0)
				s->refcount++;
			found = true;
			break;
		}
		cnt++;
	}
	mutex_unlock(&slab_mutex);

	if (!found)
		return NULL;

	return s;
}

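/* Called when the iteration stops.  If there is no entry left (v == NULL),
 * run the BPF program once more with a NULL kmem_cache so it can tell that
 * the iteration is over.  Otherwise drop the reference taken in
 * ->start()/->next(), destroying the cache if it was the last reference.
 */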
static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__kmem_cache ctx = {
		.meta = &meta,
		.s = v,
	};
	struct bpf_prog *prog;
	bool destroy = false;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, true);
	if (prog && !ctx.s)
		bpf_iter_run_prog(prog, &ctx);

	if (ctx.s == NULL)
		return;

	mutex_lock(&slab_mutex);

	/* Skip kmem_cache_destroy() for active entries */
	if (ctx.s->refcount > 1)
		ctx.s->refcount--;
	else if (ctx.s->refcount == 1)
		destroy = true;

	mutex_unlock(&slab_mutex);

	if (destroy)
		kmem_cache_destroy(ctx.s);
}

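/* Advance to the next kmem_cache: take a reference on the next entry (if
 * any) so it stays on the list while the lock is dropped, then release the
 * reference on the current entry, destroying it if this was the last one.
 */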
static void *kmem_cache_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmem_cache *s = v;
	struct kmem_cache *next = NULL;
	bool destroy = false;

	++*pos;

	mutex_lock(&slab_mutex);

	if (list_last_entry(&slab_caches, struct kmem_cache, list) != s) {
		next = list_next_entry(s, list);

		WARN_ON_ONCE(next->refcount == 0);

		/* boot_caches have negative refcount, don't touch them */
		if (next->refcount > 0)
			next->refcount++;
	}

	/* Skip kmem_cache_destroy() for active entries */
	if (s->refcount > 1)
		s->refcount--;
	else if (s->refcount == 1)
		destroy = true;

	mutex_unlock(&slab_mutex);

	if (destroy)
		kmem_cache_destroy(s);

	return next;
}

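/* Run the attached BPF program for the current kmem_cache entry. */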
static int kmem_cache_iter_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__kmem_cache ctx = {
		.meta = &meta,
		.s = v,
	};
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	if (prog)
		ret = bpf_iter_run_prog(prog, &ctx);

	return ret;
}

static const struct seq_operations kmem_cache_iter_seq_ops = {
	.start  = kmem_cache_iter_seq_start,
	.next   = kmem_cache_iter_seq_next,
	.stop   = kmem_cache_iter_seq_stop,
	.show   = kmem_cache_iter_seq_show,
};

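/* BTF ID of struct kmem_cache, used below to type the iterator's ctx argument */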
BTF_ID_LIST_GLOBAL_SINGLE(bpf_kmem_cache_btf_id, struct, kmem_cache)

static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
	.seq_ops		= &kmem_cache_iter_seq_ops,
};

static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
					    struct seq_file *seq)
{
	seq_puts(seq, "kmem_cache iter\n");
}

DEFINE_BPF_ITER_FUNC(kmem_cache, struct bpf_iter_meta *meta,
		     struct kmem_cache *s)

static struct bpf_iter_reg bpf_kmem_cache_reg_info = {
	.target			= "kmem_cache",
	.feature		= BPF_ITER_RESCHED,
	.show_fdinfo		= bpf_iter_kmem_cache_show_fdinfo,
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__kmem_cache, s),
		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
	},
	.seq_info		= &kmem_cache_iter_seq_info,
};

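/* Fill in the BTF ID for the ctx argument and register the "kmem_cache"
 * iterator target with the BPF iterator infrastructure.
 */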
static int __init bpf_kmem_cache_iter_init(void)
{
	bpf_kmem_cache_reg_info.ctx_arg_info[0].btf_id = bpf_kmem_cache_btf_id[0];
	return bpf_iter_reg_target(&bpf_kmem_cache_reg_info);
}

late_initcall(bpf_kmem_cache_iter_init);
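
/* A minimal usage sketch, not part of this file: a BPF program attaching to
 * the "kmem_cache" iterator target registered above could look roughly like
 * the snippet below.  The program name and output format are illustrative
 * assumptions only; ctx->s is NULL on the final invocation at the end of
 * the iteration.
 *
 *	SEC("iter/kmem_cache")
 *	int dump_kmem_cache(struct bpf_iter__kmem_cache *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct kmem_cache *s = ctx->s;
 *
 *		if (s == NULL)
 *			return 0;
 *
 *		BPF_SEQ_PRINTF(seq, "%s: %u\n", s->name, s->size);
 *		return 0;
 *	}
 */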