xref: /linux/kernel/bpf/task_iter.c (revision 9406b485dea5e25bed7c81cd822747d494cc8bde)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */

#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/filter.h>
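
/* Common part of the seq_file private data for the "task" and "task_file"
 * iterator targets: the pid namespace being walked, recorded and pinned by
 * init_seq_pidns() below.
 */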
struct bpf_iter_seq_task_common {
	struct pid_namespace *ns;
};

struct bpf_iter_seq_task_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	u32 tid;
};
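
/* Find the next allocated pid at or after *tid in the namespace and take a
 * reference on its task_struct, if any. A non-NULL return must be balanced
 * by put_task_struct() in the caller.
 */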
static struct task_struct *task_seq_get_next(struct pid_namespace *ns,
					     u32 *tid)
{
	struct task_struct *task = NULL;
	struct pid *pid;

	rcu_read_lock();
	pid = idr_get_next(&ns->idr, tid);
	if (pid)
		task = get_pid_task(pid, PIDTYPE_PID);
	rcu_read_unlock();

	return task;
}
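
/* seq_file ->start(): resume the walk at info->tid and return the next task
 * (with a reference held) for ->show(), or NULL to end the iteration.
 */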
static void *task_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	task = task_seq_get_next(info->common.ns, &info->tid);
	if (!task)
		return NULL;

	++*pos;
	return task;
}

static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	++*pos;
	++info->tid;
	put_task_struct((struct task_struct *)v);
	task = task_seq_get_next(info->common.ns, &info->tid);
	if (!task)
		return NULL;

	return task;
}
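
/* Context object passed to BPF programs attached to the "task" iterator. */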
struct bpf_iter__task {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
};

DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)

static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
			   bool in_stop)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__task ctx;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	meta.seq = seq;
	ctx.meta = &meta;
	ctx.task = task;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_seq_show(struct seq_file *seq, void *v)
{
	return __task_seq_show(seq, v, false);
}
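
/* seq_file ->stop(): a NULL @v means ->start()/->next() found no more tasks,
 * so give the BPF program one final invocation with in_stop == true;
 * otherwise just drop the task reference taken by ->start()/->next().
 */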
static void task_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__task_seq_show(seq, v, true);
	else
		put_task_struct((struct task_struct *)v);
}

static const struct seq_operations task_seq_ops = {
	.start	= task_seq_start,
	.next	= task_seq_next,
	.stop	= task_seq_stop,
	.show	= task_seq_show,
};

struct bpf_iter_seq_task_file_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	struct files_struct *files;
	u32 tid;
	u32 fd;
};
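
/* Return the next open file: scan the current task's fd table from info->fd
 * onward and, when it is exhausted, advance to the next task in the
 * namespace.
 */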
static struct file *
task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info,
		       struct task_struct **task, struct files_struct **fstruct)
{
	struct pid_namespace *ns = info->common.ns;
	u32 curr_tid = info->tid, max_fds;
	struct files_struct *curr_files;
	struct task_struct *curr_task;
	int curr_fd = info->fd;

	/* If this function returns a non-NULL file object,
	 * it holds a reference on each of the task, files_struct and file.
	 * Otherwise, it does not hold any reference.
	 */
again:
	if (*task) {
		curr_task = *task;
		curr_files = *fstruct;
		curr_fd = info->fd;
	} else {
		curr_task = task_seq_get_next(ns, &curr_tid);
		if (!curr_task)
			return NULL;

		curr_files = get_files_struct(curr_task);
		if (!curr_files) {
			put_task_struct(curr_task);
			curr_tid = ++(info->tid);
			info->fd = 0;
			goto again;
		}

		/* set *fstruct, *task and info->tid */
		*fstruct = curr_files;
		*task = curr_task;
		if (curr_tid == info->tid) {
			curr_fd = info->fd;
		} else {
			info->tid = curr_tid;
			curr_fd = 0;
		}
	}

	rcu_read_lock();
	max_fds = files_fdtable(curr_files)->max_fds;
	for (; curr_fd < max_fds; curr_fd++) {
		struct file *f;

		f = fcheck_files(curr_files, curr_fd);
		if (!f)
			continue;

		/* set info->fd */
		info->fd = curr_fd;
		get_file(f);
		rcu_read_unlock();
		return f;
	}

	/* the current task is done, go to the next task */
	rcu_read_unlock();
	put_files_struct(curr_files);
	put_task_struct(curr_task);
	*task = NULL;
	*fstruct = NULL;
	info->fd = 0;
	curr_tid = ++(info->tid);
	goto again;
}
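
/* task_file_seq_get_next() hands back the owning task and files_struct;
 * ->start()/->next() cache them in the private info so ->show() can report
 * the task and fd and ->stop() can drop the references.
 */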
static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct files_struct *files = NULL;
	struct task_struct *task = NULL;
	struct file *file;

	file = task_file_seq_get_next(info, &task, &files);
	if (!file) {
		info->files = NULL;
		info->task = NULL;
		return NULL;
	}

	++*pos;
	info->task = task;
	info->files = files;

	return file;
}

static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct files_struct *files = info->files;
	struct task_struct *task = info->task;
	struct file *file;

	++*pos;
	++info->fd;
	fput((struct file *)v);
	file = task_file_seq_get_next(info, &task, &files);
	if (!file) {
		info->files = NULL;
		info->task = NULL;
		return NULL;
	}

	info->task = task;
	info->files = files;

	return file;
}
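
/* Context object passed to BPF programs attached to the "task_file"
 * iterator: the owning task, the fd number and the struct file itself.
 */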
struct bpf_iter__task_file {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	u32 fd __aligned(8);
	__bpf_md_ptr(struct file *, file);
};

DEFINE_BPF_ITER_FUNC(task_file, struct bpf_iter_meta *meta,
		     struct task_struct *task, u32 fd,
		     struct file *file)
static int __task_file_seq_show(struct seq_file *seq, struct file *file,
				bool in_stop)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct bpf_iter__task_file ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.fd = info->fd;
	ctx.file = file;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_file_seq_show(struct seq_file *seq, void *v)
{
	return __task_file_seq_show(seq, v, false);
}
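
/* seq_file ->stop(): on normal termination (@v == NULL) let the BPF program
 * run once more with in_stop == true; otherwise drop the file, files_struct
 * and task references held for the element still in flight.
 */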
static void task_file_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	if (!v) {
		(void)__task_file_seq_show(seq, v, true);
	} else {
		fput((struct file *)v);
		put_files_struct(info->files);
		put_task_struct(info->task);
		info->files = NULL;
		info->task = NULL;
	}
}
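
/* Pin the current task's active pid namespace in the iterator's private
 * data; the reference is dropped by fini_seq_pidns().
 */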
static int init_seq_pidns(void *priv_data)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	common->ns = get_pid_ns(task_active_pid_ns(current));
	return 0;
}

static void fini_seq_pidns(void *priv_data)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	put_pid_ns(common->ns);
}

static const struct seq_operations task_file_seq_ops = {
	.start	= task_file_seq_start,
	.next	= task_file_seq_next,
	.stop	= task_file_seq_stop,
	.show	= task_file_seq_show,
};
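
/* Register the "task" and "task_file" iterator targets so BPF iterator
 * programs can attach to them.
 */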
static int __init task_iter_init(void)
{
	struct bpf_iter_reg task_file_reg_info = {
		.target			= "task_file",
		.seq_ops		= &task_file_seq_ops,
		.init_seq_private	= init_seq_pidns,
		.fini_seq_private	= fini_seq_pidns,
		.seq_priv_size		= sizeof(struct bpf_iter_seq_task_file_info),
	};
	struct bpf_iter_reg task_reg_info = {
		.target			= "task",
		.seq_ops		= &task_seq_ops,
		.init_seq_private	= init_seq_pidns,
		.fini_seq_private	= fini_seq_pidns,
		.seq_priv_size		= sizeof(struct bpf_iter_seq_task_info),
	};
	int ret;

	ret = bpf_iter_reg_target(&task_reg_info);
	if (ret)
		return ret;

	return bpf_iter_reg_target(&task_file_reg_info);
}
late_initcall(task_iter_init);