// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *cgrp = container_of(work, struct cgroup,
					   bpf.release_work);
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *old_array;
	unsigned int type;

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			for_each_cgroup_storage_type(stype) {
				bpf_cgroup_storage_unlink(pl->storage[stype]);
				bpf_cgroup_storage_free(pl->storage[stype]);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				percpu_ref_is_dying(&cgrp->bpf.refcnt));
		bpf_prog_array_free(old_array);
	}

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Count the number of elements in the list.
 * It's slow, but the list cannot be long.
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}

/* If the parent has a non-overridable prog attached, disallow attaching
 * new programs to the descendant cgroup. If the parent has an overridable
 * or multi-prog, allow attaching.
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type,
				    u32 new_flags)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
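
/*
 * Worked example (illustrative, derived from the logic above): with a
 * hierarchy A -> B, attaching to B succeeds if A has no program of
 * @type, or if A's program was attached with BPF_F_ALLOW_OVERRIDE or
 * BPF_F_ALLOW_MULTI; it fails with -EPERM if A holds a single
 * non-overridable program.
 */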

/* Compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's BPF_F_ALLOW_OVERRIDE program yields
 * to programs in this cgroup.
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!pl->prog)
				continue;

			progs->items[cnt].prog = pl->prog;
			for_each_cgroup_storage_type(stype)
				progs->items[cnt].cgroup_storage[stype] =
					pl->storage[stype];
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}
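
/*
 * Worked example (illustrative): the array is populated starting from
 * the cgroup itself and walking towards the root, so for A -> B -> C
 * (all attached with BPF_F_ALLOW_MULTI) C's effective array holds C's
 * own programs first, then B's, then A's. Without BPF_F_ALLOW_MULTI an
 * ancestor contributes only when no descendant contributed (cnt == 0).
 */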

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	rcu_swap_protected(cgrp->bpf.effective[type], old_array,
			   lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* Has to use a macro instead of a const int, since the compiler thinks
 * the array below is variable length.
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* OOM while computing effective progs. Free all computed effective
	 * arrays, since they were not activated.
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL},
		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_list *pl;
	bool pl_was_allocated;
	int err;

	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type, flags))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none.
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			/* Entries are NULL-initialized above, so freeing
			 * the whole array here is safe.
			 */
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == prog) {
				/* disallow attaching the same prog twice */
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -EINVAL;
			}
		}

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}

		pl_was_allocated = true;
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
		list_add_tail(&pl->node, progs);
	} else {
		if (list_empty(progs)) {
			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
			if (!pl) {
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -ENOMEM;
			}
			pl_was_allocated = true;
			list_add_tail(&pl->node, progs);
		} else {
			pl = list_first_entry(progs, typeof(*pl), node);
			old_prog = pl->prog;
			for_each_cgroup_storage_type(stype) {
				old_storage[stype] = pl->storage[stype];
				bpf_cgroup_storage_unlink(old_storage[stype]);
			}
			pl_was_allocated = false;
		}
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
	}

	cgrp->bpf.flags[type] = flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	static_branch_inc(&cgroup_bpf_enabled_key);
	for_each_cgroup_storage_type(stype) {
		if (!old_storage[stype])
			continue;
		bpf_cgroup_storage_free(old_storage[stype]);
	}
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storage[stype], cgrp, type);
	return 0;

cleanup:
	/* and cleanup the prog list */
	pl->prog = old_prog;
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_free(pl->storage[stype]);
		pl->storage[stype] = old_storage[stype];
		bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
	}
	if (pl_was_allocated) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
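
/*
 * Illustrative user-space sketch (not part of this file): the syscall
 * path that lands in __cgroup_bpf_attach() above. Assumes cgroup_fd is
 * an open cgroupfs directory and prog_fd a program loaded via
 * BPF_PROG_LOAD; both names are placeholders.
 */
#if 0
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_to_cgroup(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;		/* cgroup directory fd */
	attr.attach_bpf_fd = prog_fd;		/* loaded program fd */
	attr.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags = BPF_F_ALLOW_MULTI;	/* coexist with other progs */

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}
#endif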

/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	enum bpf_cgroup_storage_type stype;
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach a MULTI prog the user has to specify a
			 * valid FD of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog == NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_unlink(pl->storage[stype]);
		bpf_cgroup_storage_free(pl->storage[stype]);
	}
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* and restore back old_prog */
	pl->prog = old_prog;
	return err;
}
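
/*
 * Illustrative user-space sketch (not part of this file): detaching.
 * For a BPF_F_ALLOW_MULTI cgroup a valid fd of the attached program is
 * required; NONE/OVERRIDE cgroups also accept an invalid fd (the prog
 * then arrives here as NULL, see above).
 */
#if 0
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int detach_from_cgroup(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;
	attr.attach_bpf_fd = prog_fd;	/* required for MULTI cgroups */
	attr.attach_type = BPF_CGROUP_INET_INGRESS;

	return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
}
#endif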

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
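
/*
 * Illustrative user-space sketch (not part of this file): querying the
 * effective program ids, as served by __cgroup_bpf_query() above. On
 * success *cnt holds the number of ids written.
 */
#if 0
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int query_cgroup_progs(int cgroup_fd, __u32 *ids, __u32 *cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = cgroup_fd;
	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
	attr.query.prog_ids = (__u64)(unsigned long)ids;
	attr.query.prog_cnt = *cnt;	/* in: capacity, out: count */

	if (syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr)))
		return -1;
	*cnt = attr.query.prog_cnt;
	return 0;
}
#endif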

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					  __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
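
/*
 * Illustrative BPF-side sketch (not part of this file): a minimal
 * program of the kind run by __cgroup_bpf_run_filter_skb(). Returning
 * 1 allows the packet; returning 0 denies it (mapped to -EPERM as
 * described above). Assumes a clang BPF build with libbpf's
 * bpf_helpers.h providing SEC().
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int allow_min_len(struct __sk_buff *skb)
{
	/* Deny anything shorter than a minimal IPv4 header. */
	if (skb->len < 20)
		return 0;	/* deny */
	return 1;		/* allow */
}

char _license[] SEC("license") = "GPL";
#endif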

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and the
 *                                       sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent a network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
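
/*
 * Illustrative BPF-side sketch (not part of this file): a bind4 hook of
 * the kind run by __cgroup_bpf_run_filter_sock_addr(), attached with
 * BPF_CGROUP_INET4_BIND. Assumes libbpf's bpf_helpers.h/bpf_endian.h.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/bind4")
int deny_privileged_bind(struct bpf_sock_addr *ctx)
{
	/* user_port is stored in network byte order. */
	if (bpf_ntohs(ctx->user_port) < 1024)
		return 0;	/* caller sees -EPERM */
	return 1;
}

char _license[] SEC("license") = "GPL";
#endif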

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.). May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
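
/*
 * Illustrative BPF-side sketch (not part of this file): a device access
 * filter as run by __cgroup_bpf_check_dev_permission(). Note the
 * access_type encoding (access << 16 | dev_type) used above.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/dev")
int allow_dev_null_only(struct bpf_cgroup_dev_ctx *ctx)
{
	__u32 dev_type = ctx->access_type & 0xFFFF;	/* low 16 bits */

	/* Permit only the character device 1:3, i.e. /dev/null. */
	if (dev_type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 && ctx->minor == 3)
		return 1;
	return 0;	/* denied: the helper above returns !allow */
}

char _license[] SEC("license") = "GPL";
#endif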

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer passed by user space
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @new_buf: pointer to pointer to new buffer that will be allocated if program
 *	overrides new value provided by user space on sysctl write
 *	NOTE: it's the caller's responsibility to free *new_buf if it was set
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (ctx.cur_val) {
		mm_segment_t old_fs;
		loff_t pos = 0;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
					&ctx.cur_len, &pos)) {
			/* Let BPF program decide how to proceed. */
			ctx.cur_len = 0;
		}
		set_fs(old_fs);
	} else {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than the one provided by user space.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (!ctx.new_val ||
		    copy_from_user(ctx.new_val, buf, ctx.new_len))
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		*new_buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);
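
/*
 * Illustrative BPF-side sketch (not part of this file): a sysctl hook of
 * the kind run by __cgroup_bpf_run_filter_sysctl(), attached with
 * BPF_CGROUP_SYSCTL. Assumes a bpf_helpers.h that declares the
 * bpf_sysctl_get_name() helper implemented below.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int sysctl_read_only(struct bpf_sysctl *ctx)
{
	char name[64];

	/* Full name relative to /proc/sys, e.g. "net/ipv4/tcp_mem". */
	if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
		return 1;	/* cannot tell; allow */

	return ctx->write ? 0 : 1;	/* deny writes, allow reads */
}

char _license[] SEC("license") = "GPL";
#endif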

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}
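
/*
 * Example (illustrative): for /proc/sys/net/ipv4/tcp_mem the recursion
 * above emits "net/" and then "ipv4/", so bpf_sysctl_get_name() below
 * can append the base name to yield "net/ipv4/tcp_mem".
 */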

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}
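
/*
 * Example (illustrative): copying src = "1\n" (src_len == 2) into a
 * 16-byte dst stores "1\n" plus zero padding and returns 2; with a
 * 2-byte dst the NUL terminator no longer fits, so dst becomes "1\0"
 * and -E2BIG is returned.
 */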

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case offsetof(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case offsetof(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}
static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       FIELD_SIZEOF(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also, for stores an additional temporary
		 * register is used, since neither src_reg nor dst_reg can be
		 * overwritten.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg, 0);
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg, 0);
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};
1184