xref: /linux/kernel/bpf/cgroup.c (revision d9104cec3e8fe4b458b74709853231385779001f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Functions to manage eBPF programs attached to cgroups
4  *
5  * Copyright (c) 2016 Daniel Mack
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/atomic.h>
10 #include <linux/cgroup.h>
11 #include <linux/filter.h>
12 #include <linux/slab.h>
13 #include <linux/sysctl.h>
14 #include <linux/string.h>
15 #include <linux/bpf.h>
16 #include <linux/bpf-cgroup.h>
17 #include <linux/bpf_lsm.h>
18 #include <linux/bpf_verifier.h>
19 #include <net/sock.h>
20 #include <net/bpf_sk_storage.h>
21 
22 #include "../cgroup/cgroup-internal.h"
23 
24 DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
25 EXPORT_SYMBOL(cgroup_bpf_enabled_key);
26 
27 /*
28  * cgroup bpf destruction makes heavy use of work items and there can be a lot
29  * of concurrent destructions.  Use a separate workqueue so that cgroup bpf
30  * destruction work items don't end up filling up max_active of system_wq
31  * which may lead to deadlock.
32  */
33 static struct workqueue_struct *cgroup_bpf_destroy_wq;
34 
35 static int __init cgroup_bpf_wq_init(void)
36 {
37 	cgroup_bpf_destroy_wq = alloc_workqueue("cgroup_bpf_destroy", 0, 1);
38 	if (!cgroup_bpf_destroy_wq)
39 		panic("Failed to alloc workqueue for cgroup bpf destroy.\n");
40 	return 0;
41 }
42 core_initcall(cgroup_bpf_wq_init);
43 
44 static int cgroup_bpf_lifetime_notify(struct notifier_block *nb,
45 				      unsigned long action, void *data);
46 
47 static struct notifier_block cgroup_bpf_lifetime_nb = {
48 	.notifier_call = cgroup_bpf_lifetime_notify,
49 };
50 
51 void __init cgroup_bpf_lifetime_notifier_init(void)
52 {
53 	BUG_ON(blocking_notifier_chain_register(&cgroup_lifetime_notifier,
54 						&cgroup_bpf_lifetime_nb));
55 }
56 
57 /* __always_inline is necessary to prevent indirect call through run_prog
58  * function pointer.
59  */
60 static __always_inline int
61 bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
62 		      enum cgroup_bpf_attach_type atype,
63 		      const void *ctx, bpf_prog_run_fn run_prog,
64 		      int retval, u32 *ret_flags)
65 {
66 	const struct bpf_prog_array_item *item;
67 	const struct bpf_prog *prog;
68 	const struct bpf_prog_array *array;
69 	struct bpf_run_ctx *old_run_ctx;
70 	struct bpf_cg_run_ctx run_ctx;
71 	u32 func_ret;
72 
73 	run_ctx.retval = retval;
74 	migrate_disable();
75 	rcu_read_lock();
76 	array = rcu_dereference(cgrp->effective[atype]);
77 	item = &array->items[0];
78 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
79 	while ((prog = READ_ONCE(item->prog))) {
80 		run_ctx.prog_item = item;
81 		func_ret = run_prog(prog, ctx);
82 		if (ret_flags) {
83 			*(ret_flags) |= (func_ret >> 1);
84 			func_ret &= 1;
85 		}
86 		if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
87 			run_ctx.retval = -EPERM;
88 		item++;
89 	}
90 	bpf_reset_run_ctx(old_run_ctx);
91 	rcu_read_unlock();
92 	migrate_enable();
93 	return run_ctx.retval;
94 }
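/* Illustrative example of the return-value convention enforced by the loop
 * above (a sketch, using a made-up program return value): bit 0 of each
 * program's return is the allow/deny verdict, and the remaining bits are
 * OR'ed into *ret_flags when the caller supplies one:
 *
 *	u32 func_ret = 3;		// hypothetical program return value
 *	*ret_flags |= (func_ret >> 1);	// upper bits become flags (here 0x1)
 *	func_ret &= 1;			// verdict: 1 == allow
 *
 * A verdict of 0 downgrades run_ctx.retval to -EPERM unless it already holds
 * an error value.
 */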
95 
96 unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
97 				       const struct bpf_insn *insn)
98 {
99 	const struct bpf_prog *shim_prog;
100 	struct sock *sk;
101 	struct cgroup *cgrp;
102 	int ret = 0;
103 	u64 *args;
104 
105 	args = (u64 *)ctx;
106 	sk = (void *)(unsigned long)args[0];
107 	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
108 	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
109 
110 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
111 	if (likely(cgrp))
112 		ret = bpf_prog_run_array_cg(&cgrp->bpf,
113 					    shim_prog->aux->cgroup_atype,
114 					    ctx, bpf_prog_run, 0, NULL);
115 	return ret;
116 }
117 
118 unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
119 					 const struct bpf_insn *insn)
120 {
121 	const struct bpf_prog *shim_prog;
122 	struct socket *sock;
123 	struct cgroup *cgrp;
124 	int ret = 0;
125 	u64 *args;
126 
127 	args = (u64 *)ctx;
128 	sock = (void *)(unsigned long)args[0];
129 	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
130 	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
131 
132 	cgrp = sock_cgroup_ptr(&sock->sk->sk_cgrp_data);
133 	if (likely(cgrp))
134 		ret = bpf_prog_run_array_cg(&cgrp->bpf,
135 					    shim_prog->aux->cgroup_atype,
136 					    ctx, bpf_prog_run, 0, NULL);
137 	return ret;
138 }
139 
140 unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
141 					  const struct bpf_insn *insn)
142 {
143 	const struct bpf_prog *shim_prog;
144 	struct cgroup *cgrp;
145 	int ret = 0;
146 
147 	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
148 	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
149 
150 	/* We rely on trampoline's __bpf_prog_enter_lsm_cgroup to grab RCU read lock. */
151 	cgrp = task_dfl_cgroup(current);
152 	if (likely(cgrp))
153 		ret = bpf_prog_run_array_cg(&cgrp->bpf,
154 					    shim_prog->aux->cgroup_atype,
155 					    ctx, bpf_prog_run, 0, NULL);
156 	return ret;
157 }
158 
159 #ifdef CONFIG_BPF_LSM
160 struct cgroup_lsm_atype {
161 	u32 attach_btf_id;
162 	int refcnt;
163 };
164 
165 static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM];
166 
167 static enum cgroup_bpf_attach_type
168 bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
169 {
170 	int i;
171 
172 	lockdep_assert_held(&cgroup_mutex);
173 
174 	if (attach_type != BPF_LSM_CGROUP)
175 		return to_cgroup_bpf_attach_type(attach_type);
176 
177 	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
178 		if (cgroup_lsm_atype[i].attach_btf_id == attach_btf_id)
179 			return CGROUP_LSM_START + i;
180 
181 	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
182 		if (cgroup_lsm_atype[i].attach_btf_id == 0)
183 			return CGROUP_LSM_START + i;
184 
185 	return -E2BIG;
186 
187 }
188 
189 void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype)
190 {
191 	int i = cgroup_atype - CGROUP_LSM_START;
192 
193 	lockdep_assert_held(&cgroup_mutex);
194 
195 	WARN_ON_ONCE(cgroup_lsm_atype[i].attach_btf_id &&
196 		     cgroup_lsm_atype[i].attach_btf_id != attach_btf_id);
197 
198 	cgroup_lsm_atype[i].attach_btf_id = attach_btf_id;
199 	cgroup_lsm_atype[i].refcnt++;
200 }
201 
202 void bpf_cgroup_atype_put(int cgroup_atype)
203 {
204 	int i = cgroup_atype - CGROUP_LSM_START;
205 
206 	cgroup_lock();
207 	if (--cgroup_lsm_atype[i].refcnt <= 0)
208 		cgroup_lsm_atype[i].attach_btf_id = 0;
209 	WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
210 	cgroup_unlock();
211 }
212 #else
213 static enum cgroup_bpf_attach_type
214 bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
215 {
216 	if (attach_type != BPF_LSM_CGROUP)
217 		return to_cgroup_bpf_attach_type(attach_type);
218 	return -EOPNOTSUPP;
219 }
220 #endif /* CONFIG_BPF_LSM */
221 
222 static void cgroup_bpf_offline(struct cgroup *cgrp)
223 {
224 	cgroup_get(cgrp);
225 	percpu_ref_kill(&cgrp->bpf.refcnt);
226 }
227 
228 static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
229 {
230 	enum bpf_cgroup_storage_type stype;
231 
232 	for_each_cgroup_storage_type(stype)
233 		bpf_cgroup_storage_free(storages[stype]);
234 }
235 
236 static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
237 				     struct bpf_cgroup_storage *new_storages[],
238 				     enum bpf_attach_type type,
239 				     struct bpf_prog *prog,
240 				     struct cgroup *cgrp)
241 {
242 	enum bpf_cgroup_storage_type stype;
243 	struct bpf_cgroup_storage_key key;
244 	struct bpf_map *map;
245 
246 	key.cgroup_inode_id = cgroup_id(cgrp);
247 	key.attach_type = type;
248 
249 	for_each_cgroup_storage_type(stype) {
250 		map = prog->aux->cgroup_storage[stype];
251 		if (!map)
252 			continue;
253 
254 		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
255 		if (storages[stype])
256 			continue;
257 
258 		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
259 		if (IS_ERR(storages[stype])) {
260 			bpf_cgroup_storages_free(new_storages);
261 			return -ENOMEM;
262 		}
263 
264 		new_storages[stype] = storages[stype];
265 	}
266 
267 	return 0;
268 }
269 
270 static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
271 				       struct bpf_cgroup_storage *src[])
272 {
273 	enum bpf_cgroup_storage_type stype;
274 
275 	for_each_cgroup_storage_type(stype)
276 		dst[stype] = src[stype];
277 }
278 
279 static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
280 				     struct cgroup *cgrp,
281 				     enum bpf_attach_type attach_type)
282 {
283 	enum bpf_cgroup_storage_type stype;
284 
285 	for_each_cgroup_storage_type(stype)
286 		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
287 }
288 
289 /* Called when bpf_cgroup_link is auto-detached from dying cgroup.
290  * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
291  * doesn't free link memory, which will eventually be done by bpf_link's
292  * release() callback, when its last FD is closed.
293  */
294 static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
295 {
296 	cgroup_put(link->cgroup);
297 	link->cgroup = NULL;
298 }
299 
300 /**
301  * cgroup_bpf_release() - put references of all bpf programs and
302  *                        release all cgroup bpf data
303  * @work: work structure embedded into the cgroup to modify
304  */
305 static void cgroup_bpf_release(struct work_struct *work)
306 {
307 	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
308 					       bpf.release_work);
309 	struct bpf_prog_array *old_array;
310 	struct list_head *storages = &cgrp->bpf.storages;
311 	struct bpf_cgroup_storage *storage, *stmp;
312 
313 	unsigned int atype;
314 
315 	cgroup_lock();
316 
317 	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
318 		struct hlist_head *progs = &cgrp->bpf.progs[atype];
319 		struct bpf_prog_list *pl;
320 		struct hlist_node *pltmp;
321 
322 		hlist_for_each_entry_safe(pl, pltmp, progs, node) {
323 			hlist_del(&pl->node);
324 			if (pl->prog) {
325 				if (pl->prog->expected_attach_type == BPF_LSM_CGROUP)
326 					bpf_trampoline_unlink_cgroup_shim(pl->prog);
327 				bpf_prog_put(pl->prog);
328 			}
329 			if (pl->link) {
330 				if (pl->link->link.prog->expected_attach_type == BPF_LSM_CGROUP)
331 					bpf_trampoline_unlink_cgroup_shim(pl->link->link.prog);
332 				bpf_cgroup_link_auto_detach(pl->link);
333 			}
334 			kfree(pl);
335 			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
336 		}
337 		old_array = rcu_dereference_protected(
338 				cgrp->bpf.effective[atype],
339 				lockdep_is_held(&cgroup_mutex));
340 		bpf_prog_array_free(old_array);
341 	}
342 
343 	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
344 		bpf_cgroup_storage_unlink(storage);
345 		bpf_cgroup_storage_free(storage);
346 	}
347 
348 	cgroup_unlock();
349 
350 	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
351 		cgroup_bpf_put(p);
352 
353 	percpu_ref_exit(&cgrp->bpf.refcnt);
354 	cgroup_put(cgrp);
355 }
356 
357 /**
358  * cgroup_bpf_release_fn() - callback used to schedule releasing
359  *                           of bpf cgroup data
360  * @ref: percpu ref counter structure
361  */
362 static void cgroup_bpf_release_fn(struct percpu_ref *ref)
363 {
364 	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);
365 
366 	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
367 	queue_work(cgroup_bpf_destroy_wq, &cgrp->bpf.release_work);
368 }
369 
370 /* Get underlying bpf_prog of bpf_prog_list entry, regardless of whether it's
371  * through a link or a direct prog.
372  */
373 static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
374 {
375 	if (pl->prog)
376 		return pl->prog;
377 	if (pl->link)
378 		return pl->link->link.prog;
379 	return NULL;
380 }
381 
382 /* count number of elements in the list.
383  * it's slow but the list cannot be long
384  */
385 static u32 prog_list_length(struct hlist_head *head, int *preorder_cnt)
386 {
387 	struct bpf_prog_list *pl;
388 	u32 cnt = 0;
389 
390 	hlist_for_each_entry(pl, head, node) {
391 		if (!prog_list_prog(pl))
392 			continue;
393 		if (preorder_cnt && (pl->flags & BPF_F_PREORDER))
394 			(*preorder_cnt)++;
395 		cnt++;
396 	}
397 	return cnt;
398 }
399 
400 /* if parent has non-overridable prog attached,
401  * disallow attaching new programs to the descendant cgroup.
402  * if parent has overridable or multi-prog, allow attaching
403  */
404 static bool hierarchy_allows_attach(struct cgroup *cgrp,
405 				    enum cgroup_bpf_attach_type atype)
406 {
407 	struct cgroup *p;
408 
409 	p = cgroup_parent(cgrp);
410 	if (!p)
411 		return true;
412 	do {
413 		u32 flags = p->bpf.flags[atype];
414 		u32 cnt;
415 
416 		if (flags & BPF_F_ALLOW_MULTI)
417 			return true;
418 		cnt = prog_list_length(&p->bpf.progs[atype], NULL);
419 		WARN_ON_ONCE(cnt > 1);
420 		if (cnt == 1)
421 			return !!(flags & BPF_F_ALLOW_OVERRIDE);
422 		p = cgroup_parent(p);
423 	} while (p);
424 	return true;
425 }
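/* Example of the policy above (illustrative): a parent that attached a single
 * program with neither BPF_F_ALLOW_OVERRIDE nor BPF_F_ALLOW_MULTI blocks
 * attachment in all of its descendants; an overridable or multi-prog parent
 * keeps the hierarchy open for further attachments.
 */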
426 
427 /* compute a chain of effective programs for a given cgroup:
428  * start from the list of programs in this cgroup and add
429  * all parent programs.
430  * Note that parent's F_ALLOW_OVERRIDE-type program yields to
431  * programs in this cgroup
432  */
433 static int compute_effective_progs(struct cgroup *cgrp,
434 				   enum cgroup_bpf_attach_type atype,
435 				   struct bpf_prog_array **array)
436 {
437 	struct bpf_prog_array_item *item;
438 	struct bpf_prog_array *progs;
439 	struct bpf_prog_list *pl;
440 	struct cgroup *p = cgrp;
441 	int i, j, cnt = 0, preorder_cnt = 0, fstart, bstart, init_bstart;
442 
443 	/* count number of effective programs by walking parents */
444 	do {
445 		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
446 			cnt += prog_list_length(&p->bpf.progs[atype], &preorder_cnt);
447 		p = cgroup_parent(p);
448 	} while (p);
449 
450 	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
451 	if (!progs)
452 		return -ENOMEM;
453 
454 	/* populate the array with effective progs */
455 	cnt = 0;
456 	p = cgrp;
457 	fstart = preorder_cnt;
458 	bstart = preorder_cnt - 1;
459 	do {
460 		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
461 			continue;
462 
463 		init_bstart = bstart;
464 		hlist_for_each_entry(pl, &p->bpf.progs[atype], node) {
465 			if (!prog_list_prog(pl))
466 				continue;
467 
468 			if (pl->flags & BPF_F_PREORDER) {
469 				item = &progs->items[bstart];
470 				bstart--;
471 			} else {
472 				item = &progs->items[fstart];
473 				fstart++;
474 			}
475 			item->prog = prog_list_prog(pl);
476 			bpf_cgroup_storages_assign(item->cgroup_storage,
477 						   pl->storage);
478 			cnt++;
479 		}
480 
481 		/* reverse pre-ordering progs at this cgroup level */
482 		for (i = bstart + 1, j = init_bstart; i < j; i++, j--)
483 			swap(progs->items[i], progs->items[j]);
484 
485 	} while ((p = cgroup_parent(p)));
486 
487 	*array = progs;
488 	return 0;
489 }
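/* Resulting layout (a sketch derived from the placement logic above):
 * items [0, preorder_cnt) hold BPF_F_PREORDER programs ordered from the root
 * towards @cgrp, items [preorder_cnt, cnt) hold the remaining programs
 * ordered from @cgrp towards the root, so pre-ordered programs of an ancestor
 * execute before those of a descendant.
 */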
490 
491 static void activate_effective_progs(struct cgroup *cgrp,
492 				     enum cgroup_bpf_attach_type atype,
493 				     struct bpf_prog_array *old_array)
494 {
495 	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
496 					lockdep_is_held(&cgroup_mutex));
497 	/* free prog array after grace period, since __cgroup_bpf_run_*()
498 	 * might be still walking the array
499 	 */
500 	bpf_prog_array_free(old_array);
501 }
502 
503 /**
504  * cgroup_bpf_inherit() - inherit effective programs from parent
505  * @cgrp: the cgroup to modify
506  */
507 static int cgroup_bpf_inherit(struct cgroup *cgrp)
508 {
509 /* has to use a macro instead of const int, since compiler thinks
510  * that array below is variable length
511  */
512 #define	NR ARRAY_SIZE(cgrp->bpf.effective)
513 	struct bpf_prog_array *arrays[NR] = {};
514 	struct cgroup *p;
515 	int ret, i;
516 
517 	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
518 			      GFP_KERNEL);
519 	if (ret)
520 		return ret;
521 
522 	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
523 		cgroup_bpf_get(p);
524 
525 	for (i = 0; i < NR; i++)
526 		INIT_HLIST_HEAD(&cgrp->bpf.progs[i]);
527 
528 	INIT_LIST_HEAD(&cgrp->bpf.storages);
529 
530 	for (i = 0; i < NR; i++)
531 		if (compute_effective_progs(cgrp, i, &arrays[i]))
532 			goto cleanup;
533 
534 	for (i = 0; i < NR; i++)
535 		activate_effective_progs(cgrp, i, arrays[i]);
536 
537 	return 0;
538 cleanup:
539 	for (i = 0; i < NR; i++)
540 		bpf_prog_array_free(arrays[i]);
541 
542 	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
543 		cgroup_bpf_put(p);
544 
545 	percpu_ref_exit(&cgrp->bpf.refcnt);
546 
547 	return -ENOMEM;
548 }
549 
550 static int cgroup_bpf_lifetime_notify(struct notifier_block *nb,
551 				      unsigned long action, void *data)
552 {
553 	struct cgroup *cgrp = data;
554 	int ret = 0;
555 
556 	if (cgrp->root != &cgrp_dfl_root)
557 		return NOTIFY_OK;
558 
559 	switch (action) {
560 	case CGROUP_LIFETIME_ONLINE:
561 		ret = cgroup_bpf_inherit(cgrp);
562 		break;
563 	case CGROUP_LIFETIME_OFFLINE:
564 		cgroup_bpf_offline(cgrp);
565 		break;
566 	}
567 
568 	return notifier_from_errno(ret);
569 }
570 
571 static int update_effective_progs(struct cgroup *cgrp,
572 				  enum cgroup_bpf_attach_type atype)
573 {
574 	struct cgroup_subsys_state *css;
575 	int err;
576 
577 	/* allocate and recompute effective prog arrays */
578 	css_for_each_descendant_pre(css, &cgrp->self) {
579 		struct cgroup *desc = container_of(css, struct cgroup, self);
580 
581 		if (percpu_ref_is_zero(&desc->bpf.refcnt))
582 			continue;
583 
584 		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
585 		if (err)
586 			goto cleanup;
587 	}
588 
589 	/* all allocations were successful. Activate all prog arrays */
590 	css_for_each_descendant_pre(css, &cgrp->self) {
591 		struct cgroup *desc = container_of(css, struct cgroup, self);
592 
593 		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
594 			if (unlikely(desc->bpf.inactive)) {
595 				bpf_prog_array_free(desc->bpf.inactive);
596 				desc->bpf.inactive = NULL;
597 			}
598 			continue;
599 		}
600 
601 		activate_effective_progs(desc, atype, desc->bpf.inactive);
602 		desc->bpf.inactive = NULL;
603 	}
604 
605 	return 0;
606 
607 cleanup:
608 	/* oom while computing effective. Free all computed effective arrays
609 	 * since they were not activated
610 	 */
611 	css_for_each_descendant_pre(css, &cgrp->self) {
612 		struct cgroup *desc = container_of(css, struct cgroup, self);
613 
614 		bpf_prog_array_free(desc->bpf.inactive);
615 		desc->bpf.inactive = NULL;
616 	}
617 
618 	return err;
619 }
620 
621 #define BPF_CGROUP_MAX_PROGS 64
622 
623 static struct bpf_prog_list *find_attach_entry(struct hlist_head *progs,
624 					       struct bpf_prog *prog,
625 					       struct bpf_cgroup_link *link,
626 					       struct bpf_prog *replace_prog,
627 					       bool allow_multi)
628 {
629 	struct bpf_prog_list *pl;
630 
631 	/* single-attach case */
632 	if (!allow_multi) {
633 		if (hlist_empty(progs))
634 			return NULL;
635 		return hlist_entry(progs->first, typeof(*pl), node);
636 	}
637 
638 	hlist_for_each_entry(pl, progs, node) {
639 		if (prog && pl->prog == prog && prog != replace_prog)
640 			/* disallow attaching the same prog twice */
641 			return ERR_PTR(-EINVAL);
642 		if (link && pl->link == link)
643 			/* disallow attaching the same link twice */
644 			return ERR_PTR(-EINVAL);
645 	}
646 
647 	/* direct prog multi-attach w/ replacement case */
648 	if (replace_prog) {
649 		hlist_for_each_entry(pl, progs, node) {
650 			if (pl->prog == replace_prog)
651 				/* a match found */
652 				return pl;
653 		}
654 		/* prog to replace not found for cgroup */
655 		return ERR_PTR(-ENOENT);
656 	}
657 
658 	return NULL;
659 }
660 
661 static struct bpf_link *bpf_get_anchor_link(u32 flags, u32 id_or_fd)
662 {
663 	struct bpf_link *link = ERR_PTR(-EINVAL);
664 
665 	if (flags & BPF_F_ID)
666 		link = bpf_link_by_id(id_or_fd);
667 	else if (id_or_fd)
668 		link = bpf_link_get_from_fd(id_or_fd);
669 	return link;
670 }
671 
672 static struct bpf_prog *bpf_get_anchor_prog(u32 flags, u32 id_or_fd)
673 {
674 	struct bpf_prog *prog = ERR_PTR(-EINVAL);
675 
676 	if (flags & BPF_F_ID)
677 		prog = bpf_prog_by_id(id_or_fd);
678 	else if (id_or_fd)
679 		prog = bpf_prog_get(id_or_fd);
680 	return prog;
681 }
682 
683 static struct bpf_prog_list *get_prog_list(struct hlist_head *progs, struct bpf_prog *prog,
684 					   struct bpf_cgroup_link *link, u32 flags, u32 id_or_fd)
685 {
686 	bool is_link = flags & BPF_F_LINK, is_id = flags & BPF_F_ID;
687 	struct bpf_prog_list *pltmp, *pl = ERR_PTR(-EINVAL);
688 	bool preorder = flags & BPF_F_PREORDER;
689 	struct bpf_link *anchor_link = NULL;
690 	struct bpf_prog *anchor_prog = NULL;
691 	bool is_before, is_after;
692 
693 	is_before = flags & BPF_F_BEFORE;
694 	is_after = flags & BPF_F_AFTER;
695 	if (is_link || is_id || id_or_fd) {
696 		/* flags must have either BPF_F_BEFORE or BPF_F_AFTER */
697 		if (is_before == is_after)
698 			return ERR_PTR(-EINVAL);
699 		if ((is_link && !link) || (!is_link && !prog))
700 			return ERR_PTR(-EINVAL);
701 	} else if (!hlist_empty(progs)) {
702 		/* flags cannot have both BPF_F_BEFORE and BPF_F_AFTER */
703 		if (is_before && is_after)
704 			return ERR_PTR(-EINVAL);
705 	}
706 
707 	if (is_link) {
708 		anchor_link = bpf_get_anchor_link(flags, id_or_fd);
709 		if (IS_ERR(anchor_link))
710 			return ERR_CAST(anchor_link);
711 	} else if (is_id || id_or_fd) {
712 		anchor_prog = bpf_get_anchor_prog(flags, id_or_fd);
713 		if (IS_ERR(anchor_prog))
714 			return ERR_CAST(anchor_prog);
715 	}
716 
717 	if (!anchor_prog && !anchor_link) {
718 		/* if there is no anchor_prog/anchor_link, then BPF_F_PREORDER
719 		 * doesn't matter since either prepend or append to a combined
720 		 * list of progs will end up with correct result.
721 		 */
722 		hlist_for_each_entry(pltmp, progs, node) {
723 			if (is_before)
724 				return pltmp;
725 			if (pltmp->node.next)
726 				continue;
727 			return pltmp;
728 		}
729 		return NULL;
730 	}
731 
732 	hlist_for_each_entry(pltmp, progs, node) {
733 		if ((anchor_prog && anchor_prog == pltmp->prog) ||
734 		    (anchor_link && anchor_link == &pltmp->link->link)) {
735 			if (!!(pltmp->flags & BPF_F_PREORDER) != preorder)
736 				goto out;
737 			pl = pltmp;
738 			goto out;
739 		}
740 	}
741 
742 	pl = ERR_PTR(-ENOENT);
743 out:
744 	if (anchor_link)
745 		bpf_link_put(anchor_link);
746 	else
747 		bpf_prog_put(anchor_prog);
748 	return pl;
749 }
750 
751 static int insert_pl_to_hlist(struct bpf_prog_list *pl, struct hlist_head *progs,
752 			      struct bpf_prog *prog, struct bpf_cgroup_link *link,
753 			      u32 flags, u32 id_or_fd)
754 {
755 	struct bpf_prog_list *pltmp;
756 
757 	pltmp = get_prog_list(progs, prog, link, flags, id_or_fd);
758 	if (IS_ERR(pltmp))
759 		return PTR_ERR(pltmp);
760 
761 	if (!pltmp)
762 		hlist_add_head(&pl->node, progs);
763 	else if (flags & BPF_F_BEFORE)
764 		hlist_add_before(&pl->node, &pltmp->node);
765 	else
766 		hlist_add_behind(&pl->node, &pltmp->node);
767 
768 	return 0;
769 }
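/* Example placements handled above (illustrative): with no anchor,
 * BPF_F_BEFORE prepends @pl and BPF_F_AFTER (or no ordering flag) appends it;
 * with an anchor prog/link id or fd, @pl is inserted immediately before or
 * after the matching entry, provided the anchor's BPF_F_PREORDER setting
 * matches the requested @flags.
 */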
770 
771 /**
772  * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
773  *                         propagate the change to descendants
774  * @cgrp: The cgroup which descendants to traverse
775  * @prog: A program to attach
776  * @link: A link to attach
777  * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
778  * @type: Type of attach operation
779  * @flags: Option flags
780  * @id_or_fd: Relative prog id or fd
781  * @revision: bpf_prog_list revision
782  *
783  * Exactly one of @prog or @link can be non-null.
784  * Must be called with cgroup_mutex held.
785  */
786 static int __cgroup_bpf_attach(struct cgroup *cgrp,
787 			       struct bpf_prog *prog, struct bpf_prog *replace_prog,
788 			       struct bpf_cgroup_link *link,
789 			       enum bpf_attach_type type, u32 flags, u32 id_or_fd,
790 			       u64 revision)
791 {
792 	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
793 	struct bpf_prog *old_prog = NULL;
794 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
795 	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
796 	struct bpf_prog *new_prog = prog ? : link->link.prog;
797 	enum cgroup_bpf_attach_type atype;
798 	struct bpf_prog_list *pl;
799 	struct hlist_head *progs;
800 	int err;
801 
802 	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
803 	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
804 		/* invalid combination */
805 		return -EINVAL;
806 	if ((flags & BPF_F_REPLACE) && (flags & (BPF_F_BEFORE | BPF_F_AFTER)))
807 		/* only either replace or insertion with before/after */
808 		return -EINVAL;
809 	if (link && (prog || replace_prog))
810 		/* only either link or prog/replace_prog can be specified */
811 		return -EINVAL;
812 	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
813 		/* replace_prog implies BPF_F_REPLACE, and vice versa */
814 		return -EINVAL;
815 
816 	atype = bpf_cgroup_atype_find(type, new_prog->aux->attach_btf_id);
817 	if (atype < 0)
818 		return -EINVAL;
819 	if (revision && revision != cgrp->bpf.revisions[atype])
820 		return -ESTALE;
821 
822 	progs = &cgrp->bpf.progs[atype];
823 
824 	if (!hierarchy_allows_attach(cgrp, atype))
825 		return -EPERM;
826 
827 	if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
828 		/* Disallow attaching non-overridable on top
829 		 * of existing overridable in this cgroup.
830 		 * Disallow attaching multi-prog if overridable or none
831 		 */
832 		return -EPERM;
833 
834 	if (prog_list_length(progs, NULL) >= BPF_CGROUP_MAX_PROGS)
835 		return -E2BIG;
836 
837 	pl = find_attach_entry(progs, prog, link, replace_prog,
838 			       flags & BPF_F_ALLOW_MULTI);
839 	if (IS_ERR(pl))
840 		return PTR_ERR(pl);
841 
842 	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
843 				      prog ? : link->link.prog, cgrp))
844 		return -ENOMEM;
845 
846 	if (pl) {
847 		old_prog = pl->prog;
848 	} else {
849 		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
850 		if (!pl) {
851 			bpf_cgroup_storages_free(new_storage);
852 			return -ENOMEM;
853 		}
854 
855 		err = insert_pl_to_hlist(pl, progs, prog, link, flags, id_or_fd);
856 		if (err) {
857 			kfree(pl);
858 			bpf_cgroup_storages_free(new_storage);
859 			return err;
860 		}
861 	}
862 
863 	pl->prog = prog;
864 	pl->link = link;
865 	pl->flags = flags;
866 	bpf_cgroup_storages_assign(pl->storage, storage);
867 	cgrp->bpf.flags[atype] = saved_flags;
868 
869 	if (type == BPF_LSM_CGROUP) {
870 		err = bpf_trampoline_link_cgroup_shim(new_prog, atype, type);
871 		if (err)
872 			goto cleanup;
873 	}
874 
875 	err = update_effective_progs(cgrp, atype);
876 	if (err)
877 		goto cleanup_trampoline;
878 
879 	cgrp->bpf.revisions[atype] += 1;
880 	if (old_prog) {
881 		if (type == BPF_LSM_CGROUP)
882 			bpf_trampoline_unlink_cgroup_shim(old_prog);
883 		bpf_prog_put(old_prog);
884 	} else {
885 		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
886 	}
887 	bpf_cgroup_storages_link(new_storage, cgrp, type);
888 	return 0;
889 
890 cleanup_trampoline:
891 	if (type == BPF_LSM_CGROUP)
892 		bpf_trampoline_unlink_cgroup_shim(new_prog);
893 
894 cleanup:
895 	if (old_prog) {
896 		pl->prog = old_prog;
897 		pl->link = NULL;
898 	}
899 	bpf_cgroup_storages_free(new_storage);
900 	if (!old_prog) {
901 		hlist_del(&pl->node);
902 		kfree(pl);
903 	}
904 	return err;
905 }
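/* Usage note (illustrative): a caller that passes a non-zero @revision opts
 * into optimistic concurrency; if the per-attach-type revision changed since
 * it was last queried, the attach above fails with -ESTALE instead of
 * silently racing with a concurrent update.
 */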
906 
907 static int cgroup_bpf_attach(struct cgroup *cgrp,
908 			     struct bpf_prog *prog, struct bpf_prog *replace_prog,
909 			     struct bpf_cgroup_link *link,
910 			     enum bpf_attach_type type,
911 			     u32 flags, u32 id_or_fd, u64 revision)
912 {
913 	int ret;
914 
915 	cgroup_lock();
916 	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags,
917 				  id_or_fd, revision);
918 	cgroup_unlock();
919 	return ret;
920 }
921 
922 /* Swap updated BPF program for given link in effective program arrays across
923  * all descendant cgroups. This function is guaranteed to succeed.
924  */
925 static void replace_effective_prog(struct cgroup *cgrp,
926 				   enum cgroup_bpf_attach_type atype,
927 				   struct bpf_cgroup_link *link)
928 {
929 	struct bpf_prog_array_item *item;
930 	struct cgroup_subsys_state *css;
931 	struct bpf_prog_array *progs;
932 	struct bpf_prog_list *pl;
933 	struct hlist_head *head;
934 	struct cgroup *cg;
935 	int pos;
936 
937 	css_for_each_descendant_pre(css, &cgrp->self) {
938 		struct cgroup *desc = container_of(css, struct cgroup, self);
939 
940 		if (percpu_ref_is_zero(&desc->bpf.refcnt))
941 			continue;
942 
943 		/* find position of link in effective progs array */
944 		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
945 			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
946 				continue;
947 
948 			head = &cg->bpf.progs[atype];
949 			hlist_for_each_entry(pl, head, node) {
950 				if (!prog_list_prog(pl))
951 					continue;
952 				if (pl->link == link)
953 					goto found;
954 				pos++;
955 			}
956 		}
957 found:
958 		BUG_ON(!cg);
959 		progs = rcu_dereference_protected(
960 				desc->bpf.effective[atype],
961 				lockdep_is_held(&cgroup_mutex));
962 		item = &progs->items[pos];
963 		WRITE_ONCE(item->prog, link->link.prog);
964 	}
965 }
966 
967 /**
968  * __cgroup_bpf_replace() - Replace link's program and propagate the change
969  *                          to descendants
970  * @cgrp: The cgroup which descendants to traverse
971  * @link: A link for which to replace BPF program
972  * @new_prog: &struct bpf_prog for the target BPF program with its refcnt
973  *            incremented
974  *
975  * Must be called with cgroup_mutex held.
976  */
977 static int __cgroup_bpf_replace(struct cgroup *cgrp,
978 				struct bpf_cgroup_link *link,
979 				struct bpf_prog *new_prog)
980 {
981 	enum cgroup_bpf_attach_type atype;
982 	struct bpf_prog *old_prog;
983 	struct bpf_prog_list *pl;
984 	struct hlist_head *progs;
985 	bool found = false;
986 
987 	atype = bpf_cgroup_atype_find(link->link.attach_type, new_prog->aux->attach_btf_id);
988 	if (atype < 0)
989 		return -EINVAL;
990 
991 	progs = &cgrp->bpf.progs[atype];
992 
993 	if (link->link.prog->type != new_prog->type)
994 		return -EINVAL;
995 
996 	hlist_for_each_entry(pl, progs, node) {
997 		if (pl->link == link) {
998 			found = true;
999 			break;
1000 		}
1001 	}
1002 	if (!found)
1003 		return -ENOENT;
1004 
1005 	cgrp->bpf.revisions[atype] += 1;
1006 	old_prog = xchg(&link->link.prog, new_prog);
1007 	replace_effective_prog(cgrp, atype, link);
1008 	bpf_prog_put(old_prog);
1009 	return 0;
1010 }
1011 
1012 static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
1013 			      struct bpf_prog *old_prog)
1014 {
1015 	struct bpf_cgroup_link *cg_link;
1016 	int ret;
1017 
1018 	cg_link = container_of(link, struct bpf_cgroup_link, link);
1019 
1020 	cgroup_lock();
1021 	/* link might have been auto-released by dying cgroup, so fail */
1022 	if (!cg_link->cgroup) {
1023 		ret = -ENOLINK;
1024 		goto out_unlock;
1025 	}
1026 	if (old_prog && link->prog != old_prog) {
1027 		ret = -EPERM;
1028 		goto out_unlock;
1029 	}
1030 	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
1031 out_unlock:
1032 	cgroup_unlock();
1033 	return ret;
1034 }
1035 
1036 static struct bpf_prog_list *find_detach_entry(struct hlist_head *progs,
1037 					       struct bpf_prog *prog,
1038 					       struct bpf_cgroup_link *link,
1039 					       bool allow_multi)
1040 {
1041 	struct bpf_prog_list *pl;
1042 
1043 	if (!allow_multi) {
1044 		if (hlist_empty(progs))
1045 			/* report error when trying to detach and nothing is attached */
1046 			return ERR_PTR(-ENOENT);
1047 
1048 		/* to maintain backward compatibility NONE and OVERRIDE cgroups
1049 		 * allow detaching with invalid FD (prog==NULL) in legacy mode
1050 		 */
1051 		return hlist_entry(progs->first, typeof(*pl), node);
1052 	}
1053 
1054 	if (!prog && !link)
1055 		/* to detach MULTI prog the user has to specify valid FD
1056 		 * of the program or link to be detached
1057 		 */
1058 		return ERR_PTR(-EINVAL);
1059 
1060 	/* find the prog or link and detach it */
1061 	hlist_for_each_entry(pl, progs, node) {
1062 		if (pl->prog == prog && pl->link == link)
1063 			return pl;
1064 	}
1065 	return ERR_PTR(-ENOENT);
1066 }
1067 
1068 /**
1069  * purge_effective_progs() - After compute_effective_progs fails to alloc new
1070  *                           cgrp->bpf.inactive table we can recover by
1071  *                           recomputing the array in place.
1072  *
1073  * @cgrp: The cgroup which descendants to traverse
1074  * @prog: A program to detach or NULL
1075  * @link: A link to detach or NULL
1076  * @atype: Type of detach operation
1077  */
1078 static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
1079 				  struct bpf_cgroup_link *link,
1080 				  enum cgroup_bpf_attach_type atype)
1081 {
1082 	struct cgroup_subsys_state *css;
1083 	struct bpf_prog_array *progs;
1084 	struct bpf_prog_list *pl;
1085 	struct hlist_head *head;
1086 	struct cgroup *cg;
1087 	int pos;
1088 
1089 	/* recompute effective prog array in place */
1090 	css_for_each_descendant_pre(css, &cgrp->self) {
1091 		struct cgroup *desc = container_of(css, struct cgroup, self);
1092 
1093 		if (percpu_ref_is_zero(&desc->bpf.refcnt))
1094 			continue;
1095 
1096 		/* find position of link or prog in effective progs array */
1097 		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
1098 			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
1099 				continue;
1100 
1101 			head = &cg->bpf.progs[atype];
1102 			hlist_for_each_entry(pl, head, node) {
1103 				if (!prog_list_prog(pl))
1104 					continue;
1105 				if (pl->prog == prog && pl->link == link)
1106 					goto found;
1107 				pos++;
1108 			}
1109 		}
1110 
1111 		/* no link or prog match, skip the cgroup of this layer */
1112 		continue;
1113 found:
1114 		progs = rcu_dereference_protected(
1115 				desc->bpf.effective[atype],
1116 				lockdep_is_held(&cgroup_mutex));
1117 
1118 		/* Remove the program from the array */
1119 		WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
1120 			  "Failed to purge a prog from array at index %d", pos);
1121 	}
1122 }
1123 
1124 /**
1125  * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
1126  *                         propagate the change to descendants
1127  * @cgrp: The cgroup which descendants to traverse
1128  * @prog: A program to detach or NULL
1129  * @link: A link to detach or NULL
1130  * @type: Type of detach operation
1131  * @revision: bpf_prog_list revision
1132  *
1133  * At most one of @prog or @link can be non-NULL.
1134  * Must be called with cgroup_mutex held.
1135  */
1136 static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
1137 			       struct bpf_cgroup_link *link, enum bpf_attach_type type,
1138 			       u64 revision)
1139 {
1140 	enum cgroup_bpf_attach_type atype;
1141 	struct bpf_prog *old_prog;
1142 	struct bpf_prog_list *pl;
1143 	struct hlist_head *progs;
1144 	u32 attach_btf_id = 0;
1145 	u32 flags;
1146 
1147 	if (prog)
1148 		attach_btf_id = prog->aux->attach_btf_id;
1149 	if (link)
1150 		attach_btf_id = link->link.prog->aux->attach_btf_id;
1151 
1152 	atype = bpf_cgroup_atype_find(type, attach_btf_id);
1153 	if (atype < 0)
1154 		return -EINVAL;
1155 
1156 	if (revision && revision != cgrp->bpf.revisions[atype])
1157 		return -ESTALE;
1158 
1159 	progs = &cgrp->bpf.progs[atype];
1160 	flags = cgrp->bpf.flags[atype];
1161 
1162 	if (prog && link)
1163 		/* only one of prog or link can be specified */
1164 		return -EINVAL;
1165 
1166 	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
1167 	if (IS_ERR(pl))
1168 		return PTR_ERR(pl);
1169 
1170 	/* mark it deleted, so it's ignored while recomputing effective */
1171 	old_prog = pl->prog;
1172 	pl->prog = NULL;
1173 	pl->link = NULL;
1174 
1175 	if (update_effective_progs(cgrp, atype)) {
1176 		/* if updating the effective array failed, replace the prog with a dummy prog */
1177 		pl->prog = old_prog;
1178 		pl->link = link;
1179 		purge_effective_progs(cgrp, old_prog, link, atype);
1180 	}
1181 
1182 	/* now can actually delete it from this cgroup list */
1183 	hlist_del(&pl->node);
1184 	cgrp->bpf.revisions[atype] += 1;
1185 
1186 	kfree(pl);
1187 	if (hlist_empty(progs))
1188 		/* last program was detached, reset flags to zero */
1189 		cgrp->bpf.flags[atype] = 0;
1190 	if (old_prog) {
1191 		if (type == BPF_LSM_CGROUP)
1192 			bpf_trampoline_unlink_cgroup_shim(old_prog);
1193 		bpf_prog_put(old_prog);
1194 	}
1195 	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
1196 	return 0;
1197 }
1198 
1199 static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
1200 			     enum bpf_attach_type type, u64 revision)
1201 {
1202 	int ret;
1203 
1204 	cgroup_lock();
1205 	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type, revision);
1206 	cgroup_unlock();
1207 	return ret;
1208 }
1209 
1210 /* Must be called with cgroup_mutex held to avoid races. */
1211 static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
1212 			      union bpf_attr __user *uattr)
1213 {
1214 	__u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
1215 	bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE;
1216 	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1217 	enum bpf_attach_type type = attr->query.attach_type;
1218 	enum cgroup_bpf_attach_type from_atype, to_atype;
1219 	enum cgroup_bpf_attach_type atype;
1220 	struct bpf_prog_array *effective;
1221 	int cnt, ret = 0, i;
1222 	int total_cnt = 0;
1223 	u64 revision = 0;
1224 	u32 flags;
1225 
1226 	if (effective_query && prog_attach_flags)
1227 		return -EINVAL;
1228 
1229 	if (type == BPF_LSM_CGROUP) {
1230 		if (!effective_query && attr->query.prog_cnt &&
1231 		    prog_ids && !prog_attach_flags)
1232 			return -EINVAL;
1233 
1234 		from_atype = CGROUP_LSM_START;
1235 		to_atype = CGROUP_LSM_END;
1236 		flags = 0;
1237 	} else {
1238 		from_atype = to_cgroup_bpf_attach_type(type);
1239 		if (from_atype < 0)
1240 			return -EINVAL;
1241 		to_atype = from_atype;
1242 		flags = cgrp->bpf.flags[from_atype];
1243 	}
1244 
1245 	for (atype = from_atype; atype <= to_atype; atype++) {
1246 		if (effective_query) {
1247 			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
1248 							      lockdep_is_held(&cgroup_mutex));
1249 			total_cnt += bpf_prog_array_length(effective);
1250 		} else {
1251 			total_cnt += prog_list_length(&cgrp->bpf.progs[atype], NULL);
1252 		}
1253 	}
1254 
1255 	/* always output uattr->query.attach_flags as 0 during effective query */
1256 	flags = effective_query ? 0 : flags;
1257 	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
1258 		return -EFAULT;
1259 	if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
1260 		return -EFAULT;
1261 	if (!effective_query && from_atype == to_atype)
1262 		revision = cgrp->bpf.revisions[from_atype];
1263 	if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
1264 		return -EFAULT;
1265 	if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
1266 		/* return early if user requested only program count + flags */
1267 		return 0;
1268 
1269 	if (attr->query.prog_cnt < total_cnt) {
1270 		total_cnt = attr->query.prog_cnt;
1271 		ret = -ENOSPC;
1272 	}
1273 
1274 	for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
1275 		if (effective_query) {
1276 			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
1277 							      lockdep_is_held(&cgroup_mutex));
1278 			cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
1279 			ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
1280 		} else {
1281 			struct hlist_head *progs;
1282 			struct bpf_prog_list *pl;
1283 			struct bpf_prog *prog;
1284 			u32 id;
1285 
1286 			progs = &cgrp->bpf.progs[atype];
1287 			cnt = min_t(int, prog_list_length(progs, NULL), total_cnt);
1288 			i = 0;
1289 			hlist_for_each_entry(pl, progs, node) {
1290 				prog = prog_list_prog(pl);
1291 				id = prog->aux->id;
1292 				if (copy_to_user(prog_ids + i, &id, sizeof(id)))
1293 					return -EFAULT;
1294 				if (++i == cnt)
1295 					break;
1296 			}
1297 
1298 			if (prog_attach_flags) {
1299 				flags = cgrp->bpf.flags[atype];
1300 
1301 				for (i = 0; i < cnt; i++)
1302 					if (copy_to_user(prog_attach_flags + i,
1303 							 &flags, sizeof(flags)))
1304 						return -EFAULT;
1305 				prog_attach_flags += cnt;
1306 			}
1307 		}
1308 
1309 		prog_ids += cnt;
1310 		total_cnt -= cnt;
1311 	}
1312 	return ret;
1313 }
1314 
1315 static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
1316 			    union bpf_attr __user *uattr)
1317 {
1318 	int ret;
1319 
1320 	cgroup_lock();
1321 	ret = __cgroup_bpf_query(cgrp, attr, uattr);
1322 	cgroup_unlock();
1323 	return ret;
1324 }
1325 
1326 int cgroup_bpf_prog_attach(const union bpf_attr *attr,
1327 			   enum bpf_prog_type ptype, struct bpf_prog *prog)
1328 {
1329 	struct bpf_prog *replace_prog = NULL;
1330 	struct cgroup *cgrp;
1331 	int ret;
1332 
1333 	cgrp = cgroup_get_from_fd(attr->target_fd);
1334 	if (IS_ERR(cgrp))
1335 		return PTR_ERR(cgrp);
1336 
1337 	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
1338 	    (attr->attach_flags & BPF_F_REPLACE)) {
1339 		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
1340 		if (IS_ERR(replace_prog)) {
1341 			cgroup_put(cgrp);
1342 			return PTR_ERR(replace_prog);
1343 		}
1344 	}
1345 
1346 	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
1347 				attr->attach_type, attr->attach_flags,
1348 				attr->relative_fd, attr->expected_revision);
1349 
1350 	if (replace_prog)
1351 		bpf_prog_put(replace_prog);
1352 	cgroup_put(cgrp);
1353 	return ret;
1354 }
1355 
1356 int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
1357 {
1358 	struct bpf_prog *prog;
1359 	struct cgroup *cgrp;
1360 	int ret;
1361 
1362 	cgrp = cgroup_get_from_fd(attr->target_fd);
1363 	if (IS_ERR(cgrp))
1364 		return PTR_ERR(cgrp);
1365 
1366 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
1367 	if (IS_ERR(prog))
1368 		prog = NULL;
1369 
1370 	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, attr->expected_revision);
1371 	if (prog)
1372 		bpf_prog_put(prog);
1373 
1374 	cgroup_put(cgrp);
1375 	return ret;
1376 }
1377 
1378 static void bpf_cgroup_link_release(struct bpf_link *link)
1379 {
1380 	struct bpf_cgroup_link *cg_link =
1381 		container_of(link, struct bpf_cgroup_link, link);
1382 	struct cgroup *cg;
1383 
1384 	/* link might have been auto-detached by dying cgroup already,
1385 	 * in that case our work is done here
1386 	 */
1387 	if (!cg_link->cgroup)
1388 		return;
1389 
1390 	cgroup_lock();
1391 
1392 	/* re-check cgroup under lock again */
1393 	if (!cg_link->cgroup) {
1394 		cgroup_unlock();
1395 		return;
1396 	}
1397 
1398 	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
1399 				    link->attach_type, 0));
1400 	if (link->attach_type == BPF_LSM_CGROUP)
1401 		bpf_trampoline_unlink_cgroup_shim(cg_link->link.prog);
1402 
1403 	cg = cg_link->cgroup;
1404 	cg_link->cgroup = NULL;
1405 
1406 	cgroup_unlock();
1407 
1408 	cgroup_put(cg);
1409 }
1410 
1411 static void bpf_cgroup_link_dealloc(struct bpf_link *link)
1412 {
1413 	struct bpf_cgroup_link *cg_link =
1414 		container_of(link, struct bpf_cgroup_link, link);
1415 
1416 	kfree(cg_link);
1417 }
1418 
1419 static int bpf_cgroup_link_detach(struct bpf_link *link)
1420 {
1421 	bpf_cgroup_link_release(link);
1422 
1423 	return 0;
1424 }
1425 
1426 static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
1427 					struct seq_file *seq)
1428 {
1429 	struct bpf_cgroup_link *cg_link =
1430 		container_of(link, struct bpf_cgroup_link, link);
1431 	u64 cg_id = 0;
1432 
1433 	cgroup_lock();
1434 	if (cg_link->cgroup)
1435 		cg_id = cgroup_id(cg_link->cgroup);
1436 	cgroup_unlock();
1437 
1438 	seq_printf(seq,
1439 		   "cgroup_id:\t%llu\n"
1440 		   "attach_type:\t%d\n",
1441 		   cg_id,
1442 		   link->attach_type);
1443 }
1444 
1445 static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
1446 					  struct bpf_link_info *info)
1447 {
1448 	struct bpf_cgroup_link *cg_link =
1449 		container_of(link, struct bpf_cgroup_link, link);
1450 	u64 cg_id = 0;
1451 
1452 	cgroup_lock();
1453 	if (cg_link->cgroup)
1454 		cg_id = cgroup_id(cg_link->cgroup);
1455 	cgroup_unlock();
1456 
1457 	info->cgroup.cgroup_id = cg_id;
1458 	info->cgroup.attach_type = link->attach_type;
1459 	return 0;
1460 }
1461 
1462 static const struct bpf_link_ops bpf_cgroup_link_lops = {
1463 	.release = bpf_cgroup_link_release,
1464 	.dealloc = bpf_cgroup_link_dealloc,
1465 	.detach = bpf_cgroup_link_detach,
1466 	.update_prog = cgroup_bpf_replace,
1467 	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
1468 	.fill_link_info = bpf_cgroup_link_fill_link_info,
1469 };
1470 
1471 #define BPF_F_LINK_ATTACH_MASK	\
1472 	(BPF_F_ID |		\
1473 	 BPF_F_BEFORE |		\
1474 	 BPF_F_AFTER |		\
1475 	 BPF_F_PREORDER |	\
1476 	 BPF_F_LINK)
1477 
1478 int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
1479 {
1480 	struct bpf_link_primer link_primer;
1481 	struct bpf_cgroup_link *link;
1482 	struct cgroup *cgrp;
1483 	int err;
1484 
1485 	if (attr->link_create.flags & (~BPF_F_LINK_ATTACH_MASK))
1486 		return -EINVAL;
1487 
1488 	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
1489 	if (IS_ERR(cgrp))
1490 		return PTR_ERR(cgrp);
1491 
1492 	link = kzalloc(sizeof(*link), GFP_USER);
1493 	if (!link) {
1494 		err = -ENOMEM;
1495 		goto out_put_cgroup;
1496 	}
1497 	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
1498 		      prog, attr->link_create.attach_type);
1499 	link->cgroup = cgrp;
1500 
1501 	err = bpf_link_prime(&link->link, &link_primer);
1502 	if (err) {
1503 		kfree(link);
1504 		goto out_put_cgroup;
1505 	}
1506 
1507 	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
1508 				link->link.attach_type, BPF_F_ALLOW_MULTI | attr->link_create.flags,
1509 				attr->link_create.cgroup.relative_fd,
1510 				attr->link_create.cgroup.expected_revision);
1511 	if (err) {
1512 		bpf_link_cleanup(&link_primer);
1513 		goto out_put_cgroup;
1514 	}
1515 
1516 	return bpf_link_settle(&link_primer);
1517 
1518 out_put_cgroup:
1519 	cgroup_put(cgrp);
1520 	return err;
1521 }
1522 
1523 int cgroup_bpf_prog_query(const union bpf_attr *attr,
1524 			  union bpf_attr __user *uattr)
1525 {
1526 	struct cgroup *cgrp;
1527 	int ret;
1528 
1529 	cgrp = cgroup_get_from_fd(attr->query.target_fd);
1530 	if (IS_ERR(cgrp))
1531 		return PTR_ERR(cgrp);
1532 
1533 	ret = cgroup_bpf_query(cgrp, attr, uattr);
1534 
1535 	cgroup_put(cgrp);
1536 	return ret;
1537 }
1538 
1539 /**
1540  * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
1541  * @sk: The socket sending or receiving traffic
1542  * @skb: The skb that is being sent or received
1543  * @atype: The type of program to be executed
1544  *
1545  * If no socket is passed, or the socket is not of type INET or INET6,
1546  * this function does nothing and returns 0.
1547  *
1548  * The program type passed in via @atype must be suitable for network
1549  * filtering. No further check is performed to assert that.
1550  *
1551  * For egress packets, this function can return:
1552  *   NET_XMIT_SUCCESS    (0)	- continue with packet output
1553  *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
1554  *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
1555  *				  to call cwr
1556  *   -err			- drop packet
1557  *
1558  * For ingress packets, this function will return -EPERM if any
1559  * attached program was found and if it returned != 1 during execution.
1560  * Otherwise 0 is returned.
1561  */
1562 int __cgroup_bpf_run_filter_skb(struct sock *sk,
1563 				struct sk_buff *skb,
1564 				enum cgroup_bpf_attach_type atype)
1565 {
1566 	unsigned int offset = -skb_network_offset(skb);
1567 	struct sock *save_sk;
1568 	void *saved_data_end;
1569 	struct cgroup *cgrp;
1570 	int ret;
1571 
1572 	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
1573 		return 0;
1574 
1575 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1576 	save_sk = skb->sk;
1577 	skb->sk = sk;
1578 	__skb_push(skb, offset);
1579 
1580 	/* compute pointers for the bpf prog */
1581 	bpf_compute_and_save_data_end(skb, &saved_data_end);
1582 
1583 	if (atype == CGROUP_INET_EGRESS) {
1584 		u32 flags = 0;
1585 		bool cn;
1586 
1587 		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
1588 					    __bpf_prog_run_save_cb, 0, &flags);
1589 
1590 		/* Return values of CGROUP EGRESS BPF programs are:
1591 		 *   0: drop packet
1592 		 *   1: keep packet
1593 		 *   2: drop packet and cn
1594 		 *   3: keep packet and cn
1595 		 *
1596 		 * The returned value is then converted to one of the NET_XMIT
1597 		 * or an error code that is then interpreted as drop packet
1598 		 * (and no cn):
1599 		 *   0: NET_XMIT_SUCCESS  skb should be transmitted
1600 		 *   1: NET_XMIT_DROP     skb should be dropped and cn
1601 		 *   2: NET_XMIT_CN       skb should be transmitted and cn
1602 		 *   3: -err              skb should be dropped
1603 		 */
1604 
1605 		cn = flags & BPF_RET_SET_CN;
1606 		if (ret && !IS_ERR_VALUE((long)ret))
1607 			ret = -EFAULT;
1608 		if (!ret)
1609 			ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);
1610 		else
1611 			ret = (cn ? NET_XMIT_DROP : ret);
1612 	} else {
1613 		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
1614 					    skb, __bpf_prog_run_save_cb, 0,
1615 					    NULL);
1616 		if (ret && !IS_ERR_VALUE((long)ret))
1617 			ret = -EFAULT;
1618 	}
1619 	bpf_restore_data_end(skb, saved_data_end);
1620 	__skb_pull(skb, offset);
1621 	skb->sk = save_sk;
1622 
1623 	return ret;
1624 }
1625 EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
1626 
1627 /**
1628  * __cgroup_bpf_run_filter_sk() - Run a program on a sock
1629  * @sk: sock structure to manipulate
1630  * @atype: The type of program to be executed
1631  *
1632  * The socket passed is expected to be of type INET or INET6.
1633  *
1634  * The program type passed in via @atype must be suitable for sock
1635  * filtering. No further check is performed to assert that.
1636  *
1637  * This function will return %-EPERM if an attached program was found
1638  * and if it returned != 1 during execution. In all other cases, 0 is returned.
1639  */
1640 int __cgroup_bpf_run_filter_sk(struct sock *sk,
1641 			       enum cgroup_bpf_attach_type atype)
1642 {
1643 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1644 
1645 	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
1646 				     NULL);
1647 }
1648 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
1649 
1650 /**
1651  * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
1652  *                                       a sockaddr provided by the user
1653  * @sk: sock struct that will use sockaddr
1654  * @uaddr: sockaddr struct provided by user
1655  * @uaddrlen: Pointer to the size of the sockaddr struct provided by user. It is
1656  *            read-only for AF_INET[6] uaddr but can be modified for AF_UNIX
1657  *            uaddr.
1658  * @atype: The type of program to be executed
1659  * @t_ctx: Pointer to attach type specific context
1660  * @flags: Pointer to u32 which contains higher bits of BPF program
1661  *         return value (OR'ed together).
1662  *
1663  * socket is expected to be of type INET, INET6 or UNIX.
1664  *
1665  * This function will return %-EPERM if an attached program is found and
1666  * returned value != 1 during execution. In all other cases, 0 is returned.
1667  */
1668 int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
1669 				      struct sockaddr *uaddr,
1670 				      int *uaddrlen,
1671 				      enum cgroup_bpf_attach_type atype,
1672 				      void *t_ctx,
1673 				      u32 *flags)
1674 {
1675 	struct bpf_sock_addr_kern ctx = {
1676 		.sk = sk,
1677 		.uaddr = uaddr,
1678 		.t_ctx = t_ctx,
1679 	};
1680 	struct sockaddr_storage unspec;
1681 	struct cgroup *cgrp;
1682 	int ret;
1683 
1684 	/* Check the socket family since not all sockets represent a network
1685 	 * endpoint (e.g. AF_UNIX).
1686 	 */
1687 	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6 &&
1688 	    sk->sk_family != AF_UNIX)
1689 		return 0;
1690 
1691 	if (!ctx.uaddr) {
1692 		memset(&unspec, 0, sizeof(unspec));
1693 		ctx.uaddr = (struct sockaddr *)&unspec;
1694 		ctx.uaddrlen = 0;
1695 	} else {
1696 		ctx.uaddrlen = *uaddrlen;
1697 	}
1698 
1699 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1700 	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
1701 				    0, flags);
1702 
1703 	if (!ret && uaddr)
1704 		*uaddrlen = ctx.uaddrlen;
1705 
1706 	return ret;
1707 }
1708 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
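
/* Illustrative sketch (not part of this file), assuming a clang/libbpf build:
 * a cgroup/connect4 program that rewrites the user-supplied sockaddr before
 * the kernel sees it. The addresses, port numbers and program name are made
 * up; user_ip4/user_port are big-endian per the UAPI.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("cgroup/connect4")
int connect4_sketch(struct bpf_sock_addr *ctx)
{
	/* Redirect connect() calls aimed at 127.0.0.2:80 to port 8080. */
	if (ctx->user_ip4 == bpf_htonl(0x7f000002) &&
	    ctx->user_port == bpf_htons(80))
		ctx->user_port = bpf_htons(8080);
	return 1;	/* allow the (possibly rewritten) connect() */
}

char _license[] SEC("license") = "GPL";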
1709 
1710 /**
1711  * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
1712  * @sk: socket to get cgroup from
1713  * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
1714  * sk with connection information (IP addresses, etc.). May not contain
1715  * cgroup info if it is a req sock.
1716  * @atype: The type of program to be executed
1717  *
1718  * The socket passed in is expected to be of type INET or INET6.
1719  *
1720  * The program type passed in via @type must be suitable for sock_ops
1721  * filtering. No further check is performed to assert that.
1722  *
1723  * This function will return %-EPERM if an attached program was found
1724  * and it returned != 1 during execution. In all other cases, 0 is returned.
1725  */
1726 int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
1727 				     struct bpf_sock_ops_kern *sock_ops,
1728 				     enum cgroup_bpf_attach_type atype)
1729 {
1730 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1731 
1732 	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
1733 				     0, NULL);
1734 }
1735 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
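
/* Illustrative sketch (not part of this file), assuming a clang/libbpf build:
 * a sockops program reacting to one of the ops delivered through this hook.
 * The program name is made up.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int sockops_sketch(struct bpf_sock_ops *skops)
{
	/* Ask for per-RTT callbacks once an active connection is established. */
	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
		bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_RTT_CB_FLAG);
	return 1;
}

char _license[] SEC("license") = "GPL";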
1736 
1737 int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
1738 				      short access, enum cgroup_bpf_attach_type atype)
1739 {
1740 	struct cgroup *cgrp;
1741 	struct bpf_cgroup_dev_ctx ctx = {
1742 		.access_type = (access << 16) | dev_type,
1743 		.major = major,
1744 		.minor = minor,
1745 	};
1746 	int ret;
1747 
1748 	rcu_read_lock();
1749 	cgrp = task_dfl_cgroup(current);
1750 	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
1751 				    NULL);
1752 	rcu_read_unlock();
1753 
1754 	return ret;
1755 }
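
/* Illustrative sketch (not part of this file), assuming a clang/libbpf build:
 * a cgroup/dev program consuming the (access << 16) | dev_type packing built
 * above. The policy and program name are made up.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/dev")
int dev_sketch(struct bpf_cgroup_dev_ctx *ctx)
{
	__u32 type = ctx->access_type & 0xFFFF;	/* BPF_DEVCG_DEV_* */
	__u32 access = ctx->access_type >> 16;	/* BPF_DEVCG_ACC_* */

	if (access & BPF_DEVCG_ACC_MKNOD)
		return 1;	/* always allow mknod */
	if (type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 && ctx->minor == 3)
		return 1;	/* allow /dev/null */
	return 0;		/* everything else: opener sees -EPERM */
}

char _license[] SEC("license") = "GPL";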
1756 
1757 BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
1758 {
1759 	/* The flags argument is not used now,
1760 	 * but provides the ability to extend the API.
1761 	 * The verifier checks that its value is correct.
1762 	 */
1763 	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
1764 	struct bpf_cgroup_storage *storage;
1765 	struct bpf_cg_run_ctx *ctx;
1766 	void *ptr;
1767 
1768 	/* get current cgroup storage from BPF run context */
1769 	ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1770 	storage = ctx->prog_item->cgroup_storage[stype];
1771 
1772 	if (stype == BPF_CGROUP_STORAGE_SHARED)
1773 		ptr = &READ_ONCE(storage->buf)->data[0];
1774 	else
1775 		ptr = this_cpu_ptr(storage->percpu_buf);
1776 
1777 	return (unsigned long)ptr;
1778 }
1779 
1780 const struct bpf_func_proto bpf_get_local_storage_proto = {
1781 	.func		= bpf_get_local_storage,
1782 	.gpl_only	= false,
1783 	.ret_type	= RET_PTR_TO_MAP_VALUE,
1784 	.arg1_type	= ARG_CONST_MAP_PTR,
1785 	.arg2_type	= ARG_ANYTHING,
1786 };
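
/* Illustrative sketch (not part of this file), assuming a clang/libbpf build:
 * how a program consumes bpf_get_local_storage(). The helper resolves the
 * storage attached to the (cgroup, program) pair from the run context set up
 * in bpf_prog_run_array_cg(). Map and program names are made up.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} pkt_cnt SEC(".maps");

SEC("cgroup_skb/ingress")
int count_sketch(struct __sk_buff *skb)
{
	__u64 *cnt = bpf_get_local_storage(&pkt_cnt, 0);

	__sync_fetch_and_add(cnt, 1);
	return 1;	/* keep the packet */
}

char _license[] SEC("license") = "GPL";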
1787 
1788 BPF_CALL_0(bpf_get_retval)
1789 {
1790 	struct bpf_cg_run_ctx *ctx =
1791 		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1792 
1793 	return ctx->retval;
1794 }
1795 
1796 const struct bpf_func_proto bpf_get_retval_proto = {
1797 	.func		= bpf_get_retval,
1798 	.gpl_only	= false,
1799 	.ret_type	= RET_INTEGER,
1800 };
1801 
1802 BPF_CALL_1(bpf_set_retval, int, retval)
1803 {
1804 	struct bpf_cg_run_ctx *ctx =
1805 		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1806 
1807 	ctx->retval = retval;
1808 	return 0;
1809 }
1810 
1811 const struct bpf_func_proto bpf_set_retval_proto = {
1812 	.func		= bpf_set_retval,
1813 	.gpl_only	= false,
1814 	.ret_type	= RET_INTEGER,
1815 	.arg1_type	= ARG_ANYTHING,
1816 };
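
/* Illustrative sketch (not part of this file), assuming a clang/libbpf build:
 * bpf_set_retval() lets a program pick the errno reported to the caller
 * instead of the default -EPERM chosen by bpf_prog_run_array_cg() when a
 * program returns 0. Program name and option numbers are made up for the
 * example (1/36 correspond to SOL_SOCKET/SO_MARK on common ABIs).
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/setsockopt")
int retval_sketch(struct bpf_sockopt *ctx)
{
	if (ctx->level == 1 /* SOL_SOCKET */ &&
	    ctx->optname == 36 /* SO_MARK */) {
		bpf_set_retval(-13 /* -EACCES */);
		return 0;	/* deny; caller sees -EACCES, not -EPERM */
	}
	return 1;	/* fall through to the kernel handler */
}

char _license[] SEC("license") = "GPL";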
1817 
1818 static const struct bpf_func_proto *
1819 cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1820 {
1821 	const struct bpf_func_proto *func_proto;
1822 
1823 	func_proto = cgroup_common_func_proto(func_id, prog);
1824 	if (func_proto)
1825 		return func_proto;
1826 
1827 	switch (func_id) {
1828 	case BPF_FUNC_perf_event_output:
1829 		return &bpf_event_output_data_proto;
1830 	default:
1831 		return bpf_base_func_proto(func_id, prog);
1832 	}
1833 }
1834 
1835 static bool cgroup_dev_is_valid_access(int off, int size,
1836 				       enum bpf_access_type type,
1837 				       const struct bpf_prog *prog,
1838 				       struct bpf_insn_access_aux *info)
1839 {
1840 	const int size_default = sizeof(__u32);
1841 
1842 	if (type == BPF_WRITE)
1843 		return false;
1844 
1845 	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
1846 		return false;
1847 	/* The verifier guarantees that size > 0. */
1848 	if (off % size != 0)
1849 		return false;
1850 
1851 	switch (off) {
1852 	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
1853 		bpf_ctx_record_field_size(info, size_default);
1854 		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
1855 			return false;
1856 		break;
1857 	default:
1858 		if (size != size_default)
1859 			return false;
1860 	}
1861 
1862 	return true;
1863 }
1864 
1865 const struct bpf_prog_ops cg_dev_prog_ops = {
1866 };
1867 
1868 const struct bpf_verifier_ops cg_dev_verifier_ops = {
1869 	.get_func_proto		= cgroup_dev_func_proto,
1870 	.is_valid_access	= cgroup_dev_is_valid_access,
1871 };
1872 
1873 /**
1874  * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
1875  *
1876  * @head: sysctl table header
1877  * @table: sysctl table
1878  * @write: sysctl is being read (= 0) or written (= 1)
1879  * @buf: pointer to buffer (in and out)
1880  * @pcount: value-result argument: value is size of buffer pointed to by @buf,
1881  *	result is size of the new value if the program set one, initial value
1882  *	otherwise
1883  * @ppos: value-result argument: value is position at which read from or write
1884  *	to sysctl is happening, result is new position if program overrode it,
1885  *	initial value otherwise
1886  * @atype: type of program to be executed
1887  *
1888  * Program is run when sysctl is being accessed, either read or written, and
1889  * can allow or deny such access.
1890  *
1891  * This function will return %-EPERM if an attached program is found and
1892  * returned value != 1 during execution. In all other cases 0 is returned.
1893  */
1894 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
1895 				   const struct ctl_table *table, int write,
1896 				   char **buf, size_t *pcount, loff_t *ppos,
1897 				   enum cgroup_bpf_attach_type atype)
1898 {
1899 	struct bpf_sysctl_kern ctx = {
1900 		.head = head,
1901 		.table = table,
1902 		.write = write,
1903 		.ppos = ppos,
1904 		.cur_val = NULL,
1905 		.cur_len = PAGE_SIZE,
1906 		.new_val = NULL,
1907 		.new_len = 0,
1908 		.new_updated = 0,
1909 	};
1910 	struct cgroup *cgrp;
1911 	loff_t pos = 0;
1912 	int ret;
1913 
1914 	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
1915 	if (!ctx.cur_val ||
1916 	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
1917 		/* Let BPF program decide how to proceed. */
1918 		ctx.cur_len = 0;
1919 	}
1920 
1921 	if (write && *buf && *pcount) {
1922 		/* BPF program should be able to override new value with a
1923 		 * buffer bigger than the one provided by user.
1924 		 */
1925 		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
1926 		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
1927 		if (ctx.new_val) {
1928 			memcpy(ctx.new_val, *buf, ctx.new_len);
1929 		} else {
1930 			/* Let BPF program decide how to proceed. */
1931 			ctx.new_len = 0;
1932 		}
1933 	}
1934 
1935 	rcu_read_lock();
1936 	cgrp = task_dfl_cgroup(current);
1937 	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
1938 				    NULL);
1939 	rcu_read_unlock();
1940 
1941 	kfree(ctx.cur_val);
1942 
1943 	if (ret == 1 && ctx.new_updated) {
1944 		kfree(*buf);
1945 		*buf = ctx.new_val;
1946 		*pcount = ctx.new_len;
1947 	} else {
1948 		kfree(ctx.new_val);
1949 	}
1950 
1951 	return ret;
1952 }
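
/* Illustrative sketch (not part of this file), assuming a clang/libbpf build:
 * a cgroup/sysctl program as run by the function above. Returning 0 makes the
 * sysctl access fail with -EPERM. The program name and policy are made up;
 * the bpf_sysctl_get_name() call is only there to show the helper.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int sysctl_ro_sketch(struct bpf_sysctl *ctx)
{
	char name[32] = {};

	bpf_sysctl_get_name(ctx, name, sizeof(name), BPF_F_SYSCTL_BASE_NAME);
	if (ctx->write)
		return 0;	/* make every sysctl read-only for this cgroup */
	return 1;		/* reads proceed */
}

char _license[] SEC("license") = "GPL";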
1953 
1954 #ifdef CONFIG_NET
1955 static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
1956 			     struct bpf_sockopt_buf *buf)
1957 {
1958 	if (unlikely(max_optlen < 0))
1959 		return -EINVAL;
1960 
1961 	if (unlikely(max_optlen > PAGE_SIZE)) {
1962 		/* We don't expose optvals that are greater than PAGE_SIZE
1963 		 * to the BPF program.
1964 		 */
1965 		max_optlen = PAGE_SIZE;
1966 	}
1967 
1968 	if (max_optlen <= sizeof(buf->data)) {
1969 		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
1970 		 * bytes, avoid the cost of kzalloc.
1971 		 */
1972 		ctx->optval = buf->data;
1973 		ctx->optval_end = ctx->optval + max_optlen;
1974 		return max_optlen;
1975 	}
1976 
1977 	ctx->optval = kzalloc(max_optlen, GFP_USER);
1978 	if (!ctx->optval)
1979 		return -ENOMEM;
1980 
1981 	ctx->optval_end = ctx->optval + max_optlen;
1982 
1983 	return max_optlen;
1984 }
1985 
1986 static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
1987 			     struct bpf_sockopt_buf *buf)
1988 {
1989 	if (ctx->optval == buf->data)
1990 		return;
1991 	kfree(ctx->optval);
1992 }
1993 
1994 static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
1995 				  struct bpf_sockopt_buf *buf)
1996 {
1997 	return ctx->optval != buf->data;
1998 }
1999 
2000 int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
2001 				       int *optname, sockptr_t optval,
2002 				       int *optlen, char **kernel_optval)
2003 {
2004 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
2005 	struct bpf_sockopt_buf buf = {};
2006 	struct bpf_sockopt_kern ctx = {
2007 		.sk = sk,
2008 		.level = *level,
2009 		.optname = *optname,
2010 	};
2011 	int ret, max_optlen;
2012 
2013 	/* Allocate a bit more than the initial user buffer for
2014 	 * BPF program. The canonical use case is overriding
2015 	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
2016 	 */
2017 	max_optlen = max_t(int, 16, *optlen);
2018 	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
2019 	if (max_optlen < 0)
2020 		return max_optlen;
2021 
2022 	ctx.optlen = *optlen;
2023 
2024 	if (copy_from_sockptr(ctx.optval, optval,
2025 			      min(*optlen, max_optlen))) {
2026 		ret = -EFAULT;
2027 		goto out;
2028 	}
2029 
2030 	lock_sock(sk);
2031 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
2032 				    &ctx, bpf_prog_run, 0, NULL);
2033 	release_sock(sk);
2034 
2035 	if (ret)
2036 		goto out;
2037 
2038 	if (ctx.optlen == -1) {
2039 		/* optlen set to -1, bypass kernel */
2040 		ret = 1;
2041 	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
2042 		/* optlen is out of bounds */
2043 		if (*optlen > PAGE_SIZE && ctx.optlen >= 0) {
2044 			pr_info_once("bpf setsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
2045 				     ctx.optlen, max_optlen);
2046 			ret = 0;
2047 			goto out;
2048 		}
2049 		ret = -EFAULT;
2050 	} else {
2051 		/* optlen within bounds, run kernel handler */
2052 		ret = 0;
2053 
2054 		/* export any potential modifications */
2055 		*level = ctx.level;
2056 		*optname = ctx.optname;
2057 
2058 		/* optlen == 0 from BPF indicates that we should
2059 		 * use original userspace data.
2060 		 */
2061 		if (ctx.optlen != 0) {
2062 			*optlen = ctx.optlen;
2063 			/* We've used bpf_sockopt_kern->buf as an intermediary
2064 			 * storage, but the BPF program indicates that we need
2065 			 * to pass this data to the kernel setsockopt handler.
2066 			 * No way to export on-stack buf, have to allocate a
2067 			 * new buffer.
2068 			 */
2069 			if (!sockopt_buf_allocated(&ctx, &buf)) {
2070 				void *p = kmalloc(ctx.optlen, GFP_USER);
2071 
2072 				if (!p) {
2073 					ret = -ENOMEM;
2074 					goto out;
2075 				}
2076 				memcpy(p, ctx.optval, ctx.optlen);
2077 				*kernel_optval = p;
2078 			} else {
2079 				*kernel_optval = ctx.optval;
2080 			}
2081 			/* export and don't free sockopt buf */
2082 			return 0;
2083 		}
2084 	}
2085 
2086 out:
2087 	sockopt_free_buf(&ctx, &buf);
2088 	return ret;
2089 }
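
/* Illustrative sketch (not part of this file), assuming a clang/libbpf build:
 * the canonical TCP_CONGESTION override mentioned above, seen from the
 * program side. optval/optval_end carry PTR_TO_PACKET semantics, so the
 * verifier requires an explicit bounds check before the buffer is touched.
 * The program name is made up (6/13 are IPPROTO_TCP/TCP_CONGESTION).
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/setsockopt")
int cong_sketch(struct bpf_sockopt *ctx)
{
	const char cubic[] = "cubic";
	char *optval = ctx->optval;
	char *optval_end = ctx->optval_end;

	if (ctx->level != 6 /* IPPROTO_TCP */ ||
	    ctx->optname != 13 /* TCP_CONGESTION */)
		return 1;	/* not ours: leave the request untouched */

	if (optval + sizeof(cubic) - 1 > optval_end)
		return 0;	/* unexpectedly small buffer: deny */

	__builtin_memcpy(optval, cubic, sizeof(cubic) - 1);
	ctx->optlen = sizeof(cubic) - 1;
	return 1;	/* the kernel handler now sees the replacement value */
}

char _license[] SEC("license") = "GPL";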
2090 
2091 int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
2092 				       int optname, sockptr_t optval,
2093 				       sockptr_t optlen, int max_optlen,
2094 				       int retval)
2095 {
2096 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
2097 	struct bpf_sockopt_buf buf = {};
2098 	struct bpf_sockopt_kern ctx = {
2099 		.sk = sk,
2100 		.level = level,
2101 		.optname = optname,
2102 		.current_task = current,
2103 	};
2104 	int orig_optlen;
2105 	int ret;
2106 
2107 	orig_optlen = max_optlen;
2108 	ctx.optlen = max_optlen;
2109 	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
2110 	if (max_optlen < 0)
2111 		return max_optlen;
2112 
2113 	if (!retval) {
2114 		/* If kernel getsockopt finished successfully,
2115 		 * copy whatever was returned to the user back
2116 		 * into our temporary buffer. Set optlen to the
2117 		 * one that kernel returned as well to let
2118 		 * BPF programs inspect the value.
2119 		 */
2120 		if (copy_from_sockptr(&ctx.optlen, optlen,
2121 				      sizeof(ctx.optlen))) {
2122 			ret = -EFAULT;
2123 			goto out;
2124 		}
2125 
2126 		if (ctx.optlen < 0) {
2127 			ret = -EFAULT;
2128 			goto out;
2129 		}
2130 		orig_optlen = ctx.optlen;
2131 
2132 		if (copy_from_sockptr(ctx.optval, optval,
2133 				      min(ctx.optlen, max_optlen))) {
2134 			ret = -EFAULT;
2135 			goto out;
2136 		}
2137 	}
2138 
2139 	lock_sock(sk);
2140 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
2141 				    &ctx, bpf_prog_run, retval, NULL);
2142 	release_sock(sk);
2143 
2144 	if (ret < 0)
2145 		goto out;
2146 
2147 	if (!sockptr_is_null(optval) &&
2148 	    (ctx.optlen > max_optlen || ctx.optlen < 0)) {
2149 		if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
2150 			pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
2151 				     ctx.optlen, max_optlen);
2152 			ret = retval;
2153 			goto out;
2154 		}
2155 		ret = -EFAULT;
2156 		goto out;
2157 	}
2158 
2159 	if (ctx.optlen != 0) {
2160 		if (!sockptr_is_null(optval) &&
2161 		    copy_to_sockptr(optval, ctx.optval, ctx.optlen)) {
2162 			ret = -EFAULT;
2163 			goto out;
2164 		}
2165 		if (copy_to_sockptr(optlen, &ctx.optlen, sizeof(ctx.optlen))) {
2166 			ret = -EFAULT;
2167 			goto out;
2168 		}
2169 	}
2170 
2171 out:
2172 	sockopt_free_buf(&ctx, &buf);
2173 	return ret;
2174 }
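
/* Illustrative sketch (not part of this file), assuming a clang/libbpf build:
 * a cgroup/getsockopt program as run above. It may rewrite the value the
 * kernel handler produced and may also overwrite ctx->retval. The option
 * numbers and program name are made up (0/1 are IPPROTO_IP/IP_TOS).
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/getsockopt")
int getsockopt_sketch(struct bpf_sockopt *ctx)
{
	char *optval = ctx->optval;
	char *optval_end = ctx->optval_end;

	if (ctx->level == 0 /* IPPROTO_IP */ &&
	    ctx->optname == 1 /* IP_TOS */) {
		if (optval + 1 > optval_end)
			return 0;	/* unexpected: deny */
		optval[0] = 0;		/* report TOS 0 to the application */
		ctx->optlen = 1;
		ctx->retval = 0;	/* clear any error from the kernel handler */
	}
	return 1;
}

char _license[] SEC("license") = "GPL";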
2175 
2176 int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
2177 					    int optname, void *optval,
2178 					    int *optlen, int retval)
2179 {
2180 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
2181 	struct bpf_sockopt_kern ctx = {
2182 		.sk = sk,
2183 		.level = level,
2184 		.optname = optname,
2185 		.optlen = *optlen,
2186 		.optval = optval,
2187 		.optval_end = optval + *optlen,
2188 		.current_task = current,
2189 	};
2190 	int ret;
2191 
2192 	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
2193 	 * user data back into the BPF buffer when retval != 0. This is
2194 	 * done as an optimization to avoid an extra copy, assuming
2195 	 * the kernel won't populate the data in case of an error.
2196 	 * Here we always pass the data and memset() should
2197 	 * be called if that data shouldn't be "exported".
2198 	 */
2199 
2200 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
2201 				    &ctx, bpf_prog_run, retval, NULL);
2202 	if (ret < 0)
2203 		return ret;
2204 
2205 	if (ctx.optlen > *optlen)
2206 		return -EFAULT;
2207 
2208 	/* BPF programs can shrink the buffer, so export the modifications.
2209 	 */
2210 	if (ctx.optlen != 0)
2211 		*optlen = ctx.optlen;
2212 
2213 	return ret;
2214 }
2215 #endif
2216 
2217 static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
2218 			      size_t *lenp)
2219 {
2220 	ssize_t tmp_ret = 0, ret;
2221 
2222 	if (dir->header.parent) {
2223 		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
2224 		if (tmp_ret < 0)
2225 			return tmp_ret;
2226 	}
2227 
2228 	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
2229 	if (ret < 0)
2230 		return ret;
2231 	*bufp += ret;
2232 	*lenp -= ret;
2233 	ret += tmp_ret;
2234 
2235 	/* Avoid leading slash. */
2236 	if (!ret)
2237 		return ret;
2238 
2239 	tmp_ret = strscpy(*bufp, "/", *lenp);
2240 	if (tmp_ret < 0)
2241 		return tmp_ret;
2242 	*bufp += tmp_ret;
2243 	*lenp -= tmp_ret;
2244 
2245 	return ret + tmp_ret;
2246 }
2247 
2248 BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
2249 	   size_t, buf_len, u64, flags)
2250 {
2251 	ssize_t tmp_ret = 0, ret;
2252 
2253 	if (!buf)
2254 		return -EINVAL;
2255 
2256 	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
2257 		if (!ctx->head)
2258 			return -EINVAL;
2259 		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
2260 		if (tmp_ret < 0)
2261 			return tmp_ret;
2262 	}
2263 
2264 	ret = strscpy(buf, ctx->table->procname, buf_len);
2265 
2266 	return ret < 0 ? ret : tmp_ret + ret;
2267 }
2268 
2269 static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
2270 	.func		= bpf_sysctl_get_name,
2271 	.gpl_only	= false,
2272 	.ret_type	= RET_INTEGER,
2273 	.arg1_type	= ARG_PTR_TO_CTX,
2274 	.arg2_type	= ARG_PTR_TO_MEM | MEM_WRITE,
2275 	.arg3_type	= ARG_CONST_SIZE,
2276 	.arg4_type	= ARG_ANYTHING,
2277 };
2278 
2279 static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
2280 			     size_t src_len)
2281 {
2282 	if (!dst)
2283 		return -EINVAL;
2284 
2285 	if (!dst_len)
2286 		return -E2BIG;
2287 
2288 	if (!src || !src_len) {
2289 		memset(dst, 0, dst_len);
2290 		return -EINVAL;
2291 	}
2292 
2293 	memcpy(dst, src, min(dst_len, src_len));
2294 
2295 	if (dst_len > src_len) {
2296 		memset(dst + src_len, '\0', dst_len - src_len);
2297 		return src_len;
2298 	}
2299 
2300 	dst[dst_len - 1] = '\0';
2301 
2302 	return -E2BIG;
2303 }
2304 
2305 BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
2306 	   char *, buf, size_t, buf_len)
2307 {
2308 	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
2309 }
2310 
2311 static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
2312 	.func		= bpf_sysctl_get_current_value,
2313 	.gpl_only	= false,
2314 	.ret_type	= RET_INTEGER,
2315 	.arg1_type	= ARG_PTR_TO_CTX,
2316 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
2317 	.arg3_type	= ARG_CONST_SIZE,
2318 };
2319 
2320 BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
2321 	   size_t, buf_len)
2322 {
2323 	if (!ctx->write) {
2324 		if (buf && buf_len)
2325 			memset(buf, '\0', buf_len);
2326 		return -EINVAL;
2327 	}
2328 	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
2329 }
2330 
2331 static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
2332 	.func		= bpf_sysctl_get_new_value,
2333 	.gpl_only	= false,
2334 	.ret_type	= RET_INTEGER,
2335 	.arg1_type	= ARG_PTR_TO_CTX,
2336 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
2337 	.arg3_type	= ARG_CONST_SIZE,
2338 };
2339 
2340 BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
2341 	   const char *, buf, size_t, buf_len)
2342 {
2343 	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
2344 		return -EINVAL;
2345 
2346 	if (buf_len > PAGE_SIZE - 1)
2347 		return -E2BIG;
2348 
2349 	memcpy(ctx->new_val, buf, buf_len);
2350 	ctx->new_len = buf_len;
2351 	ctx->new_updated = 1;
2352 
2353 	return 0;
2354 }
2355 
2356 static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
2357 	.func		= bpf_sysctl_set_new_value,
2358 	.gpl_only	= false,
2359 	.ret_type	= RET_INTEGER,
2360 	.arg1_type	= ARG_PTR_TO_CTX,
2361 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
2362 	.arg3_type	= ARG_CONST_SIZE,
2363 };
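
/* Illustrative sketch (not part of this file), assuming a clang/libbpf build:
 * bpf_sysctl_set_new_value() from the program side. When the program returns
 * 1 and new_updated was set, __cgroup_bpf_run_filter_sysctl() hands the
 * replacement buffer to the proc handler instead of the user-supplied one.
 * The program name and the substituted value are made up.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int sysctl_rewrite_sketch(struct bpf_sysctl *ctx)
{
	char one[] = "1\n";

	if (ctx->write)
		bpf_sysctl_set_new_value(ctx, one, sizeof(one) - 1);
	return 1;
}

char _license[] SEC("license") = "GPL";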
2364 
2365 static const struct bpf_func_proto *
2366 sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2367 {
2368 	const struct bpf_func_proto *func_proto;
2369 
2370 	func_proto = cgroup_common_func_proto(func_id, prog);
2371 	if (func_proto)
2372 		return func_proto;
2373 
2374 	switch (func_id) {
2375 	case BPF_FUNC_sysctl_get_name:
2376 		return &bpf_sysctl_get_name_proto;
2377 	case BPF_FUNC_sysctl_get_current_value:
2378 		return &bpf_sysctl_get_current_value_proto;
2379 	case BPF_FUNC_sysctl_get_new_value:
2380 		return &bpf_sysctl_get_new_value_proto;
2381 	case BPF_FUNC_sysctl_set_new_value:
2382 		return &bpf_sysctl_set_new_value_proto;
2383 	case BPF_FUNC_ktime_get_coarse_ns:
2384 		return &bpf_ktime_get_coarse_ns_proto;
2385 	case BPF_FUNC_perf_event_output:
2386 		return &bpf_event_output_data_proto;
2387 	default:
2388 		return bpf_base_func_proto(func_id, prog);
2389 	}
2390 }
2391 
2392 static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
2393 				   const struct bpf_prog *prog,
2394 				   struct bpf_insn_access_aux *info)
2395 {
2396 	const int size_default = sizeof(__u32);
2397 
2398 	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
2399 		return false;
2400 
2401 	switch (off) {
2402 	case bpf_ctx_range(struct bpf_sysctl, write):
2403 		if (type != BPF_READ)
2404 			return false;
2405 		bpf_ctx_record_field_size(info, size_default);
2406 		return bpf_ctx_narrow_access_ok(off, size, size_default);
2407 	case bpf_ctx_range(struct bpf_sysctl, file_pos):
2408 		if (type == BPF_READ) {
2409 			bpf_ctx_record_field_size(info, size_default);
2410 			return bpf_ctx_narrow_access_ok(off, size, size_default);
2411 		} else {
2412 			return size == size_default;
2413 		}
2414 	default:
2415 		return false;
2416 	}
2417 }
2418 
2419 static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
2420 				     const struct bpf_insn *si,
2421 				     struct bpf_insn *insn_buf,
2422 				     struct bpf_prog *prog, u32 *target_size)
2423 {
2424 	struct bpf_insn *insn = insn_buf;
2425 	u32 read_size;
2426 
2427 	switch (si->off) {
2428 	case offsetof(struct bpf_sysctl, write):
2429 		*insn++ = BPF_LDX_MEM(
2430 			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
2431 			bpf_target_off(struct bpf_sysctl_kern, write,
2432 				       sizeof_field(struct bpf_sysctl_kern,
2433 						    write),
2434 				       target_size));
2435 		break;
2436 	case offsetof(struct bpf_sysctl, file_pos):
2437 		/* ppos is a pointer so it should be accessed via indirect
2438 		 * loads and stores. Also, for stores an additional temporary
2439 		 * register is used since neither src_reg nor dst_reg can be
2440 		 * overridden.
2441 		 */
2442 		if (type == BPF_WRITE) {
2443 			int treg = BPF_REG_9;
2444 
2445 			if (si->src_reg == treg || si->dst_reg == treg)
2446 				--treg;
2447 			if (si->src_reg == treg || si->dst_reg == treg)
2448 				--treg;
2449 			*insn++ = BPF_STX_MEM(
2450 				BPF_DW, si->dst_reg, treg,
2451 				offsetof(struct bpf_sysctl_kern, tmp_reg));
2452 			*insn++ = BPF_LDX_MEM(
2453 				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2454 				treg, si->dst_reg,
2455 				offsetof(struct bpf_sysctl_kern, ppos));
2456 			*insn++ = BPF_RAW_INSN(
2457 				BPF_CLASS(si->code) | BPF_MEM | BPF_SIZEOF(u32),
2458 				treg, si->src_reg,
2459 				bpf_ctx_narrow_access_offset(
2460 					0, sizeof(u32), sizeof(loff_t)),
2461 				si->imm);
2462 			*insn++ = BPF_LDX_MEM(
2463 				BPF_DW, treg, si->dst_reg,
2464 				offsetof(struct bpf_sysctl_kern, tmp_reg));
2465 		} else {
2466 			*insn++ = BPF_LDX_MEM(
2467 				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2468 				si->dst_reg, si->src_reg,
2469 				offsetof(struct bpf_sysctl_kern, ppos));
2470 			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
2471 			*insn++ = BPF_LDX_MEM(
2472 				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
2473 				bpf_ctx_narrow_access_offset(
2474 					0, read_size, sizeof(loff_t)));
2475 		}
2476 		*target_size = sizeof(u32);
2477 		break;
2478 	}
2479 
2480 	return insn - insn_buf;
2481 }
2482 
2483 const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
2484 	.get_func_proto		= sysctl_func_proto,
2485 	.is_valid_access	= sysctl_is_valid_access,
2486 	.convert_ctx_access	= sysctl_convert_ctx_access,
2487 };
2488 
2489 const struct bpf_prog_ops cg_sysctl_prog_ops = {
2490 };
2491 
2492 #ifdef CONFIG_NET
2493 BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
2494 {
2495 	const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;
2496 
2497 	return net->net_cookie;
2498 }
2499 
2500 static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
2501 	.func		= bpf_get_netns_cookie_sockopt,
2502 	.gpl_only	= false,
2503 	.ret_type	= RET_INTEGER,
2504 	.arg1_type	= ARG_PTR_TO_CTX_OR_NULL,
2505 };
2506 #endif
2507 
2508 static const struct bpf_func_proto *
2509 cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2510 {
2511 	const struct bpf_func_proto *func_proto;
2512 
2513 	func_proto = cgroup_common_func_proto(func_id, prog);
2514 	if (func_proto)
2515 		return func_proto;
2516 
2517 	switch (func_id) {
2518 #ifdef CONFIG_NET
2519 	case BPF_FUNC_get_netns_cookie:
2520 		return &bpf_get_netns_cookie_sockopt_proto;
2521 	case BPF_FUNC_sk_storage_get:
2522 		return &bpf_sk_storage_get_proto;
2523 	case BPF_FUNC_sk_storage_delete:
2524 		return &bpf_sk_storage_delete_proto;
2525 	case BPF_FUNC_setsockopt:
2526 		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2527 			return &bpf_sk_setsockopt_proto;
2528 		return NULL;
2529 	case BPF_FUNC_getsockopt:
2530 		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2531 			return &bpf_sk_getsockopt_proto;
2532 		return NULL;
2533 #endif
2534 #ifdef CONFIG_INET
2535 	case BPF_FUNC_tcp_sock:
2536 		return &bpf_tcp_sock_proto;
2537 #endif
2538 	case BPF_FUNC_perf_event_output:
2539 		return &bpf_event_output_data_proto;
2540 	default:
2541 		return bpf_base_func_proto(func_id, prog);
2542 	}
2543 }
2544 
2545 static bool cg_sockopt_is_valid_access(int off, int size,
2546 				       enum bpf_access_type type,
2547 				       const struct bpf_prog *prog,
2548 				       struct bpf_insn_access_aux *info)
2549 {
2550 	const int size_default = sizeof(__u32);
2551 
2552 	if (off < 0 || off >= sizeof(struct bpf_sockopt))
2553 		return false;
2554 
2555 	if (off % size != 0)
2556 		return false;
2557 
2558 	if (type == BPF_WRITE) {
2559 		switch (off) {
2560 		case offsetof(struct bpf_sockopt, retval):
2561 			if (size != size_default)
2562 				return false;
2563 			return prog->expected_attach_type ==
2564 				BPF_CGROUP_GETSOCKOPT;
2565 		case offsetof(struct bpf_sockopt, optname):
2566 			fallthrough;
2567 		case offsetof(struct bpf_sockopt, level):
2568 			if (size != size_default)
2569 				return false;
2570 			return prog->expected_attach_type ==
2571 				BPF_CGROUP_SETSOCKOPT;
2572 		case offsetof(struct bpf_sockopt, optlen):
2573 			return size == size_default;
2574 		default:
2575 			return false;
2576 		}
2577 	}
2578 
2579 	switch (off) {
2580 	case bpf_ctx_range_ptr(struct bpf_sockopt, sk):
2581 		if (size != sizeof(__u64))
2582 			return false;
2583 		info->reg_type = PTR_TO_SOCKET;
2584 		break;
2585 	case bpf_ctx_range_ptr(struct bpf_sockopt, optval):
2586 		if (size != sizeof(__u64))
2587 			return false;
2588 		info->reg_type = PTR_TO_PACKET;
2589 		break;
2590 	case bpf_ctx_range_ptr(struct bpf_sockopt, optval_end):
2591 		if (size != sizeof(__u64))
2592 			return false;
2593 		info->reg_type = PTR_TO_PACKET_END;
2594 		break;
2595 	case bpf_ctx_range(struct bpf_sockopt, retval):
2596 		if (size != size_default)
2597 			return false;
2598 		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
2599 	default:
2600 		if (size != size_default)
2601 			return false;
2602 		break;
2603 	}
2604 	return true;
2605 }
2606 
2607 #define CG_SOCKOPT_READ_FIELD(F)					\
2608 	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),	\
2609 		    si->dst_reg, si->src_reg,				\
2610 		    offsetof(struct bpf_sockopt_kern, F))
2611 
2612 #define CG_SOCKOPT_WRITE_FIELD(F)					\
2613 	BPF_RAW_INSN((BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F) |	\
2614 		      BPF_MEM | BPF_CLASS(si->code)),			\
2615 		     si->dst_reg, si->src_reg,				\
2616 		     offsetof(struct bpf_sockopt_kern, F),		\
2617 		     si->imm)
2618 
2619 static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
2620 					 const struct bpf_insn *si,
2621 					 struct bpf_insn *insn_buf,
2622 					 struct bpf_prog *prog,
2623 					 u32 *target_size)
2624 {
2625 	struct bpf_insn *insn = insn_buf;
2626 
2627 	switch (si->off) {
2628 	case offsetof(struct bpf_sockopt, sk):
2629 		*insn++ = CG_SOCKOPT_READ_FIELD(sk);
2630 		break;
2631 	case offsetof(struct bpf_sockopt, level):
2632 		if (type == BPF_WRITE)
2633 			*insn++ = CG_SOCKOPT_WRITE_FIELD(level);
2634 		else
2635 			*insn++ = CG_SOCKOPT_READ_FIELD(level);
2636 		break;
2637 	case offsetof(struct bpf_sockopt, optname):
2638 		if (type == BPF_WRITE)
2639 			*insn++ = CG_SOCKOPT_WRITE_FIELD(optname);
2640 		else
2641 			*insn++ = CG_SOCKOPT_READ_FIELD(optname);
2642 		break;
2643 	case offsetof(struct bpf_sockopt, optlen):
2644 		if (type == BPF_WRITE)
2645 			*insn++ = CG_SOCKOPT_WRITE_FIELD(optlen);
2646 		else
2647 			*insn++ = CG_SOCKOPT_READ_FIELD(optlen);
2648 		break;
2649 	case offsetof(struct bpf_sockopt, retval):
2650 		BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);
2651 
2652 		if (type == BPF_WRITE) {
2653 			int treg = BPF_REG_9;
2654 
2655 			if (si->src_reg == treg || si->dst_reg == treg)
2656 				--treg;
2657 			if (si->src_reg == treg || si->dst_reg == treg)
2658 				--treg;
2659 			*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg,
2660 					      offsetof(struct bpf_sockopt_kern, tmp_reg));
2661 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2662 					      treg, si->dst_reg,
2663 					      offsetof(struct bpf_sockopt_kern, current_task));
2664 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2665 					      treg, treg,
2666 					      offsetof(struct task_struct, bpf_ctx));
2667 			*insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_MEM |
2668 					       BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2669 					       treg, si->src_reg,
2670 					       offsetof(struct bpf_cg_run_ctx, retval),
2671 					       si->imm);
2672 			*insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
2673 					      offsetof(struct bpf_sockopt_kern, tmp_reg));
2674 		} else {
2675 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2676 					      si->dst_reg, si->src_reg,
2677 					      offsetof(struct bpf_sockopt_kern, current_task));
2678 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2679 					      si->dst_reg, si->dst_reg,
2680 					      offsetof(struct task_struct, bpf_ctx));
2681 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2682 					      si->dst_reg, si->dst_reg,
2683 					      offsetof(struct bpf_cg_run_ctx, retval));
2684 		}
2685 		break;
2686 	case offsetof(struct bpf_sockopt, optval):
2687 		*insn++ = CG_SOCKOPT_READ_FIELD(optval);
2688 		break;
2689 	case offsetof(struct bpf_sockopt, optval_end):
2690 		*insn++ = CG_SOCKOPT_READ_FIELD(optval_end);
2691 		break;
2692 	}
2693 
2694 	return insn - insn_buf;
2695 }
2696 
2697 static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
2698 				   bool direct_write,
2699 				   const struct bpf_prog *prog)
2700 {
2701 	/* Nothing to do for the sockopt argument. The data is allocated with kzalloc().
2702 	 */
2703 	return 0;
2704 }
2705 
2706 const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
2707 	.get_func_proto		= cg_sockopt_func_proto,
2708 	.is_valid_access	= cg_sockopt_is_valid_access,
2709 	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
2710 	.gen_prologue		= cg_sockopt_get_prologue,
2711 };
2712 
2713 const struct bpf_prog_ops cg_sockopt_prog_ops = {
2714 };
2715 
2716 /* Common helpers for cgroup hooks. */
2717 const struct bpf_func_proto *
2718 cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2719 {
2720 	switch (func_id) {
2721 	case BPF_FUNC_get_local_storage:
2722 		return &bpf_get_local_storage_proto;
2723 	case BPF_FUNC_get_retval:
2724 		switch (prog->expected_attach_type) {
2725 		case BPF_CGROUP_INET_INGRESS:
2726 		case BPF_CGROUP_INET_EGRESS:
2727 		case BPF_CGROUP_SOCK_OPS:
2728 		case BPF_CGROUP_UDP4_RECVMSG:
2729 		case BPF_CGROUP_UDP6_RECVMSG:
2730 		case BPF_CGROUP_UNIX_RECVMSG:
2731 		case BPF_CGROUP_INET4_GETPEERNAME:
2732 		case BPF_CGROUP_INET6_GETPEERNAME:
2733 		case BPF_CGROUP_UNIX_GETPEERNAME:
2734 		case BPF_CGROUP_INET4_GETSOCKNAME:
2735 		case BPF_CGROUP_INET6_GETSOCKNAME:
2736 		case BPF_CGROUP_UNIX_GETSOCKNAME:
2737 			return NULL;
2738 		default:
2739 			return &bpf_get_retval_proto;
2740 		}
2741 	case BPF_FUNC_set_retval:
2742 		switch (prog->expected_attach_type) {
2743 		case BPF_CGROUP_INET_INGRESS:
2744 		case BPF_CGROUP_INET_EGRESS:
2745 		case BPF_CGROUP_SOCK_OPS:
2746 		case BPF_CGROUP_UDP4_RECVMSG:
2747 		case BPF_CGROUP_UDP6_RECVMSG:
2748 		case BPF_CGROUP_UNIX_RECVMSG:
2749 		case BPF_CGROUP_INET4_GETPEERNAME:
2750 		case BPF_CGROUP_INET6_GETPEERNAME:
2751 		case BPF_CGROUP_UNIX_GETPEERNAME:
2752 		case BPF_CGROUP_INET4_GETSOCKNAME:
2753 		case BPF_CGROUP_INET6_GETSOCKNAME:
2754 		case BPF_CGROUP_UNIX_GETSOCKNAME:
2755 			return NULL;
2756 		default:
2757 			return &bpf_set_retval_proto;
2758 		}
2759 	default:
2760 		return NULL;
2761 	}
2762 }
2763