xref: /linux/kernel/bpf/cgroup.c (revision 876f5ebd58a9ac42f48a7ead3d5b274a314e0ace)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Functions to manage eBPF programs attached to cgroups
4  *
5  * Copyright (c) 2016 Daniel Mack
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/atomic.h>
10 #include <linux/cgroup.h>
11 #include <linux/filter.h>
12 #include <linux/slab.h>
13 #include <linux/sysctl.h>
14 #include <linux/string.h>
15 #include <linux/bpf.h>
16 #include <linux/bpf-cgroup.h>
17 #include <linux/bpf_lsm.h>
18 #include <linux/bpf_verifier.h>
19 #include <net/sock.h>
20 #include <net/bpf_sk_storage.h>
21 
22 #include "../cgroup/cgroup-internal.h"
23 
24 DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
25 EXPORT_SYMBOL(cgroup_bpf_enabled_key);
26 
27 /*
28  * cgroup bpf destruction makes heavy use of work items and there can be a lot
29  * of concurrent destructions.  Use a separate workqueue so that cgroup bpf
30  * destruction work items don't end up filling up max_active of system_wq
31  * which may lead to deadlock.
32  */
33 static struct workqueue_struct *cgroup_bpf_destroy_wq;
34 
35 static int __init cgroup_bpf_wq_init(void)
36 {
37 	cgroup_bpf_destroy_wq = alloc_workqueue("cgroup_bpf_destroy", 0, 1);
38 	if (!cgroup_bpf_destroy_wq)
39 		panic("Failed to alloc workqueue for cgroup bpf destroy.\n");
40 	return 0;
41 }
42 core_initcall(cgroup_bpf_wq_init);
43 
44 static int cgroup_bpf_lifetime_notify(struct notifier_block *nb,
45 				      unsigned long action, void *data);
46 
47 static struct notifier_block cgroup_bpf_lifetime_nb = {
48 	.notifier_call = cgroup_bpf_lifetime_notify,
49 };
50 
51 void __init cgroup_bpf_lifetime_notifier_init(void)
52 {
53 	BUG_ON(blocking_notifier_chain_register(&cgroup_lifetime_notifier,
54 						&cgroup_bpf_lifetime_nb));
55 }
56 
57 /* __always_inline is necessary to prevent indirect call through run_prog
58  * function pointer.
59  */
60 static __always_inline int
61 bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
62 		      enum cgroup_bpf_attach_type atype,
63 		      const void *ctx, bpf_prog_run_fn run_prog,
64 		      int retval, u32 *ret_flags)
65 {
66 	const struct bpf_prog_array_item *item;
67 	const struct bpf_prog *prog;
68 	const struct bpf_prog_array *array;
69 	struct bpf_run_ctx *old_run_ctx;
70 	struct bpf_cg_run_ctx run_ctx;
71 	u32 func_ret;
72 
73 	run_ctx.retval = retval;
74 	migrate_disable();
75 	rcu_read_lock();
76 	array = rcu_dereference(cgrp->effective[atype]);
77 	item = &array->items[0];
78 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
79 	while ((prog = READ_ONCE(item->prog))) {
80 		run_ctx.prog_item = item;
81 		func_ret = run_prog(prog, ctx);
82 		if (ret_flags) {
83 			*(ret_flags) |= (func_ret >> 1);
84 			func_ret &= 1;
85 		}
86 		if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
87 			run_ctx.retval = -EPERM;
88 		item++;
89 	}
90 	bpf_reset_run_ctx(old_run_ctx);
91 	rcu_read_unlock();
92 	migrate_enable();
93 	return run_ctx.retval;
94 }
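
/*
 * Illustrative example (simplified sketch, not compiled as part of this
 * file): for a CGROUP_INET_EGRESS program the raw return value is split by
 * the loop above into a verdict bit and extra flags.  A program returning 3
 * ("keep packet and set congestion notification") is folded as:
 *
 *	func_ret    = 3;		// raw BPF program return value
 *	*ret_flags |= func_ret >> 1;	// 1, later tested as BPF_RET_SET_CN
 *	func_ret   &= 1;		// 1 == allow, so retval stays 0
 *
 * A hypothetical program producing that value could simply be:
 *
 *	SEC("cgroup_skb/egress")
 *	int egress_notify_cn(struct __sk_buff *skb)
 *	{
 *		return 3;	// keep the packet, request CN
 *	}
 */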
95 
96 unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
97 				       const struct bpf_insn *insn)
98 {
99 	const struct bpf_prog *shim_prog;
100 	struct sock *sk;
101 	struct cgroup *cgrp;
102 	int ret = 0;
103 	u64 *args;
104 
105 	args = (u64 *)ctx;
106 	sk = (void *)(unsigned long)args[0];
107 	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
108 	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
109 
110 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
111 	if (likely(cgrp))
112 		ret = bpf_prog_run_array_cg(&cgrp->bpf,
113 					    shim_prog->aux->cgroup_atype,
114 					    ctx, bpf_prog_run, 0, NULL);
115 	return ret;
116 }
117 
118 unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
119 					 const struct bpf_insn *insn)
120 {
121 	const struct bpf_prog *shim_prog;
122 	struct socket *sock;
123 	struct cgroup *cgrp;
124 	int ret = 0;
125 	u64 *args;
126 
127 	args = (u64 *)ctx;
128 	sock = (void *)(unsigned long)args[0];
129 	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
130 	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
131 
132 	cgrp = sock_cgroup_ptr(&sock->sk->sk_cgrp_data);
133 	if (likely(cgrp))
134 		ret = bpf_prog_run_array_cg(&cgrp->bpf,
135 					    shim_prog->aux->cgroup_atype,
136 					    ctx, bpf_prog_run, 0, NULL);
137 	return ret;
138 }
139 
140 unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
141 					  const struct bpf_insn *insn)
142 {
143 	const struct bpf_prog *shim_prog;
144 	struct cgroup *cgrp;
145 	int ret = 0;
146 
147 	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
148 	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
149 
150 	/* We rely on trampoline's __bpf_prog_enter_lsm_cgroup to grab RCU read lock. */
151 	cgrp = task_dfl_cgroup(current);
152 	if (likely(cgrp))
153 		ret = bpf_prog_run_array_cg(&cgrp->bpf,
154 					    shim_prog->aux->cgroup_atype,
155 					    ctx, bpf_prog_run, 0, NULL);
156 	return ret;
157 }
158 
159 #ifdef CONFIG_BPF_LSM
160 struct cgroup_lsm_atype {
161 	u32 attach_btf_id;
162 	int refcnt;
163 };
164 
165 static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM];
166 
167 static enum cgroup_bpf_attach_type
168 bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
169 {
170 	int i;
171 
172 	lockdep_assert_held(&cgroup_mutex);
173 
174 	if (attach_type != BPF_LSM_CGROUP)
175 		return to_cgroup_bpf_attach_type(attach_type);
176 
177 	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
178 		if (cgroup_lsm_atype[i].attach_btf_id == attach_btf_id)
179 			return CGROUP_LSM_START + i;
180 
181 	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
182 		if (cgroup_lsm_atype[i].attach_btf_id == 0)
183 			return CGROUP_LSM_START + i;
184 
185 	return -E2BIG;
186 
187 }
188 
189 void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype)
190 {
191 	int i = cgroup_atype - CGROUP_LSM_START;
192 
193 	lockdep_assert_held(&cgroup_mutex);
194 
195 	WARN_ON_ONCE(cgroup_lsm_atype[i].attach_btf_id &&
196 		     cgroup_lsm_atype[i].attach_btf_id != attach_btf_id);
197 
198 	cgroup_lsm_atype[i].attach_btf_id = attach_btf_id;
199 	cgroup_lsm_atype[i].refcnt++;
200 }
201 
202 void bpf_cgroup_atype_put(int cgroup_atype)
203 {
204 	int i = cgroup_atype - CGROUP_LSM_START;
205 
206 	cgroup_lock();
207 	if (--cgroup_lsm_atype[i].refcnt <= 0)
208 		cgroup_lsm_atype[i].attach_btf_id = 0;
209 	WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
210 	cgroup_unlock();
211 }
212 #else
213 static enum cgroup_bpf_attach_type
214 bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
215 {
216 	if (attach_type != BPF_LSM_CGROUP)
217 		return to_cgroup_bpf_attach_type(attach_type);
218 	return -EOPNOTSUPP;
219 }
220 #endif /* CONFIG_BPF_LSM */
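
/*
 * Illustrative note (not from the original source): with CONFIG_BPF_LSM,
 * every distinct LSM hook targeted by a BPF_LSM_CGROUP program claims one
 * of the CGROUP_LSM_NUM slots above, keyed by attach_btf_id and refcounted
 * across attachments.  A program occupying such a slot would typically be
 * declared along these lines (hook name and signature are an example):
 *
 *	SEC("lsm_cgroup/socket_bind")
 *	int BPF_PROG(bind_hook, struct socket *sock,
 *		     struct sockaddr *address, int addrlen)
 *	{
 *		return 1;	// allow; 0 is folded into -EPERM by
 *				// bpf_prog_run_array_cg()
 *	}
 */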
221 
222 static void cgroup_bpf_offline(struct cgroup *cgrp)
223 {
224 	cgroup_get(cgrp);
225 	percpu_ref_kill(&cgrp->bpf.refcnt);
226 }
227 
228 static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
229 {
230 	enum bpf_cgroup_storage_type stype;
231 
232 	for_each_cgroup_storage_type(stype)
233 		bpf_cgroup_storage_free(storages[stype]);
234 }
235 
236 static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
237 				     struct bpf_cgroup_storage *new_storages[],
238 				     enum bpf_attach_type type,
239 				     struct bpf_prog *prog,
240 				     struct cgroup *cgrp)
241 {
242 	enum bpf_cgroup_storage_type stype;
243 	struct bpf_cgroup_storage_key key;
244 	struct bpf_map *map;
245 
246 	key.cgroup_inode_id = cgroup_id(cgrp);
247 	key.attach_type = type;
248 
249 	for_each_cgroup_storage_type(stype) {
250 		map = prog->aux->cgroup_storage[stype];
251 		if (!map)
252 			continue;
253 
254 		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
255 		if (storages[stype])
256 			continue;
257 
258 		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
259 		if (IS_ERR(storages[stype])) {
260 			bpf_cgroup_storages_free(new_storages);
261 			return -ENOMEM;
262 		}
263 
264 		new_storages[stype] = storages[stype];
265 	}
266 
267 	return 0;
268 }
269 
270 static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
271 				       struct bpf_cgroup_storage *src[])
272 {
273 	enum bpf_cgroup_storage_type stype;
274 
275 	for_each_cgroup_storage_type(stype)
276 		dst[stype] = src[stype];
277 }
278 
279 static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
280 				     struct cgroup *cgrp,
281 				     enum bpf_attach_type attach_type)
282 {
283 	enum bpf_cgroup_storage_type stype;
284 
285 	for_each_cgroup_storage_type(stype)
286 		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
287 }
288 
289 /* Called when bpf_cgroup_link is auto-detached from dying cgroup.
290  * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
291  * doesn't free link memory, which will eventually be done by bpf_link's
292  * release() callback, when its last FD is closed.
293  */
294 static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
295 {
296 	cgroup_put(link->cgroup);
297 	link->cgroup = NULL;
298 }
299 
300 /**
301  * cgroup_bpf_release() - put references of all bpf programs and
302  *                        release all cgroup bpf data
303  * @work: work structure embedded into the cgroup to modify
304  */
305 static void cgroup_bpf_release(struct work_struct *work)
306 {
307 	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
308 					       bpf.release_work);
309 	struct bpf_prog_array *old_array;
310 	struct list_head *storages = &cgrp->bpf.storages;
311 	struct bpf_cgroup_storage *storage, *stmp;
312 
313 	unsigned int atype;
314 
315 	cgroup_lock();
316 
317 	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
318 		struct hlist_head *progs = &cgrp->bpf.progs[atype];
319 		struct bpf_prog_list *pl;
320 		struct hlist_node *pltmp;
321 
322 		hlist_for_each_entry_safe(pl, pltmp, progs, node) {
323 			hlist_del(&pl->node);
324 			if (pl->prog) {
325 				if (pl->prog->expected_attach_type == BPF_LSM_CGROUP)
326 					bpf_trampoline_unlink_cgroup_shim(pl->prog);
327 				bpf_prog_put(pl->prog);
328 			}
329 			if (pl->link) {
330 				if (pl->link->link.prog->expected_attach_type == BPF_LSM_CGROUP)
331 					bpf_trampoline_unlink_cgroup_shim(pl->link->link.prog);
332 				bpf_cgroup_link_auto_detach(pl->link);
333 			}
334 			kfree(pl);
335 			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
336 		}
337 		old_array = rcu_dereference_protected(
338 				cgrp->bpf.effective[atype],
339 				lockdep_is_held(&cgroup_mutex));
340 		bpf_prog_array_free(old_array);
341 	}
342 
343 	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
344 		bpf_cgroup_storage_unlink(storage);
345 		bpf_cgroup_storage_free(storage);
346 	}
347 
348 	cgroup_unlock();
349 
350 	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
351 		cgroup_bpf_put(p);
352 
353 	percpu_ref_exit(&cgrp->bpf.refcnt);
354 	cgroup_put(cgrp);
355 }
356 
357 /**
358  * cgroup_bpf_release_fn() - callback used to schedule releasing
359  *                           of bpf cgroup data
360  * @ref: percpu ref counter structure
361  */
362 static void cgroup_bpf_release_fn(struct percpu_ref *ref)
363 {
364 	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);
365 
366 	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
367 	queue_work(cgroup_bpf_destroy_wq, &cgrp->bpf.release_work);
368 }
369 
370 /* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
371  * link or direct prog.
372  */
373 static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
374 {
375 	if (pl->prog)
376 		return pl->prog;
377 	if (pl->link)
378 		return pl->link->link.prog;
379 	return NULL;
380 }
381 
382 /* count number of elements in the list.
383  * it's slow but the list cannot be long
384  */
385 static u32 prog_list_length(struct hlist_head *head, int *preorder_cnt)
386 {
387 	struct bpf_prog_list *pl;
388 	u32 cnt = 0;
389 
390 	hlist_for_each_entry(pl, head, node) {
391 		if (!prog_list_prog(pl))
392 			continue;
393 		if (preorder_cnt && (pl->flags & BPF_F_PREORDER))
394 			(*preorder_cnt)++;
395 		cnt++;
396 	}
397 	return cnt;
398 }
399 
400 /* if parent has non-overridable prog attached,
401  * disallow attaching new programs to the descendant cgroup.
402  * if parent has overridable or multi-prog, allow attaching
403  */
404 static bool hierarchy_allows_attach(struct cgroup *cgrp,
405 				    enum cgroup_bpf_attach_type atype)
406 {
407 	struct cgroup *p;
408 
409 	p = cgroup_parent(cgrp);
410 	if (!p)
411 		return true;
412 	do {
413 		u32 flags = p->bpf.flags[atype];
414 		u32 cnt;
415 
416 		if (flags & BPF_F_ALLOW_MULTI)
417 			return true;
418 		cnt = prog_list_length(&p->bpf.progs[atype], NULL);
419 		WARN_ON_ONCE(cnt > 1);
420 		if (cnt == 1)
421 			return !!(flags & BPF_F_ALLOW_OVERRIDE);
422 		p = cgroup_parent(p);
423 	} while (p);
424 	return true;
425 }
426 
427 /* compute a chain of effective programs for a given cgroup:
428  * start from the list of programs in this cgroup and add
429  * all parent programs.
430  * Note that a parent's F_ALLOW_OVERRIDE-type program yields
431  * to programs in this cgroup
432  */
433 static int compute_effective_progs(struct cgroup *cgrp,
434 				   enum cgroup_bpf_attach_type atype,
435 				   struct bpf_prog_array **array)
436 {
437 	struct bpf_prog_array_item *item;
438 	struct bpf_prog_array *progs;
439 	struct bpf_prog_list *pl;
440 	struct cgroup *p = cgrp;
441 	int i, j, cnt = 0, preorder_cnt = 0, fstart, bstart, init_bstart;
442 
443 	/* count number of effective programs by walking parents */
444 	do {
445 		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
446 			cnt += prog_list_length(&p->bpf.progs[atype], &preorder_cnt);
447 		p = cgroup_parent(p);
448 	} while (p);
449 
450 	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
451 	if (!progs)
452 		return -ENOMEM;
453 
454 	/* populate the array with effective progs */
455 	cnt = 0;
456 	p = cgrp;
457 	fstart = preorder_cnt;
458 	bstart = preorder_cnt - 1;
459 	do {
460 		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
461 			continue;
462 
463 		init_bstart = bstart;
464 		hlist_for_each_entry(pl, &p->bpf.progs[atype], node) {
465 			if (!prog_list_prog(pl))
466 				continue;
467 
468 			if (pl->flags & BPF_F_PREORDER) {
469 				item = &progs->items[bstart];
470 				bstart--;
471 			} else {
472 				item = &progs->items[fstart];
473 				fstart++;
474 			}
475 			item->prog = prog_list_prog(pl);
476 			bpf_cgroup_storages_assign(item->cgroup_storage,
477 						   pl->storage);
478 			cnt++;
479 		}
480 
481 		/* reverse pre-ordering progs at this cgroup level */
482 		for (i = bstart + 1, j = init_bstart; i < j; i++, j--)
483 			swap(progs->items[i], progs->items[j]);
484 
485 	} while ((p = cgroup_parent(p)));
486 
487 	*array = progs;
488 	return 0;
489 }
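
/*
 * Illustrative example (not part of the original source): for a hierarchy
 * /A/B where A has progs A1 (BPF_F_PREORDER) and A2, and B has progs B1
 * (BPF_F_PREORDER) and B2, all attached with BPF_F_ALLOW_MULTI, the
 * effective array computed above for B ends up ordered as:
 *
 *	[ A1, B1, B2, A2 ]
 *
 * i.e. BPF_F_PREORDER programs execute parent-first, followed by the
 * default programs, which execute child-first.
 */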
490 
491 static void activate_effective_progs(struct cgroup *cgrp,
492 				     enum cgroup_bpf_attach_type atype,
493 				     struct bpf_prog_array *old_array)
494 {
495 	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
496 					lockdep_is_held(&cgroup_mutex));
497 	/* free prog array after grace period, since __cgroup_bpf_run_*()
498 	 * might be still walking the array
499 	 */
500 	bpf_prog_array_free(old_array);
501 }
502 
503 /**
504  * cgroup_bpf_inherit() - inherit effective programs from parent
505  * @cgrp: the cgroup to modify
506  */
507 static int cgroup_bpf_inherit(struct cgroup *cgrp)
508 {
509 /* has to use a macro instead of a const int, since the compiler thinks
510  * that the array below is variable length
511  */
512 #define	NR ARRAY_SIZE(cgrp->bpf.effective)
513 	struct bpf_prog_array *arrays[NR] = {};
514 	struct cgroup *p;
515 	int ret, i;
516 
517 	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
518 			      GFP_KERNEL);
519 	if (ret)
520 		return ret;
521 
522 	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
523 		cgroup_bpf_get(p);
524 
525 	for (i = 0; i < NR; i++)
526 		INIT_HLIST_HEAD(&cgrp->bpf.progs[i]);
527 
528 	INIT_LIST_HEAD(&cgrp->bpf.storages);
529 
530 	for (i = 0; i < NR; i++)
531 		if (compute_effective_progs(cgrp, i, &arrays[i]))
532 			goto cleanup;
533 
534 	for (i = 0; i < NR; i++)
535 		activate_effective_progs(cgrp, i, arrays[i]);
536 
537 	return 0;
538 cleanup:
539 	for (i = 0; i < NR; i++)
540 		bpf_prog_array_free(arrays[i]);
541 
542 	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
543 		cgroup_bpf_put(p);
544 
545 	percpu_ref_exit(&cgrp->bpf.refcnt);
546 
547 	return -ENOMEM;
548 }
549 
550 static int cgroup_bpf_lifetime_notify(struct notifier_block *nb,
551 				      unsigned long action, void *data)
552 {
553 	struct cgroup *cgrp = data;
554 	int ret = 0;
555 
556 	if (cgrp->root != &cgrp_dfl_root)
557 		return NOTIFY_OK;
558 
559 	switch (action) {
560 	case CGROUP_LIFETIME_ONLINE:
561 		ret = cgroup_bpf_inherit(cgrp);
562 		break;
563 	case CGROUP_LIFETIME_OFFLINE:
564 		cgroup_bpf_offline(cgrp);
565 		break;
566 	}
567 
568 	return notifier_from_errno(ret);
569 }
570 
571 static int update_effective_progs(struct cgroup *cgrp,
572 				  enum cgroup_bpf_attach_type atype)
573 {
574 	struct cgroup_subsys_state *css;
575 	int err;
576 
577 	/* allocate and recompute effective prog arrays */
578 	css_for_each_descendant_pre(css, &cgrp->self) {
579 		struct cgroup *desc = container_of(css, struct cgroup, self);
580 
581 		if (percpu_ref_is_zero(&desc->bpf.refcnt))
582 			continue;
583 
584 		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
585 		if (err)
586 			goto cleanup;
587 	}
588 
589 	/* all allocations were successful. Activate all prog arrays */
590 	css_for_each_descendant_pre(css, &cgrp->self) {
591 		struct cgroup *desc = container_of(css, struct cgroup, self);
592 
593 		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
594 			if (unlikely(desc->bpf.inactive)) {
595 				bpf_prog_array_free(desc->bpf.inactive);
596 				desc->bpf.inactive = NULL;
597 			}
598 			continue;
599 		}
600 
601 		activate_effective_progs(desc, atype, desc->bpf.inactive);
602 		desc->bpf.inactive = NULL;
603 	}
604 
605 	return 0;
606 
607 cleanup:
608 	/* oom while computing effective. Free all computed effective arrays
609 	 * since they were not activated
610 	 */
611 	css_for_each_descendant_pre(css, &cgrp->self) {
612 		struct cgroup *desc = container_of(css, struct cgroup, self);
613 
614 		bpf_prog_array_free(desc->bpf.inactive);
615 		desc->bpf.inactive = NULL;
616 	}
617 
618 	return err;
619 }
620 
621 #define BPF_CGROUP_MAX_PROGS 64
622 
623 static struct bpf_prog_list *find_attach_entry(struct hlist_head *progs,
624 					       struct bpf_prog *prog,
625 					       struct bpf_cgroup_link *link,
626 					       struct bpf_prog *replace_prog,
627 					       bool allow_multi)
628 {
629 	struct bpf_prog_list *pl;
630 
631 	/* single-attach case */
632 	if (!allow_multi) {
633 		if (hlist_empty(progs))
634 			return NULL;
635 		return hlist_entry(progs->first, typeof(*pl), node);
636 	}
637 
638 	hlist_for_each_entry(pl, progs, node) {
639 		if (prog && pl->prog == prog && prog != replace_prog)
640 			/* disallow attaching the same prog twice */
641 			return ERR_PTR(-EINVAL);
642 		if (link && pl->link == link)
643 			/* disallow attaching the same link twice */
644 			return ERR_PTR(-EINVAL);
645 	}
646 
647 	/* direct prog multi-attach w/ replacement case */
648 	if (replace_prog) {
649 		hlist_for_each_entry(pl, progs, node) {
650 			if (pl->prog == replace_prog)
651 				/* a match found */
652 				return pl;
653 		}
654 		/* prog to replace not found for cgroup */
655 		return ERR_PTR(-ENOENT);
656 	}
657 
658 	return NULL;
659 }
660 
661 static struct bpf_link *bpf_get_anchor_link(u32 flags, u32 id_or_fd)
662 {
663 	struct bpf_link *link = ERR_PTR(-EINVAL);
664 
665 	if (flags & BPF_F_ID)
666 		link = bpf_link_by_id(id_or_fd);
667 	else if (id_or_fd)
668 		link = bpf_link_get_from_fd(id_or_fd);
669 	return link;
670 }
671 
672 static struct bpf_prog *bpf_get_anchor_prog(u32 flags, u32 id_or_fd)
673 {
674 	struct bpf_prog *prog = ERR_PTR(-EINVAL);
675 
676 	if (flags & BPF_F_ID)
677 		prog = bpf_prog_by_id(id_or_fd);
678 	else if (id_or_fd)
679 		prog = bpf_prog_get(id_or_fd);
680 	return prog;
681 }
682 
683 static struct bpf_prog_list *get_prog_list(struct hlist_head *progs, struct bpf_prog *prog,
684 					   struct bpf_cgroup_link *link, u32 flags, u32 id_or_fd)
685 {
686 	bool is_link = flags & BPF_F_LINK, is_id = flags & BPF_F_ID;
687 	struct bpf_prog_list *pltmp, *pl = ERR_PTR(-EINVAL);
688 	bool preorder = flags & BPF_F_PREORDER;
689 	struct bpf_link *anchor_link = NULL;
690 	struct bpf_prog *anchor_prog = NULL;
691 	bool is_before, is_after;
692 
693 	is_before = flags & BPF_F_BEFORE;
694 	is_after = flags & BPF_F_AFTER;
695 	if (is_link || is_id || id_or_fd) {
696 		/* flags must have either BPF_F_BEFORE or BPF_F_AFTER */
697 		if (is_before == is_after)
698 			return ERR_PTR(-EINVAL);
699 		if ((is_link && !link) || (!is_link && !prog))
700 			return ERR_PTR(-EINVAL);
701 	} else if (!hlist_empty(progs)) {
702 		/* flags cannot have both BPF_F_BEFORE and BPF_F_AFTER */
703 		if (is_before && is_after)
704 			return ERR_PTR(-EINVAL);
705 	}
706 
707 	if (is_link) {
708 		anchor_link = bpf_get_anchor_link(flags, id_or_fd);
709 		if (IS_ERR(anchor_link))
710 			return ERR_PTR(PTR_ERR(anchor_link));
711 	} else if (is_id || id_or_fd) {
712 		anchor_prog = bpf_get_anchor_prog(flags, id_or_fd);
713 		if (IS_ERR(anchor_prog))
714 			return ERR_PTR(PTR_ERR(anchor_prog));
715 	}
716 
717 	if (!anchor_prog && !anchor_link) {
718 		/* if there is no anchor_prog/anchor_link, then BPF_F_PREORDER
719 		 * doesn't matter since either prepend or append to a combined
720 		 * list of progs will end up with correct result.
721 		 */
722 		hlist_for_each_entry(pltmp, progs, node) {
723 			if (is_before)
724 				return pltmp;
725 			if (pltmp->node.next)
726 				continue;
727 			return pltmp;
728 		}
729 		return NULL;
730 	}
731 
732 	hlist_for_each_entry(pltmp, progs, node) {
733 		if ((anchor_prog && anchor_prog == pltmp->prog) ||
734 		    (anchor_link && anchor_link == &pltmp->link->link)) {
735 			if (!!(pltmp->flags & BPF_F_PREORDER) != preorder)
736 				goto out;
737 			pl = pltmp;
738 			goto out;
739 		}
740 	}
741 
742 	pl = ERR_PTR(-ENOENT);
743 out:
744 	if (anchor_link)
745 		bpf_link_put(anchor_link);
746 	else
747 		bpf_prog_put(anchor_prog);
748 	return pl;
749 }
750 
751 static int insert_pl_to_hlist(struct bpf_prog_list *pl, struct hlist_head *progs,
752 			      struct bpf_prog *prog, struct bpf_cgroup_link *link,
753 			      u32 flags, u32 id_or_fd)
754 {
755 	struct bpf_prog_list *pltmp;
756 
757 	pltmp = get_prog_list(progs, prog, link, flags, id_or_fd);
758 	if (IS_ERR(pltmp))
759 		return PTR_ERR(pltmp);
760 
761 	if (!pltmp)
762 		hlist_add_head(&pl->node, progs);
763 	else if (flags & BPF_F_BEFORE)
764 		hlist_add_before(&pl->node, &pltmp->node);
765 	else
766 		hlist_add_behind(&pl->node, &pltmp->node);
767 
768 	return 0;
769 }
770 
771 /**
772  * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
773  *                         propagate the change to descendants
774  * @cgrp: The cgroup which descendants to traverse
775  * @prog: A program to attach
776  * @link: A link to attach
777  * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
778  * @type: Type of attach operation
779  * @flags: Option flags
780  * @id_or_fd: Relative prog id or fd
781  * @revision: bpf_prog_list revision
782  *
783  * Exactly one of @prog or @link can be non-null.
784  * Must be called with cgroup_mutex held.
785  */
786 static int __cgroup_bpf_attach(struct cgroup *cgrp,
787 			       struct bpf_prog *prog, struct bpf_prog *replace_prog,
788 			       struct bpf_cgroup_link *link,
789 			       enum bpf_attach_type type, u32 flags, u32 id_or_fd,
790 			       u64 revision)
791 {
792 	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
793 	struct bpf_prog *old_prog = NULL;
794 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
795 	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
796 	struct bpf_prog *new_prog = prog ? : link->link.prog;
797 	enum cgroup_bpf_attach_type atype;
798 	struct bpf_prog_list *pl;
799 	struct hlist_head *progs;
800 	int err;
801 
802 	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
803 	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
804 		/* invalid combination */
805 		return -EINVAL;
806 	if ((flags & BPF_F_REPLACE) && (flags & (BPF_F_BEFORE | BPF_F_AFTER)))
807 		/* BPF_F_REPLACE cannot be combined with before/after insertion */
808 		return -EINVAL;
809 	if (link && (prog || replace_prog))
810 		/* only either link or prog/replace_prog can be specified */
811 		return -EINVAL;
812 	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
813 		/* replace_prog implies BPF_F_REPLACE, and vice versa */
814 		return -EINVAL;
815 
816 	atype = bpf_cgroup_atype_find(type, new_prog->aux->attach_btf_id);
817 	if (atype < 0)
818 		return -EINVAL;
819 	if (revision && revision != cgrp->bpf.revisions[atype])
820 		return -ESTALE;
821 
822 	progs = &cgrp->bpf.progs[atype];
823 
824 	if (!hierarchy_allows_attach(cgrp, atype))
825 		return -EPERM;
826 
827 	if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
828 		/* Disallow attaching non-overridable on top
829 		 * of existing overridable in this cgroup.
830 		 * Disallow attaching multi-prog if overridable or none
831 		 */
832 		return -EPERM;
833 
834 	if (prog_list_length(progs, NULL) >= BPF_CGROUP_MAX_PROGS)
835 		return -E2BIG;
836 
837 	pl = find_attach_entry(progs, prog, link, replace_prog,
838 			       flags & BPF_F_ALLOW_MULTI);
839 	if (IS_ERR(pl))
840 		return PTR_ERR(pl);
841 
842 	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
843 				      prog ? : link->link.prog, cgrp))
844 		return -ENOMEM;
845 
846 	if (pl) {
847 		old_prog = pl->prog;
848 	} else {
849 		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
850 		if (!pl) {
851 			bpf_cgroup_storages_free(new_storage);
852 			return -ENOMEM;
853 		}
854 
855 		err = insert_pl_to_hlist(pl, progs, prog, link, flags, id_or_fd);
856 		if (err) {
857 			kfree(pl);
858 			bpf_cgroup_storages_free(new_storage);
859 			return err;
860 		}
861 	}
862 
863 	pl->prog = prog;
864 	pl->link = link;
865 	pl->flags = flags;
866 	bpf_cgroup_storages_assign(pl->storage, storage);
867 	cgrp->bpf.flags[atype] = saved_flags;
868 
869 	if (type == BPF_LSM_CGROUP) {
870 		err = bpf_trampoline_link_cgroup_shim(new_prog, atype);
871 		if (err)
872 			goto cleanup;
873 	}
874 
875 	err = update_effective_progs(cgrp, atype);
876 	if (err)
877 		goto cleanup_trampoline;
878 
879 	cgrp->bpf.revisions[atype] += 1;
880 	if (old_prog) {
881 		if (type == BPF_LSM_CGROUP)
882 			bpf_trampoline_unlink_cgroup_shim(old_prog);
883 		bpf_prog_put(old_prog);
884 	} else {
885 		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
886 	}
887 	bpf_cgroup_storages_link(new_storage, cgrp, type);
888 	return 0;
889 
890 cleanup_trampoline:
891 	if (type == BPF_LSM_CGROUP)
892 		bpf_trampoline_unlink_cgroup_shim(new_prog);
893 
894 cleanup:
895 	if (old_prog) {
896 		pl->prog = old_prog;
897 		pl->link = NULL;
898 	}
899 	bpf_cgroup_storages_free(new_storage);
900 	if (!old_prog) {
901 		hlist_del(&pl->node);
902 		kfree(pl);
903 	}
904 	return err;
905 }
906 
907 static int cgroup_bpf_attach(struct cgroup *cgrp,
908 			     struct bpf_prog *prog, struct bpf_prog *replace_prog,
909 			     struct bpf_cgroup_link *link,
910 			     enum bpf_attach_type type,
911 			     u32 flags, u32 id_or_fd, u64 revision)
912 {
913 	int ret;
914 
915 	cgroup_lock();
916 	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags,
917 				  id_or_fd, revision);
918 	cgroup_unlock();
919 	return ret;
920 }
921 
922 /* Swap updated BPF program for given link in effective program arrays across
923  * all descendant cgroups. This function is guaranteed to succeed.
924  */
925 static void replace_effective_prog(struct cgroup *cgrp,
926 				   enum cgroup_bpf_attach_type atype,
927 				   struct bpf_cgroup_link *link)
928 {
929 	struct bpf_prog_array_item *item;
930 	struct cgroup_subsys_state *css;
931 	struct bpf_prog_array *progs;
932 	struct bpf_prog_list *pl;
933 	struct hlist_head *head;
934 	struct cgroup *cg;
935 	int pos;
936 
937 	css_for_each_descendant_pre(css, &cgrp->self) {
938 		struct cgroup *desc = container_of(css, struct cgroup, self);
939 
940 		if (percpu_ref_is_zero(&desc->bpf.refcnt))
941 			continue;
942 
943 		/* find position of link in effective progs array */
944 		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
945 			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
946 				continue;
947 
948 			head = &cg->bpf.progs[atype];
949 			hlist_for_each_entry(pl, head, node) {
950 				if (!prog_list_prog(pl))
951 					continue;
952 				if (pl->link == link)
953 					goto found;
954 				pos++;
955 			}
956 		}
957 found:
958 		BUG_ON(!cg);
959 		progs = rcu_dereference_protected(
960 				desc->bpf.effective[atype],
961 				lockdep_is_held(&cgroup_mutex));
962 		item = &progs->items[pos];
963 		WRITE_ONCE(item->prog, link->link.prog);
964 	}
965 }
966 
967 /**
968  * __cgroup_bpf_replace() - Replace link's program and propagate the change
969  *                          to descendants
970  * @cgrp: The cgroup which descendants to traverse
971  * @link: A link for which to replace BPF program
972  * @new_prog: &struct bpf_prog for the target BPF program with its refcnt
973  *            incremented
974  *
975  * Must be called with cgroup_mutex held.
976  */
977 static int __cgroup_bpf_replace(struct cgroup *cgrp,
978 				struct bpf_cgroup_link *link,
979 				struct bpf_prog *new_prog)
980 {
981 	enum cgroup_bpf_attach_type atype;
982 	struct bpf_prog *old_prog;
983 	struct bpf_prog_list *pl;
984 	struct hlist_head *progs;
985 	bool found = false;
986 
987 	atype = bpf_cgroup_atype_find(link->type, new_prog->aux->attach_btf_id);
988 	if (atype < 0)
989 		return -EINVAL;
990 
991 	progs = &cgrp->bpf.progs[atype];
992 
993 	if (link->link.prog->type != new_prog->type)
994 		return -EINVAL;
995 
996 	hlist_for_each_entry(pl, progs, node) {
997 		if (pl->link == link) {
998 			found = true;
999 			break;
1000 		}
1001 	}
1002 	if (!found)
1003 		return -ENOENT;
1004 
1005 	cgrp->bpf.revisions[atype] += 1;
1006 	old_prog = xchg(&link->link.prog, new_prog);
1007 	replace_effective_prog(cgrp, atype, link);
1008 	bpf_prog_put(old_prog);
1009 	return 0;
1010 }
1011 
1012 static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
1013 			      struct bpf_prog *old_prog)
1014 {
1015 	struct bpf_cgroup_link *cg_link;
1016 	int ret;
1017 
1018 	cg_link = container_of(link, struct bpf_cgroup_link, link);
1019 
1020 	cgroup_lock();
1021 	/* link might have been auto-released by dying cgroup, so fail */
1022 	if (!cg_link->cgroup) {
1023 		ret = -ENOLINK;
1024 		goto out_unlock;
1025 	}
1026 	if (old_prog && link->prog != old_prog) {
1027 		ret = -EPERM;
1028 		goto out_unlock;
1029 	}
1030 	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
1031 out_unlock:
1032 	cgroup_unlock();
1033 	return ret;
1034 }
1035 
1036 static struct bpf_prog_list *find_detach_entry(struct hlist_head *progs,
1037 					       struct bpf_prog *prog,
1038 					       struct bpf_cgroup_link *link,
1039 					       bool allow_multi)
1040 {
1041 	struct bpf_prog_list *pl;
1042 
1043 	if (!allow_multi) {
1044 		if (hlist_empty(progs))
1045 			/* report error when trying to detach and nothing is attached */
1046 			return ERR_PTR(-ENOENT);
1047 
1048 		/* to maintain backward compatibility NONE and OVERRIDE cgroups
1049 		 * allow detaching with invalid FD (prog==NULL) in legacy mode
1050 		 */
1051 		return hlist_entry(progs->first, typeof(*pl), node);
1052 	}
1053 
1054 	if (!prog && !link)
1055 		/* to detach MULTI prog the user has to specify valid FD
1056 		 * of the program or link to be detached
1057 		 */
1058 		return ERR_PTR(-EINVAL);
1059 
1060 	/* find the prog or link and detach it */
1061 	hlist_for_each_entry(pl, progs, node) {
1062 		if (pl->prog == prog && pl->link == link)
1063 			return pl;
1064 	}
1065 	return ERR_PTR(-ENOENT);
1066 }
1067 
1068 /**
1069  * purge_effective_progs() - After compute_effective_progs fails to alloc a new
1070  *                           cgrp->bpf.inactive table, recover by recomputing
1071  *                           the effective array in place.
1072  *
1073  * @cgrp: The cgroup which descendants to traverse
1074  * @prog: A program to detach or NULL
1075  * @link: A link to detach or NULL
1076  * @atype: Type of detach operation
1077  */
1078 static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
1079 				  struct bpf_cgroup_link *link,
1080 				  enum cgroup_bpf_attach_type atype)
1081 {
1082 	struct cgroup_subsys_state *css;
1083 	struct bpf_prog_array *progs;
1084 	struct bpf_prog_list *pl;
1085 	struct hlist_head *head;
1086 	struct cgroup *cg;
1087 	int pos;
1088 
1089 	/* recompute effective prog array in place */
1090 	css_for_each_descendant_pre(css, &cgrp->self) {
1091 		struct cgroup *desc = container_of(css, struct cgroup, self);
1092 
1093 		if (percpu_ref_is_zero(&desc->bpf.refcnt))
1094 			continue;
1095 
1096 		/* find position of link or prog in effective progs array */
1097 		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
1098 			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
1099 				continue;
1100 
1101 			head = &cg->bpf.progs[atype];
1102 			hlist_for_each_entry(pl, head, node) {
1103 				if (!prog_list_prog(pl))
1104 					continue;
1105 				if (pl->prog == prog && pl->link == link)
1106 					goto found;
1107 				pos++;
1108 			}
1109 		}
1110 
1111 		/* no link or prog match, skip the cgroup of this layer */
1112 		continue;
1113 found:
1114 		progs = rcu_dereference_protected(
1115 				desc->bpf.effective[atype],
1116 				lockdep_is_held(&cgroup_mutex));
1117 
1118 		/* Remove the program from the array */
1119 		WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
1120 			  "Failed to purge a prog from array at index %d", pos);
1121 	}
1122 }
1123 
1124 /**
1125  * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
1126  *                         propagate the change to descendants
1127  * @cgrp: The cgroup which descendants to traverse
1128  * @prog: A program to detach or NULL
1129  * @link: A link to detach or NULL
1130  * @type: Type of detach operation
1131  * @revision: bpf_prog_list revision
1132  *
1133  * At most one of @prog or @link can be non-NULL.
1134  * Must be called with cgroup_mutex held.
1135  */
1136 static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
1137 			       struct bpf_cgroup_link *link, enum bpf_attach_type type,
1138 			       u64 revision)
1139 {
1140 	enum cgroup_bpf_attach_type atype;
1141 	struct bpf_prog *old_prog;
1142 	struct bpf_prog_list *pl;
1143 	struct hlist_head *progs;
1144 	u32 attach_btf_id = 0;
1145 	u32 flags;
1146 
1147 	if (prog)
1148 		attach_btf_id = prog->aux->attach_btf_id;
1149 	if (link)
1150 		attach_btf_id = link->link.prog->aux->attach_btf_id;
1151 
1152 	atype = bpf_cgroup_atype_find(type, attach_btf_id);
1153 	if (atype < 0)
1154 		return -EINVAL;
1155 
1156 	if (revision && revision != cgrp->bpf.revisions[atype])
1157 		return -ESTALE;
1158 
1159 	progs = &cgrp->bpf.progs[atype];
1160 	flags = cgrp->bpf.flags[atype];
1161 
1162 	if (prog && link)
1163 		/* only one of prog or link can be specified */
1164 		return -EINVAL;
1165 
1166 	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
1167 	if (IS_ERR(pl))
1168 		return PTR_ERR(pl);
1169 
1170 	/* mark it deleted, so it's ignored while recomputing effective */
1171 	old_prog = pl->prog;
1172 	pl->prog = NULL;
1173 	pl->link = NULL;
1174 
1175 	if (update_effective_progs(cgrp, atype)) {
1176 		/* if updating the effective array failed, replace the prog with a dummy prog */
1177 		pl->prog = old_prog;
1178 		pl->link = link;
1179 		purge_effective_progs(cgrp, old_prog, link, atype);
1180 	}
1181 
1182 	/* now can actually delete it from this cgroup list */
1183 	hlist_del(&pl->node);
1184 	cgrp->bpf.revisions[atype] += 1;
1185 
1186 	kfree(pl);
1187 	if (hlist_empty(progs))
1188 		/* last program was detached, reset flags to zero */
1189 		cgrp->bpf.flags[atype] = 0;
1190 	if (old_prog) {
1191 		if (type == BPF_LSM_CGROUP)
1192 			bpf_trampoline_unlink_cgroup_shim(old_prog);
1193 		bpf_prog_put(old_prog);
1194 	}
1195 	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
1196 	return 0;
1197 }
1198 
1199 static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
1200 			     enum bpf_attach_type type, u64 revision)
1201 {
1202 	int ret;
1203 
1204 	cgroup_lock();
1205 	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type, revision);
1206 	cgroup_unlock();
1207 	return ret;
1208 }
1209 
1210 /* Must be called with cgroup_mutex held to avoid races. */
1211 static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
1212 			      union bpf_attr __user *uattr)
1213 {
1214 	__u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
1215 	bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE;
1216 	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1217 	enum bpf_attach_type type = attr->query.attach_type;
1218 	enum cgroup_bpf_attach_type from_atype, to_atype;
1219 	enum cgroup_bpf_attach_type atype;
1220 	struct bpf_prog_array *effective;
1221 	int cnt, ret = 0, i;
1222 	int total_cnt = 0;
1223 	u64 revision = 0;
1224 	u32 flags;
1225 
1226 	if (effective_query && prog_attach_flags)
1227 		return -EINVAL;
1228 
1229 	if (type == BPF_LSM_CGROUP) {
1230 		if (!effective_query && attr->query.prog_cnt &&
1231 		    prog_ids && !prog_attach_flags)
1232 			return -EINVAL;
1233 
1234 		from_atype = CGROUP_LSM_START;
1235 		to_atype = CGROUP_LSM_END;
1236 		flags = 0;
1237 	} else {
1238 		from_atype = to_cgroup_bpf_attach_type(type);
1239 		if (from_atype < 0)
1240 			return -EINVAL;
1241 		to_atype = from_atype;
1242 		flags = cgrp->bpf.flags[from_atype];
1243 	}
1244 
1245 	for (atype = from_atype; atype <= to_atype; atype++) {
1246 		if (effective_query) {
1247 			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
1248 							      lockdep_is_held(&cgroup_mutex));
1249 			total_cnt += bpf_prog_array_length(effective);
1250 		} else {
1251 			total_cnt += prog_list_length(&cgrp->bpf.progs[atype], NULL);
1252 		}
1253 	}
1254 
1255 	/* always output uattr->query.attach_flags as 0 during effective query */
1256 	flags = effective_query ? 0 : flags;
1257 	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
1258 		return -EFAULT;
1259 	if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
1260 		return -EFAULT;
1261 	if (!effective_query && from_atype == to_atype)
1262 		revision = cgrp->bpf.revisions[from_atype];
1263 	if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
1264 		return -EFAULT;
1265 	if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
1266 		/* return early if user requested only program count + flags */
1267 		return 0;
1268 
1269 	if (attr->query.prog_cnt < total_cnt) {
1270 		total_cnt = attr->query.prog_cnt;
1271 		ret = -ENOSPC;
1272 	}
1273 
1274 	for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
1275 		if (effective_query) {
1276 			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
1277 							      lockdep_is_held(&cgroup_mutex));
1278 			cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
1279 			ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
1280 		} else {
1281 			struct hlist_head *progs;
1282 			struct bpf_prog_list *pl;
1283 			struct bpf_prog *prog;
1284 			u32 id;
1285 
1286 			progs = &cgrp->bpf.progs[atype];
1287 			cnt = min_t(int, prog_list_length(progs, NULL), total_cnt);
1288 			i = 0;
1289 			hlist_for_each_entry(pl, progs, node) {
1290 				prog = prog_list_prog(pl);
1291 				id = prog->aux->id;
1292 				if (copy_to_user(prog_ids + i, &id, sizeof(id)))
1293 					return -EFAULT;
1294 				if (++i == cnt)
1295 					break;
1296 			}
1297 
1298 			if (prog_attach_flags) {
1299 				flags = cgrp->bpf.flags[atype];
1300 
1301 				for (i = 0; i < cnt; i++)
1302 					if (copy_to_user(prog_attach_flags + i,
1303 							 &flags, sizeof(flags)))
1304 						return -EFAULT;
1305 				prog_attach_flags += cnt;
1306 			}
1307 		}
1308 
1309 		prog_ids += cnt;
1310 		total_cnt -= cnt;
1311 	}
1312 	return ret;
1313 }
1314 
1315 static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
1316 			    union bpf_attr __user *uattr)
1317 {
1318 	int ret;
1319 
1320 	cgroup_lock();
1321 	ret = __cgroup_bpf_query(cgrp, attr, uattr);
1322 	cgroup_unlock();
1323 	return ret;
1324 }
1325 
1326 int cgroup_bpf_prog_attach(const union bpf_attr *attr,
1327 			   enum bpf_prog_type ptype, struct bpf_prog *prog)
1328 {
1329 	struct bpf_prog *replace_prog = NULL;
1330 	struct cgroup *cgrp;
1331 	int ret;
1332 
1333 	cgrp = cgroup_get_from_fd(attr->target_fd);
1334 	if (IS_ERR(cgrp))
1335 		return PTR_ERR(cgrp);
1336 
1337 	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
1338 	    (attr->attach_flags & BPF_F_REPLACE)) {
1339 		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
1340 		if (IS_ERR(replace_prog)) {
1341 			cgroup_put(cgrp);
1342 			return PTR_ERR(replace_prog);
1343 		}
1344 	}
1345 
1346 	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
1347 				attr->attach_type, attr->attach_flags,
1348 				attr->relative_fd, attr->expected_revision);
1349 
1350 	if (replace_prog)
1351 		bpf_prog_put(replace_prog);
1352 	cgroup_put(cgrp);
1353 	return ret;
1354 }
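
/*
 * Illustrative sketch (not part of the original source): the userspace side
 * of the attach path above via the raw bpf(2) command.  cgroup_fd and
 * prog_fd are assumed to be valid fds; error handling is omitted.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;		// open()ed cgroup directory
 *	attr.attach_bpf_fd = prog_fd;		// loaded cgroup program
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	// optional relative placement, resolved by get_prog_list():
 *	// attr.attach_flags |= BPF_F_BEFORE;
 *	// attr.relative_fd   = anchor_prog_fd;
 *	// attr.expected_revision = rev;	// 0 == don't check
 *
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */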
1355 
1356 int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
1357 {
1358 	struct bpf_prog *prog;
1359 	struct cgroup *cgrp;
1360 	int ret;
1361 
1362 	cgrp = cgroup_get_from_fd(attr->target_fd);
1363 	if (IS_ERR(cgrp))
1364 		return PTR_ERR(cgrp);
1365 
1366 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
1367 	if (IS_ERR(prog))
1368 		prog = NULL;
1369 
1370 	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, attr->expected_revision);
1371 	if (prog)
1372 		bpf_prog_put(prog);
1373 
1374 	cgroup_put(cgrp);
1375 	return ret;
1376 }
1377 
1378 static void bpf_cgroup_link_release(struct bpf_link *link)
1379 {
1380 	struct bpf_cgroup_link *cg_link =
1381 		container_of(link, struct bpf_cgroup_link, link);
1382 	struct cgroup *cg;
1383 
1384 	/* link might have been auto-detached by dying cgroup already,
1385 	 * in that case our work is done here
1386 	 */
1387 	if (!cg_link->cgroup)
1388 		return;
1389 
1390 	cgroup_lock();
1391 
1392 	/* re-check cgroup under lock again */
1393 	if (!cg_link->cgroup) {
1394 		cgroup_unlock();
1395 		return;
1396 	}
1397 
1398 	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
1399 				    cg_link->type, 0));
1400 	if (cg_link->type == BPF_LSM_CGROUP)
1401 		bpf_trampoline_unlink_cgroup_shim(cg_link->link.prog);
1402 
1403 	cg = cg_link->cgroup;
1404 	cg_link->cgroup = NULL;
1405 
1406 	cgroup_unlock();
1407 
1408 	cgroup_put(cg);
1409 }
1410 
1411 static void bpf_cgroup_link_dealloc(struct bpf_link *link)
1412 {
1413 	struct bpf_cgroup_link *cg_link =
1414 		container_of(link, struct bpf_cgroup_link, link);
1415 
1416 	kfree(cg_link);
1417 }
1418 
1419 static int bpf_cgroup_link_detach(struct bpf_link *link)
1420 {
1421 	bpf_cgroup_link_release(link);
1422 
1423 	return 0;
1424 }
1425 
1426 static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
1427 					struct seq_file *seq)
1428 {
1429 	struct bpf_cgroup_link *cg_link =
1430 		container_of(link, struct bpf_cgroup_link, link);
1431 	u64 cg_id = 0;
1432 
1433 	cgroup_lock();
1434 	if (cg_link->cgroup)
1435 		cg_id = cgroup_id(cg_link->cgroup);
1436 	cgroup_unlock();
1437 
1438 	seq_printf(seq,
1439 		   "cgroup_id:\t%llu\n"
1440 		   "attach_type:\t%d\n",
1441 		   cg_id,
1442 		   cg_link->type);
1443 }
1444 
1445 static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
1446 					  struct bpf_link_info *info)
1447 {
1448 	struct bpf_cgroup_link *cg_link =
1449 		container_of(link, struct bpf_cgroup_link, link);
1450 	u64 cg_id = 0;
1451 
1452 	cgroup_lock();
1453 	if (cg_link->cgroup)
1454 		cg_id = cgroup_id(cg_link->cgroup);
1455 	cgroup_unlock();
1456 
1457 	info->cgroup.cgroup_id = cg_id;
1458 	info->cgroup.attach_type = cg_link->type;
1459 	return 0;
1460 }
1461 
1462 static const struct bpf_link_ops bpf_cgroup_link_lops = {
1463 	.release = bpf_cgroup_link_release,
1464 	.dealloc = bpf_cgroup_link_dealloc,
1465 	.detach = bpf_cgroup_link_detach,
1466 	.update_prog = cgroup_bpf_replace,
1467 	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
1468 	.fill_link_info = bpf_cgroup_link_fill_link_info,
1469 };
1470 
1471 #define BPF_F_LINK_ATTACH_MASK	\
1472 	(BPF_F_ID |		\
1473 	 BPF_F_BEFORE |		\
1474 	 BPF_F_AFTER |		\
1475 	 BPF_F_PREORDER |	\
1476 	 BPF_F_LINK)
1477 
1478 int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
1479 {
1480 	struct bpf_link_primer link_primer;
1481 	struct bpf_cgroup_link *link;
1482 	struct cgroup *cgrp;
1483 	int err;
1484 
1485 	if (attr->link_create.flags & (~BPF_F_LINK_ATTACH_MASK))
1486 		return -EINVAL;
1487 
1488 	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
1489 	if (IS_ERR(cgrp))
1490 		return PTR_ERR(cgrp);
1491 
1492 	link = kzalloc(sizeof(*link), GFP_USER);
1493 	if (!link) {
1494 		err = -ENOMEM;
1495 		goto out_put_cgroup;
1496 	}
1497 	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
1498 		      prog);
1499 	link->cgroup = cgrp;
1500 	link->type = attr->link_create.attach_type;
1501 
1502 	err = bpf_link_prime(&link->link, &link_primer);
1503 	if (err) {
1504 		kfree(link);
1505 		goto out_put_cgroup;
1506 	}
1507 
1508 	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
1509 				link->type, BPF_F_ALLOW_MULTI | attr->link_create.flags,
1510 				attr->link_create.cgroup.relative_fd,
1511 				attr->link_create.cgroup.expected_revision);
1512 	if (err) {
1513 		bpf_link_cleanup(&link_primer);
1514 		goto out_put_cgroup;
1515 	}
1516 
1517 	return bpf_link_settle(&link_primer);
1518 
1519 out_put_cgroup:
1520 	cgroup_put(cgrp);
1521 	return err;
1522 }
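
/*
 * Illustrative sketch (not part of the original source): the same
 * attachment expressed as an fd-based bpf_link via BPF_LINK_CREATE, which
 * is what feeds this function.  The link auto-detaches when the cgroup
 * dies and is freed when its last fd is closed.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd     = prog_fd;
 *	attr.link_create.target_fd   = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
 *	// optional placement relative to an already attached link/prog:
 *	// attr.link_create.flags = BPF_F_AFTER;
 *	// attr.link_create.cgroup.relative_fd = anchor_fd;
 *
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */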
1523 
1524 int cgroup_bpf_prog_query(const union bpf_attr *attr,
1525 			  union bpf_attr __user *uattr)
1526 {
1527 	struct cgroup *cgrp;
1528 	int ret;
1529 
1530 	cgrp = cgroup_get_from_fd(attr->query.target_fd);
1531 	if (IS_ERR(cgrp))
1532 		return PTR_ERR(cgrp);
1533 
1534 	ret = cgroup_bpf_query(cgrp, attr, uattr);
1535 
1536 	cgroup_put(cgrp);
1537 	return ret;
1538 }
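
/*
 * Illustrative sketch (not part of the original source): querying the
 * attached (or effective) programs of a cgroup with BPF_PROG_QUERY.
 * prog_cnt is a value/result field, so a first call with prog_ids == NULL
 * can be used to size the id array.
 *
 *	__u32 ids[64];
 *	union bpf_attr attr = {};
 *
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.query.query_flags = 0;	// or BPF_F_QUERY_EFFECTIVE
 *	attr.query.prog_ids    = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt    = 64;
 *
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *	// attr.query.prog_cnt now holds the number of ids written
 */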
1539 
1540 /**
1541  * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
1542  * @sk: The socket sending or receiving traffic
1543  * @skb: The skb that is being sent or received
1544  * @atype: The type of program to be executed
1545  *
1546  * If no socket is passed, or the socket is not of type INET or INET6,
1547  * this function does nothing and returns 0.
1548  *
1549  * The program type passed in via @atype must be suitable for network
1550  * filtering. No further check is performed to assert that.
1551  *
1552  * For egress packets, this function can return:
1553  *   NET_XMIT_SUCCESS    (0)	- continue with packet output
1554  *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
1555  *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
1556  *				  to call cwr
1557  *   -err			- drop packet
1558  *
1559  * For ingress packets, this function will return -EPERM if any
1560  * attached program was found and if it returned != 1 during execution.
1561  * Otherwise 0 is returned.
1562  */
1563 int __cgroup_bpf_run_filter_skb(struct sock *sk,
1564 				struct sk_buff *skb,
1565 				enum cgroup_bpf_attach_type atype)
1566 {
1567 	unsigned int offset = -skb_network_offset(skb);
1568 	struct sock *save_sk;
1569 	void *saved_data_end;
1570 	struct cgroup *cgrp;
1571 	int ret;
1572 
1573 	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
1574 		return 0;
1575 
1576 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1577 	save_sk = skb->sk;
1578 	skb->sk = sk;
1579 	__skb_push(skb, offset);
1580 
1581 	/* compute pointers for the bpf prog */
1582 	bpf_compute_and_save_data_end(skb, &saved_data_end);
1583 
1584 	if (atype == CGROUP_INET_EGRESS) {
1585 		u32 flags = 0;
1586 		bool cn;
1587 
1588 		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
1589 					    __bpf_prog_run_save_cb, 0, &flags);
1590 
1591 		/* Return values of CGROUP EGRESS BPF programs are:
1592 		 *   0: drop packet
1593 		 *   1: keep packet
1594 		 *   2: drop packet and cn
1595 		 *   3: keep packet and cn
1596 		 *
1597 		 * The returned value is then converted to one of the NET_XMIT
1598 		 * or an error code that is then interpreted as drop packet
1599 		 * (and no cn):
1600 		 *   0: NET_XMIT_SUCCESS  skb should be transmitted
1601 		 *   1: NET_XMIT_DROP     skb should be dropped and cn
1602 		 *   2: NET_XMIT_CN       skb should be transmitted and cn
1603 		 *   3: -err              skb should be dropped
1604 		 */
1605 
1606 		cn = flags & BPF_RET_SET_CN;
1607 		if (ret && !IS_ERR_VALUE((long)ret))
1608 			ret = -EFAULT;
1609 		if (!ret)
1610 			ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);
1611 		else
1612 			ret = (cn ? NET_XMIT_DROP : ret);
1613 	} else {
1614 		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
1615 					    skb, __bpf_prog_run_save_cb, 0,
1616 					    NULL);
1617 		if (ret && !IS_ERR_VALUE((long)ret))
1618 			ret = -EFAULT;
1619 	}
1620 	bpf_restore_data_end(skb, saved_data_end);
1621 	__skb_pull(skb, offset);
1622 	skb->sk = save_sk;
1623 
1624 	return ret;
1625 }
1626 EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
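
/*
 * Illustrative example (not part of the original source): a minimal ingress
 * program for the hook above.  Returning 1 lets the packet through;
 * returning 0 makes bpf_prog_run_array_cg() fold the verdict into -EPERM,
 * which the caller treats as "drop".
 *
 *	SEC("cgroup_skb/ingress")
 *	int ingress_ipv4_only(struct __sk_buff *skb)
 *	{
 *		return skb->family == AF_INET;	// 1 = allow, 0 = drop
 *	}
 */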
1627 
1628 /**
1629  * __cgroup_bpf_run_filter_sk() - Run a program on a sock
1630  * @sk: sock structure to manipulate
1631  * @atype: The type of program to be executed
1632  *
1633  * The socket passed is expected to be of type INET or INET6.
1634  *
1635  * The program type passed in via @atype must be suitable for sock
1636  * filtering. No further check is performed to assert that.
1637  *
1638  * This function will return %-EPERM if an attached program was found
1639  * and it returned != 1 during execution. In all other cases, 0 is returned.
1640  */
1641 int __cgroup_bpf_run_filter_sk(struct sock *sk,
1642 			       enum cgroup_bpf_attach_type atype)
1643 {
1644 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1645 
1646 	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
1647 				     NULL);
1648 }
1649 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
1650 
1651 /**
1652  * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
1653  *                                       provided by user sockaddr
1654  * @sk: sock struct that will use sockaddr
1655  * @uaddr: sockaddr struct provided by user
1656  * @uaddrlen: Pointer to the size of the sockaddr struct provided by user. It is
1657  *            read-only for AF_INET[6] uaddr but can be modified for AF_UNIX
1658  *            uaddr.
1659  * @atype: The type of program to be executed
1660  * @t_ctx: Pointer to attach type specific context
1661  * @flags: Pointer to u32 which contains higher bits of BPF program
1662  *         return value (OR'ed together).
1663  *
1664  * socket is expected to be of type INET, INET6 or UNIX.
1665  *
1666  * This function will return %-EPERM if an attached program is found and
1667  * returned value != 1 during execution. In all other cases, 0 is returned.
1668  */
1669 int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
1670 				      struct sockaddr *uaddr,
1671 				      int *uaddrlen,
1672 				      enum cgroup_bpf_attach_type atype,
1673 				      void *t_ctx,
1674 				      u32 *flags)
1675 {
1676 	struct bpf_sock_addr_kern ctx = {
1677 		.sk = sk,
1678 		.uaddr = uaddr,
1679 		.t_ctx = t_ctx,
1680 	};
1681 	struct sockaddr_storage unspec;
1682 	struct cgroup *cgrp;
1683 	int ret;
1684 
1685 	/* Check the socket family since not all sockets represent a network
1686 	 * endpoint (e.g. AF_UNIX).
1687 	 */
1688 	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6 &&
1689 	    sk->sk_family != AF_UNIX)
1690 		return 0;
1691 
1692 	if (!ctx.uaddr) {
1693 		memset(&unspec, 0, sizeof(unspec));
1694 		ctx.uaddr = (struct sockaddr *)&unspec;
1695 		ctx.uaddrlen = 0;
1696 	} else {
1697 		ctx.uaddrlen = *uaddrlen;
1698 	}
1699 
1700 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1701 	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
1702 				    0, flags);
1703 
1704 	if (!ret && uaddr)
1705 		*uaddrlen = ctx.uaddrlen;
1706 
1707 	return ret;
1708 }
1709 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
1710 
1711 /**
1712  * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
1713  * @sk: socket to get cgroup from
1714  * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
1715  * sk with connection information (IP addresses, etc.). May not contain
1716  * cgroup info if it is a req sock.
1717  * @atype: The type of program to be executed
1718  *
1719  * The socket passed is expected to be of type INET or INET6.
1720  *
1721  * The program type passed in via @atype must be suitable for sock_ops
1722  * filtering. No further check is performed to assert that.
1723  *
1724  * This function will return %-EPERM if an attached program was found
1725  * and it returned != 1 during execution. In all other cases, 0 is returned.
1726  */
1727 int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
1728 				     struct bpf_sock_ops_kern *sock_ops,
1729 				     enum cgroup_bpf_attach_type atype)
1730 {
1731 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1732 
1733 	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
1734 				     0, NULL);
1735 }
1736 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
1737 
1738 int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
1739 				      short access, enum cgroup_bpf_attach_type atype)
1740 {
1741 	struct cgroup *cgrp;
1742 	struct bpf_cgroup_dev_ctx ctx = {
1743 		.access_type = (access << 16) | dev_type,
1744 		.major = major,
1745 		.minor = minor,
1746 	};
1747 	int ret;
1748 
1749 	rcu_read_lock();
1750 	cgrp = task_dfl_cgroup(current);
1751 	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
1752 				    NULL);
1753 	rcu_read_unlock();
1754 
1755 	return ret;
1756 }
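
/*
 * Illustrative example (not part of the original source): a device program
 * consuming the ctx built above.  access_type packs the access bits in the
 * upper 16 bits and the device type in the lower 16 bits.
 *
 *	SEC("cgroup/dev")
 *	int allow_null_only(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		short type = ctx->access_type & 0xffff;
 *
 *		if (type == BPF_DEVCG_DEV_CHAR &&
 *		    ctx->major == 1 && ctx->minor == 3)	// char 1:3, /dev/null
 *			return 1;			// allow
 *		return 0;				// deny, becomes -EPERM
 *	}
 */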
1757 
1758 BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
1759 {
1760 	/* The flags argument is not used now, but it provides the ability
1761 	 * to extend the API.
1762 	 * The verifier checks that its value is correct.
1763 	 */
1764 	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
1765 	struct bpf_cgroup_storage *storage;
1766 	struct bpf_cg_run_ctx *ctx;
1767 	void *ptr;
1768 
1769 	/* get current cgroup storage from BPF run context */
1770 	ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1771 	storage = ctx->prog_item->cgroup_storage[stype];
1772 
1773 	if (stype == BPF_CGROUP_STORAGE_SHARED)
1774 		ptr = &READ_ONCE(storage->buf)->data[0];
1775 	else
1776 		ptr = this_cpu_ptr(storage->percpu_buf);
1777 
1778 	return (unsigned long)ptr;
1779 }
1780 
1781 const struct bpf_func_proto bpf_get_local_storage_proto = {
1782 	.func		= bpf_get_local_storage,
1783 	.gpl_only	= false,
1784 	.ret_type	= RET_PTR_TO_MAP_VALUE,
1785 	.arg1_type	= ARG_CONST_MAP_PTR,
1786 	.arg2_type	= ARG_ANYTHING,
1787 };
1788 
1789 BPF_CALL_0(bpf_get_retval)
1790 {
1791 	struct bpf_cg_run_ctx *ctx =
1792 		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1793 
1794 	return ctx->retval;
1795 }
1796 
1797 const struct bpf_func_proto bpf_get_retval_proto = {
1798 	.func		= bpf_get_retval,
1799 	.gpl_only	= false,
1800 	.ret_type	= RET_INTEGER,
1801 };
1802 
1803 BPF_CALL_1(bpf_set_retval, int, retval)
1804 {
1805 	struct bpf_cg_run_ctx *ctx =
1806 		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1807 
1808 	ctx->retval = retval;
1809 	return 0;
1810 }
1811 
1812 const struct bpf_func_proto bpf_set_retval_proto = {
1813 	.func		= bpf_set_retval,
1814 	.gpl_only	= false,
1815 	.ret_type	= RET_INTEGER,
1816 	.arg1_type	= ARG_ANYTHING,
1817 };
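
/*
 * Illustrative example (not part of the original source, behaviour assumed
 * from the helpers above): a cgroup program can pick the errno reported to
 * userspace instead of the default -EPERM applied in
 * bpf_prog_run_array_cg().
 *
 *	SEC("cgroup/bind4")
 *	int deny_bind_with_eacces(struct bpf_sock_addr *ctx)
 *	{
 *		bpf_set_retval(-EACCES);	// reported instead of -EPERM
 *		return 0;			// reject the bind()
 *	}
 */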
1818 
1819 static const struct bpf_func_proto *
1820 cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1821 {
1822 	const struct bpf_func_proto *func_proto;
1823 
1824 	func_proto = cgroup_common_func_proto(func_id, prog);
1825 	if (func_proto)
1826 		return func_proto;
1827 
1828 	switch (func_id) {
1829 	case BPF_FUNC_perf_event_output:
1830 		return &bpf_event_output_data_proto;
1831 	default:
1832 		return bpf_base_func_proto(func_id, prog);
1833 	}
1834 }
1835 
1836 static bool cgroup_dev_is_valid_access(int off, int size,
1837 				       enum bpf_access_type type,
1838 				       const struct bpf_prog *prog,
1839 				       struct bpf_insn_access_aux *info)
1840 {
1841 	const int size_default = sizeof(__u32);
1842 
1843 	if (type == BPF_WRITE)
1844 		return false;
1845 
1846 	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
1847 		return false;
1848 	/* The verifier guarantees that size > 0. */
1849 	if (off % size != 0)
1850 		return false;
1851 
1852 	switch (off) {
1853 	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
1854 		bpf_ctx_record_field_size(info, size_default);
1855 		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
1856 			return false;
1857 		break;
1858 	default:
1859 		if (size != size_default)
1860 			return false;
1861 	}
1862 
1863 	return true;
1864 }
1865 
1866 const struct bpf_prog_ops cg_dev_prog_ops = {
1867 };
1868 
1869 const struct bpf_verifier_ops cg_dev_verifier_ops = {
1870 	.get_func_proto		= cgroup_dev_func_proto,
1871 	.is_valid_access	= cgroup_dev_is_valid_access,
1872 };
1873 
1874 /**
1875  * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
1876  *
1877  * @head: sysctl table header
1878  * @table: sysctl table
1879  * @write: sysctl is being read (= 0) or written (= 1)
1880  * @buf: pointer to buffer (in and out)
1881  * @pcount: value-result argument: value is size of buffer pointed to by @buf,
1882  *	result is size of new value written to @buf if program set one,
1883  *	initial value otherwise
1884  * @ppos: value-result argument: value is position at which read from or write
1885  *	to sysctl is happening, result is new position if program overrode it,
1886  *	initial value otherwise
1887  * @atype: type of program to be executed
1888  *
1889  * Program is run when sysctl is being accessed, either read or written, and
1890  * can allow or deny such access.
1891  *
1892  * This function will return %-EPERM if an attached program is found and
1893  * it returned a value != 1 during execution. In all other cases 0 is returned.
1894  */
1895 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
1896 				   const struct ctl_table *table, int write,
1897 				   char **buf, size_t *pcount, loff_t *ppos,
1898 				   enum cgroup_bpf_attach_type atype)
1899 {
1900 	struct bpf_sysctl_kern ctx = {
1901 		.head = head,
1902 		.table = table,
1903 		.write = write,
1904 		.ppos = ppos,
1905 		.cur_val = NULL,
1906 		.cur_len = PAGE_SIZE,
1907 		.new_val = NULL,
1908 		.new_len = 0,
1909 		.new_updated = 0,
1910 	};
1911 	struct cgroup *cgrp;
1912 	loff_t pos = 0;
1913 	int ret;
1914 
1915 	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
1916 	if (!ctx.cur_val ||
1917 	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
1918 		/* Let BPF program decide how to proceed. */
1919 		ctx.cur_len = 0;
1920 	}
1921 
1922 	if (write && *buf && *pcount) {
1923 		/* The BPF program should be able to override the new value with a
1924 		 * buffer bigger than the one provided by user space.
1925 		 */
1926 		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
1927 		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
1928 		if (ctx.new_val) {
1929 			memcpy(ctx.new_val, *buf, ctx.new_len);
1930 		} else {
1931 			/* Let BPF program decide how to proceed. */
1932 			ctx.new_len = 0;
1933 		}
1934 	}
1935 
1936 	rcu_read_lock();
1937 	cgrp = task_dfl_cgroup(current);
1938 	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
1939 				    NULL);
1940 	rcu_read_unlock();
1941 
1942 	kfree(ctx.cur_val);
1943 
1944 	if (ret == 1 && ctx.new_updated) {
1945 		kfree(*buf);
1946 		*buf = ctx.new_val;
1947 		*pcount = ctx.new_len;
1948 	} else {
1949 		kfree(ctx.new_val);
1950 	}
1951 
1952 	return ret;
1953 }
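
/*
 * Illustrative sketch (assuming a libbpf build; the read-only policy is
 * made up): a BPF_CGROUP_SYSCTL program run by the function above.
 * Returning 0 makes the sysctl access fail with -EPERM, returning 1 lets
 * it proceed.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_read_only(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;	// deny writes, allow reads
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */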
1954 
1955 #ifdef CONFIG_NET
1956 static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
1957 			     struct bpf_sockopt_buf *buf)
1958 {
1959 	if (unlikely(max_optlen < 0))
1960 		return -EINVAL;
1961 
1962 	if (unlikely(max_optlen > PAGE_SIZE)) {
1963 		/* We don't expose optvals that are greater than PAGE_SIZE
1964 		 * to the BPF program.
1965 		 */
1966 		max_optlen = PAGE_SIZE;
1967 	}
1968 
1969 	if (max_optlen <= sizeof(buf->data)) {
1970 		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
1971 		 * bytes avoid the cost of kzalloc.
1972 		 */
1973 		ctx->optval = buf->data;
1974 		ctx->optval_end = ctx->optval + max_optlen;
1975 		return max_optlen;
1976 	}
1977 
1978 	ctx->optval = kzalloc(max_optlen, GFP_USER);
1979 	if (!ctx->optval)
1980 		return -ENOMEM;
1981 
1982 	ctx->optval_end = ctx->optval + max_optlen;
1983 
1984 	return max_optlen;
1985 }
1986 
1987 static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
1988 			     struct bpf_sockopt_buf *buf)
1989 {
1990 	if (ctx->optval == buf->data)
1991 		return;
1992 	kfree(ctx->optval);
1993 }
1994 
1995 static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
1996 				  struct bpf_sockopt_buf *buf)
1997 {
1998 	return ctx->optval != buf->data;
1999 }
2000 
2001 int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
2002 				       int *optname, sockptr_t optval,
2003 				       int *optlen, char **kernel_optval)
2004 {
2005 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
2006 	struct bpf_sockopt_buf buf = {};
2007 	struct bpf_sockopt_kern ctx = {
2008 		.sk = sk,
2009 		.level = *level,
2010 		.optname = *optname,
2011 	};
2012 	int ret, max_optlen;
2013 
2014 	/* Allocate a bit more than the initial user buffer for
2015 	 * BPF program. The canonical use case is overriding
2016 	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
2017 	 */
2018 	max_optlen = max_t(int, 16, *optlen);
2019 	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
2020 	if (max_optlen < 0)
2021 		return max_optlen;
2022 
2023 	ctx.optlen = *optlen;
2024 
2025 	if (copy_from_sockptr(ctx.optval, optval,
2026 			      min(*optlen, max_optlen))) {
2027 		ret = -EFAULT;
2028 		goto out;
2029 	}
2030 
2031 	lock_sock(sk);
2032 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
2033 				    &ctx, bpf_prog_run, 0, NULL);
2034 	release_sock(sk);
2035 
2036 	if (ret)
2037 		goto out;
2038 
2039 	if (ctx.optlen == -1) {
2040 		/* optlen set to -1, bypass kernel */
2041 		ret = 1;
2042 	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
2043 		/* optlen is out of bounds */
2044 		if (*optlen > PAGE_SIZE && ctx.optlen >= 0) {
2045 			pr_info_once("bpf setsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
2046 				     ctx.optlen, max_optlen);
2047 			ret = 0;
2048 			goto out;
2049 		}
2050 		ret = -EFAULT;
2051 	} else {
2052 		/* optlen within bounds, run kernel handler */
2053 		ret = 0;
2054 
2055 		/* export any potential modifications */
2056 		*level = ctx.level;
2057 		*optname = ctx.optname;
2058 
2059 		/* optlen == 0 from BPF indicates that we should
2060 		 * use original userspace data.
2061 		 */
2062 		if (ctx.optlen != 0) {
2063 			*optlen = ctx.optlen;
2064 			/* We've used bpf_sockopt_kern->buf as an intermediary
2065 			 * storage, but the BPF program indicates that we need
2066 			 * to pass this data to the kernel setsockopt handler.
2067 			 * No way to export on-stack buf, have to allocate a
2068 			 * new buffer.
2069 			 */
2070 			if (!sockopt_buf_allocated(&ctx, &buf)) {
2071 				void *p = kmalloc(ctx.optlen, GFP_USER);
2072 
2073 				if (!p) {
2074 					ret = -ENOMEM;
2075 					goto out;
2076 				}
2077 				memcpy(p, ctx.optval, ctx.optlen);
2078 				*kernel_optval = p;
2079 			} else {
2080 				*kernel_optval = ctx.optval;
2081 			}
2082 			/* export and don't free sockopt buf */
2083 			return 0;
2084 		}
2085 	}
2086 
2087 out:
2088 	sockopt_free_buf(&ctx, &buf);
2089 	return ret;
2090 }
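
/*
 * Illustrative sketch (assuming a libbpf build and the standard
 * IPPROTO_TCP/TCP_CONGESTION values from <netinet/in.h> and
 * <netinet/tcp.h>): the canonical override mentioned in the comment
 * above, forcing TCP_CONGESTION to "cubic". optval must be bounds-checked
 * against optval_end like packet data, and returning 1 with a modified
 * ctx->optlen is what makes the function above export the new value to
 * the kernel handler.
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("cgroup/setsockopt")
 *	int force_cubic(struct bpf_sockopt *ctx)
 *	{
 *		char cubic[] = "cubic";
 *
 *		if (ctx->level != IPPROTO_TCP || ctx->optname != TCP_CONGESTION)
 *			return 1;	// not ours, run the kernel handler as-is
 *
 *		if (ctx->optval + sizeof(cubic) > ctx->optval_end)
 *			return 0;	// no room to rewrite, reject with -EPERM
 *
 *		__builtin_memcpy(ctx->optval, cubic, sizeof(cubic));
 *		ctx->optlen = sizeof(cubic);
 *		return 1;		// export the modified value
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */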
2091 
2092 int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
2093 				       int optname, sockptr_t optval,
2094 				       sockptr_t optlen, int max_optlen,
2095 				       int retval)
2096 {
2097 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
2098 	struct bpf_sockopt_buf buf = {};
2099 	struct bpf_sockopt_kern ctx = {
2100 		.sk = sk,
2101 		.level = level,
2102 		.optname = optname,
2103 		.current_task = current,
2104 	};
2105 	int orig_optlen;
2106 	int ret;
2107 
2108 	orig_optlen = max_optlen;
2109 	ctx.optlen = max_optlen;
2110 	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
2111 	if (max_optlen < 0)
2112 		return max_optlen;
2113 
2114 	if (!retval) {
2115 		/* If kernel getsockopt finished successfully,
2116 		 * copy whatever was returned to the user back
2117 		 * into our temporary buffer. Set optlen to the
2118 		 * one that kernel returned as well to let
2119 		 * BPF programs inspect the value.
2120 		 */
2121 		if (copy_from_sockptr(&ctx.optlen, optlen,
2122 				      sizeof(ctx.optlen))) {
2123 			ret = -EFAULT;
2124 			goto out;
2125 		}
2126 
2127 		if (ctx.optlen < 0) {
2128 			ret = -EFAULT;
2129 			goto out;
2130 		}
2131 		orig_optlen = ctx.optlen;
2132 
2133 		if (copy_from_sockptr(ctx.optval, optval,
2134 				      min(ctx.optlen, max_optlen))) {
2135 			ret = -EFAULT;
2136 			goto out;
2137 		}
2138 	}
2139 
2140 	lock_sock(sk);
2141 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
2142 				    &ctx, bpf_prog_run, retval, NULL);
2143 	release_sock(sk);
2144 
2145 	if (ret < 0)
2146 		goto out;
2147 
2148 	if (!sockptr_is_null(optval) &&
2149 	    (ctx.optlen > max_optlen || ctx.optlen < 0)) {
2150 		if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
2151 			pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
2152 				     ctx.optlen, max_optlen);
2153 			ret = retval;
2154 			goto out;
2155 		}
2156 		ret = -EFAULT;
2157 		goto out;
2158 	}
2159 
2160 	if (ctx.optlen != 0) {
2161 		if (!sockptr_is_null(optval) &&
2162 		    copy_to_sockptr(optval, ctx.optval, ctx.optlen)) {
2163 			ret = -EFAULT;
2164 			goto out;
2165 		}
2166 		if (copy_to_sockptr(optlen, &ctx.optlen, sizeof(ctx.optlen))) {
2167 			ret = -EFAULT;
2168 			goto out;
2169 		}
2170 	}
2171 
2172 out:
2173 	sockopt_free_buf(&ctx, &buf);
2174 	return ret;
2175 }
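
/*
 * Illustrative sketch (assuming a libbpf build; the program name is made
 * up): a BPF_CGROUP_GETSOCKOPT program run by the function above. optval
 * and optval_end carry the value produced by the kernel handler (when its
 * retval was 0) and must be bounds-checked before access; returning 1
 * without touching optlen or retval keeps the kernel result as-is.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("cgroup/getsockopt")
 *	int peek_getsockopt(struct bpf_sockopt *ctx)
 *	{
 *		__u8 *val = ctx->optval;
 *
 *		if (val + 1 > ctx->optval_end)
 *			return 1;	// nothing readable, keep kernel result
 *
 *		bpf_printk("getsockopt level=%d optname=%d first byte=%u",
 *			   ctx->level, ctx->optname, val[0]);
 *		return 1;		// keep retval and value unchanged
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */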
2176 
2177 int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
2178 					    int optname, void *optval,
2179 					    int *optlen, int retval)
2180 {
2181 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
2182 	struct bpf_sockopt_kern ctx = {
2183 		.sk = sk,
2184 		.level = level,
2185 		.optname = optname,
2186 		.optlen = *optlen,
2187 		.optval = optval,
2188 		.optval_end = optval + *optlen,
2189 		.current_task = current,
2190 	};
2191 	int ret;
2192 
2193 	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
2194 	 * user data back into the BPF buffer when retval != 0. This is
2195 	 * done as an optimization to avoid an extra copy, assuming
2196 	 * the kernel won't populate the data in case of an error.
2197 	 * Here we always pass the data and memset() should
2198 	 * be called if that data shouldn't be "exported".
2199 	 */
2200 
2201 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
2202 				    &ctx, bpf_prog_run, retval, NULL);
2203 	if (ret < 0)
2204 		return ret;
2205 
2206 	if (ctx.optlen > *optlen)
2207 		return -EFAULT;
2208 
2209 	/* BPF programs can shrink the buffer; export the modifications.
2210 	 */
2211 	if (ctx.optlen != 0)
2212 		*optlen = ctx.optlen;
2213 
2214 	return ret;
2215 }
2216 #endif
2217 
2218 static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
2219 			      size_t *lenp)
2220 {
2221 	ssize_t tmp_ret = 0, ret;
2222 
2223 	if (dir->header.parent) {
2224 		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
2225 		if (tmp_ret < 0)
2226 			return tmp_ret;
2227 	}
2228 
2229 	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
2230 	if (ret < 0)
2231 		return ret;
2232 	*bufp += ret;
2233 	*lenp -= ret;
2234 	ret += tmp_ret;
2235 
2236 	/* Avoid leading slash. */
2237 	if (!ret)
2238 		return ret;
2239 
2240 	tmp_ret = strscpy(*bufp, "/", *lenp);
2241 	if (tmp_ret < 0)
2242 		return tmp_ret;
2243 	*bufp += tmp_ret;
2244 	*lenp -= tmp_ret;
2245 
2246 	return ret + tmp_ret;
2247 }
2248 
2249 BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
2250 	   size_t, buf_len, u64, flags)
2251 {
2252 	ssize_t tmp_ret = 0, ret;
2253 
2254 	if (!buf)
2255 		return -EINVAL;
2256 
2257 	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
2258 		if (!ctx->head)
2259 			return -EINVAL;
2260 		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
2261 		if (tmp_ret < 0)
2262 			return tmp_ret;
2263 	}
2264 
2265 	ret = strscpy(buf, ctx->table->procname, buf_len);
2266 
2267 	return ret < 0 ? ret : tmp_ret + ret;
2268 }
2269 
2270 static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
2271 	.func		= bpf_sysctl_get_name,
2272 	.gpl_only	= false,
2273 	.ret_type	= RET_INTEGER,
2274 	.arg1_type	= ARG_PTR_TO_CTX,
2275 	.arg2_type	= ARG_PTR_TO_MEM | MEM_WRITE,
2276 	.arg3_type	= ARG_CONST_SIZE,
2277 	.arg4_type	= ARG_ANYTHING,
2278 };
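
/*
 * Illustrative sketch (assuming a libbpf build; buffer size and program
 * name are arbitrary): with flags == 0, bpf_sysctl_get_name() returns the
 * full path assembled by sysctl_cpy_dir() above, e.g. "net/ipv4/tcp_mem",
 * while BPF_F_SYSCTL_BASE_NAME yields just "tcp_mem".
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("cgroup/sysctl")
 *	int log_sysctl_name(struct bpf_sysctl *ctx)
 *	{
 *		char name[64];
 *
 *		if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *			return 1;	// name truncated or unavailable, allow
 *		bpf_printk("sysctl accessed: %s write=%d", name, ctx->write);
 *		return 1;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */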
2279 
2280 static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
2281 			     size_t src_len)
2282 {
2283 	if (!dst)
2284 		return -EINVAL;
2285 
2286 	if (!dst_len)
2287 		return -E2BIG;
2288 
2289 	if (!src || !src_len) {
2290 		memset(dst, 0, dst_len);
2291 		return -EINVAL;
2292 	}
2293 
2294 	memcpy(dst, src, min(dst_len, src_len));
2295 
2296 	if (dst_len > src_len) {
2297 		memset(dst + src_len, '\0', dst_len - src_len);
2298 		return src_len;
2299 	}
2300 
2301 	dst[dst_len - 1] = '\0';
2302 
2303 	return -E2BIG;
2304 }
2305 
2306 BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
2307 	   char *, buf, size_t, buf_len)
2308 {
2309 	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
2310 }
2311 
2312 static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
2313 	.func		= bpf_sysctl_get_current_value,
2314 	.gpl_only	= false,
2315 	.ret_type	= RET_INTEGER,
2316 	.arg1_type	= ARG_PTR_TO_CTX,
2317 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
2318 	.arg3_type	= ARG_CONST_SIZE,
2319 };
2320 
2321 BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
2322 	   size_t, buf_len)
2323 {
2324 	if (!ctx->write) {
2325 		if (buf && buf_len)
2326 			memset(buf, '\0', buf_len);
2327 		return -EINVAL;
2328 	}
2329 	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
2330 }
2331 
2332 static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
2333 	.func		= bpf_sysctl_get_new_value,
2334 	.gpl_only	= false,
2335 	.ret_type	= RET_INTEGER,
2336 	.arg1_type	= ARG_PTR_TO_CTX,
2337 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
2338 	.arg3_type	= ARG_CONST_SIZE,
2339 };
2340 
2341 BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
2342 	   const char *, buf, size_t, buf_len)
2343 {
2344 	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
2345 		return -EINVAL;
2346 
2347 	if (buf_len > PAGE_SIZE - 1)
2348 		return -E2BIG;
2349 
2350 	memcpy(ctx->new_val, buf, buf_len);
2351 	ctx->new_len = buf_len;
2352 	ctx->new_updated = 1;
2353 
2354 	return 0;
2355 }
2356 
2357 static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
2358 	.func		= bpf_sysctl_set_new_value,
2359 	.gpl_only	= false,
2360 	.ret_type	= RET_INTEGER,
2361 	.arg1_type	= ARG_PTR_TO_CTX,
2362 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
2363 	.arg3_type	= ARG_CONST_SIZE,
2364 };
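
/*
 * Illustrative sketch (assuming a libbpf build; the clamping policy is
 * made up): rewriting a sysctl write with the two helpers above.
 * bpf_sysctl_get_new_value() exposes what user space is writing and
 * bpf_sysctl_set_new_value() replaces it and flags new_updated, which is
 * what makes __cgroup_bpf_run_filter_sysctl() hand the new buffer to the
 * proc handler.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("cgroup/sysctl")
 *	int clamp_sysctl_write(struct bpf_sysctl *ctx)
 *	{
 *		char val[16];
 *		char one[] = "1\n";
 *
 *		if (!ctx->write)
 *			return 1;	// only interested in writes
 *		if (bpf_sysctl_get_new_value(ctx, val, sizeof(val)) < 0)
 *			return 0;	// new value too large, reject the write
 *		if (val[0] == '0' &&
 *		    bpf_sysctl_set_new_value(ctx, one, sizeof(one) - 1) < 0)
 *			return 0;
 *		return 1;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */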
2365 
2366 static const struct bpf_func_proto *
2367 sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2368 {
2369 	const struct bpf_func_proto *func_proto;
2370 
2371 	func_proto = cgroup_common_func_proto(func_id, prog);
2372 	if (func_proto)
2373 		return func_proto;
2374 
2375 	switch (func_id) {
2376 	case BPF_FUNC_sysctl_get_name:
2377 		return &bpf_sysctl_get_name_proto;
2378 	case BPF_FUNC_sysctl_get_current_value:
2379 		return &bpf_sysctl_get_current_value_proto;
2380 	case BPF_FUNC_sysctl_get_new_value:
2381 		return &bpf_sysctl_get_new_value_proto;
2382 	case BPF_FUNC_sysctl_set_new_value:
2383 		return &bpf_sysctl_set_new_value_proto;
2384 	case BPF_FUNC_ktime_get_coarse_ns:
2385 		return &bpf_ktime_get_coarse_ns_proto;
2386 	case BPF_FUNC_perf_event_output:
2387 		return &bpf_event_output_data_proto;
2388 	default:
2389 		return bpf_base_func_proto(func_id, prog);
2390 	}
2391 }
2392 
2393 static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
2394 				   const struct bpf_prog *prog,
2395 				   struct bpf_insn_access_aux *info)
2396 {
2397 	const int size_default = sizeof(__u32);
2398 
2399 	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
2400 		return false;
2401 
2402 	switch (off) {
2403 	case bpf_ctx_range(struct bpf_sysctl, write):
2404 		if (type != BPF_READ)
2405 			return false;
2406 		bpf_ctx_record_field_size(info, size_default);
2407 		return bpf_ctx_narrow_access_ok(off, size, size_default);
2408 	case bpf_ctx_range(struct bpf_sysctl, file_pos):
2409 		if (type == BPF_READ) {
2410 			bpf_ctx_record_field_size(info, size_default);
2411 			return bpf_ctx_narrow_access_ok(off, size, size_default);
2412 		} else {
2413 			return size == size_default;
2414 		}
2415 	default:
2416 		return false;
2417 	}
2418 }
2419 
2420 static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
2421 				     const struct bpf_insn *si,
2422 				     struct bpf_insn *insn_buf,
2423 				     struct bpf_prog *prog, u32 *target_size)
2424 {
2425 	struct bpf_insn *insn = insn_buf;
2426 	u32 read_size;
2427 
2428 	switch (si->off) {
2429 	case offsetof(struct bpf_sysctl, write):
2430 		*insn++ = BPF_LDX_MEM(
2431 			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
2432 			bpf_target_off(struct bpf_sysctl_kern, write,
2433 				       sizeof_field(struct bpf_sysctl_kern,
2434 						    write),
2435 				       target_size));
2436 		break;
2437 	case offsetof(struct bpf_sysctl, file_pos):
2438 		/* ppos is a pointer so it should be accessed via indirect
2439 		 * loads and stores. Also, for stores, an additional temporary
2440 		 * register is used since neither src_reg nor dst_reg can be
2441 		 * overridden.
2442 		 */
2443 		if (type == BPF_WRITE) {
2444 			int treg = BPF_REG_9;
2445 
2446 			if (si->src_reg == treg || si->dst_reg == treg)
2447 				--treg;
2448 			if (si->src_reg == treg || si->dst_reg == treg)
2449 				--treg;
2450 			*insn++ = BPF_STX_MEM(
2451 				BPF_DW, si->dst_reg, treg,
2452 				offsetof(struct bpf_sysctl_kern, tmp_reg));
2453 			*insn++ = BPF_LDX_MEM(
2454 				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2455 				treg, si->dst_reg,
2456 				offsetof(struct bpf_sysctl_kern, ppos));
2457 			*insn++ = BPF_RAW_INSN(
2458 				BPF_CLASS(si->code) | BPF_MEM | BPF_SIZEOF(u32),
2459 				treg, si->src_reg,
2460 				bpf_ctx_narrow_access_offset(
2461 					0, sizeof(u32), sizeof(loff_t)),
2462 				si->imm);
2463 			*insn++ = BPF_LDX_MEM(
2464 				BPF_DW, treg, si->dst_reg,
2465 				offsetof(struct bpf_sysctl_kern, tmp_reg));
2466 		} else {
2467 			*insn++ = BPF_LDX_MEM(
2468 				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2469 				si->dst_reg, si->src_reg,
2470 				offsetof(struct bpf_sysctl_kern, ppos));
2471 			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
2472 			*insn++ = BPF_LDX_MEM(
2473 				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
2474 				bpf_ctx_narrow_access_offset(
2475 					0, read_size, sizeof(loff_t)));
2476 		}
2477 		*target_size = sizeof(u32);
2478 		break;
2479 	}
2480 
2481 	return insn - insn_buf;
2482 }
2483 
2484 const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
2485 	.get_func_proto		= sysctl_func_proto,
2486 	.is_valid_access	= sysctl_is_valid_access,
2487 	.convert_ctx_access	= sysctl_convert_ctx_access,
2488 };
2489 
2490 const struct bpf_prog_ops cg_sysctl_prog_ops = {
2491 };
2492 
2493 #ifdef CONFIG_NET
2494 BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
2495 {
2496 	const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;
2497 
2498 	return net->net_cookie;
2499 }
2500 
2501 static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
2502 	.func		= bpf_get_netns_cookie_sockopt,
2503 	.gpl_only	= false,
2504 	.ret_type	= RET_INTEGER,
2505 	.arg1_type	= ARG_PTR_TO_CTX_OR_NULL,
2506 };
2507 #endif
2508 
2509 static const struct bpf_func_proto *
2510 cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2511 {
2512 	const struct bpf_func_proto *func_proto;
2513 
2514 	func_proto = cgroup_common_func_proto(func_id, prog);
2515 	if (func_proto)
2516 		return func_proto;
2517 
2518 	switch (func_id) {
2519 #ifdef CONFIG_NET
2520 	case BPF_FUNC_get_netns_cookie:
2521 		return &bpf_get_netns_cookie_sockopt_proto;
2522 	case BPF_FUNC_sk_storage_get:
2523 		return &bpf_sk_storage_get_proto;
2524 	case BPF_FUNC_sk_storage_delete:
2525 		return &bpf_sk_storage_delete_proto;
2526 	case BPF_FUNC_setsockopt:
2527 		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2528 			return &bpf_sk_setsockopt_proto;
2529 		return NULL;
2530 	case BPF_FUNC_getsockopt:
2531 		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2532 			return &bpf_sk_getsockopt_proto;
2533 		return NULL;
2534 #endif
2535 #ifdef CONFIG_INET
2536 	case BPF_FUNC_tcp_sock:
2537 		return &bpf_tcp_sock_proto;
2538 #endif
2539 	case BPF_FUNC_perf_event_output:
2540 		return &bpf_event_output_data_proto;
2541 	default:
2542 		return bpf_base_func_proto(func_id, prog);
2543 	}
2544 }
2545 
2546 static bool cg_sockopt_is_valid_access(int off, int size,
2547 				       enum bpf_access_type type,
2548 				       const struct bpf_prog *prog,
2549 				       struct bpf_insn_access_aux *info)
2550 {
2551 	const int size_default = sizeof(__u32);
2552 
2553 	if (off < 0 || off >= sizeof(struct bpf_sockopt))
2554 		return false;
2555 
2556 	if (off % size != 0)
2557 		return false;
2558 
2559 	if (type == BPF_WRITE) {
2560 		switch (off) {
2561 		case offsetof(struct bpf_sockopt, retval):
2562 			if (size != size_default)
2563 				return false;
2564 			return prog->expected_attach_type ==
2565 				BPF_CGROUP_GETSOCKOPT;
2566 		case offsetof(struct bpf_sockopt, optname):
2567 			fallthrough;
2568 		case offsetof(struct bpf_sockopt, level):
2569 			if (size != size_default)
2570 				return false;
2571 			return prog->expected_attach_type ==
2572 				BPF_CGROUP_SETSOCKOPT;
2573 		case offsetof(struct bpf_sockopt, optlen):
2574 			return size == size_default;
2575 		default:
2576 			return false;
2577 		}
2578 	}
2579 
2580 	switch (off) {
2581 	case offsetof(struct bpf_sockopt, sk):
2582 		if (size != sizeof(__u64))
2583 			return false;
2584 		info->reg_type = PTR_TO_SOCKET;
2585 		break;
2586 	case offsetof(struct bpf_sockopt, optval):
2587 		if (size != sizeof(__u64))
2588 			return false;
2589 		info->reg_type = PTR_TO_PACKET;
2590 		break;
2591 	case offsetof(struct bpf_sockopt, optval_end):
2592 		if (size != sizeof(__u64))
2593 			return false;
2594 		info->reg_type = PTR_TO_PACKET_END;
2595 		break;
2596 	case offsetof(struct bpf_sockopt, retval):
2597 		if (size != size_default)
2598 			return false;
2599 		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
2600 	default:
2601 		if (size != size_default)
2602 			return false;
2603 		break;
2604 	}
2605 	return true;
2606 }
2607 
2608 #define CG_SOCKOPT_READ_FIELD(F)					\
2609 	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),	\
2610 		    si->dst_reg, si->src_reg,				\
2611 		    offsetof(struct bpf_sockopt_kern, F))
2612 
2613 #define CG_SOCKOPT_WRITE_FIELD(F)					\
2614 	BPF_RAW_INSN((BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F) |	\
2615 		      BPF_MEM | BPF_CLASS(si->code)),			\
2616 		     si->dst_reg, si->src_reg,				\
2617 		     offsetof(struct bpf_sockopt_kern, F),		\
2618 		     si->imm)
2619 
2620 static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
2621 					 const struct bpf_insn *si,
2622 					 struct bpf_insn *insn_buf,
2623 					 struct bpf_prog *prog,
2624 					 u32 *target_size)
2625 {
2626 	struct bpf_insn *insn = insn_buf;
2627 
2628 	switch (si->off) {
2629 	case offsetof(struct bpf_sockopt, sk):
2630 		*insn++ = CG_SOCKOPT_READ_FIELD(sk);
2631 		break;
2632 	case offsetof(struct bpf_sockopt, level):
2633 		if (type == BPF_WRITE)
2634 			*insn++ = CG_SOCKOPT_WRITE_FIELD(level);
2635 		else
2636 			*insn++ = CG_SOCKOPT_READ_FIELD(level);
2637 		break;
2638 	case offsetof(struct bpf_sockopt, optname):
2639 		if (type == BPF_WRITE)
2640 			*insn++ = CG_SOCKOPT_WRITE_FIELD(optname);
2641 		else
2642 			*insn++ = CG_SOCKOPT_READ_FIELD(optname);
2643 		break;
2644 	case offsetof(struct bpf_sockopt, optlen):
2645 		if (type == BPF_WRITE)
2646 			*insn++ = CG_SOCKOPT_WRITE_FIELD(optlen);
2647 		else
2648 			*insn++ = CG_SOCKOPT_READ_FIELD(optlen);
2649 		break;
2650 	case offsetof(struct bpf_sockopt, retval):
2651 		BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);
2652 
2653 		if (type == BPF_WRITE) {
2654 			int treg = BPF_REG_9;
2655 
2656 			if (si->src_reg == treg || si->dst_reg == treg)
2657 				--treg;
2658 			if (si->src_reg == treg || si->dst_reg == treg)
2659 				--treg;
2660 			*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg,
2661 					      offsetof(struct bpf_sockopt_kern, tmp_reg));
2662 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2663 					      treg, si->dst_reg,
2664 					      offsetof(struct bpf_sockopt_kern, current_task));
2665 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2666 					      treg, treg,
2667 					      offsetof(struct task_struct, bpf_ctx));
2668 			*insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_MEM |
2669 					       BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2670 					       treg, si->src_reg,
2671 					       offsetof(struct bpf_cg_run_ctx, retval),
2672 					       si->imm);
2673 			*insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
2674 					      offsetof(struct bpf_sockopt_kern, tmp_reg));
2675 		} else {
2676 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2677 					      si->dst_reg, si->src_reg,
2678 					      offsetof(struct bpf_sockopt_kern, current_task));
2679 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2680 					      si->dst_reg, si->dst_reg,
2681 					      offsetof(struct task_struct, bpf_ctx));
2682 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2683 					      si->dst_reg, si->dst_reg,
2684 					      offsetof(struct bpf_cg_run_ctx, retval));
2685 		}
2686 		break;
2687 	case offsetof(struct bpf_sockopt, optval):
2688 		*insn++ = CG_SOCKOPT_READ_FIELD(optval);
2689 		break;
2690 	case offsetof(struct bpf_sockopt, optval_end):
2691 		*insn++ = CG_SOCKOPT_READ_FIELD(optval_end);
2692 		break;
2693 	}
2694 
2695 	return insn - insn_buf;
2696 }
2697 
2698 static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
2699 				   bool direct_write,
2700 				   const struct bpf_prog *prog)
2701 {
2702 	/* Nothing to do for the sockopt argument. The data is kzalloc'ed.
2703 	 */
2704 	return 0;
2705 }
2706 
2707 const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
2708 	.get_func_proto		= cg_sockopt_func_proto,
2709 	.is_valid_access	= cg_sockopt_is_valid_access,
2710 	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
2711 	.gen_prologue		= cg_sockopt_get_prologue,
2712 };
2713 
2714 const struct bpf_prog_ops cg_sockopt_prog_ops = {
2715 };
2716 
2717 /* Common helpers for cgroup hooks. */
2718 const struct bpf_func_proto *
2719 cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2720 {
2721 	switch (func_id) {
2722 	case BPF_FUNC_get_local_storage:
2723 		return &bpf_get_local_storage_proto;
2724 	case BPF_FUNC_get_retval:
2725 		switch (prog->expected_attach_type) {
2726 		case BPF_CGROUP_INET_INGRESS:
2727 		case BPF_CGROUP_INET_EGRESS:
2728 		case BPF_CGROUP_SOCK_OPS:
2729 		case BPF_CGROUP_UDP4_RECVMSG:
2730 		case BPF_CGROUP_UDP6_RECVMSG:
2731 		case BPF_CGROUP_UNIX_RECVMSG:
2732 		case BPF_CGROUP_INET4_GETPEERNAME:
2733 		case BPF_CGROUP_INET6_GETPEERNAME:
2734 		case BPF_CGROUP_UNIX_GETPEERNAME:
2735 		case BPF_CGROUP_INET4_GETSOCKNAME:
2736 		case BPF_CGROUP_INET6_GETSOCKNAME:
2737 		case BPF_CGROUP_UNIX_GETSOCKNAME:
2738 			return NULL;
2739 		default:
2740 			return &bpf_get_retval_proto;
2741 		}
2742 	case BPF_FUNC_set_retval:
2743 		switch (prog->expected_attach_type) {
2744 		case BPF_CGROUP_INET_INGRESS:
2745 		case BPF_CGROUP_INET_EGRESS:
2746 		case BPF_CGROUP_SOCK_OPS:
2747 		case BPF_CGROUP_UDP4_RECVMSG:
2748 		case BPF_CGROUP_UDP6_RECVMSG:
2749 		case BPF_CGROUP_UNIX_RECVMSG:
2750 		case BPF_CGROUP_INET4_GETPEERNAME:
2751 		case BPF_CGROUP_INET6_GETPEERNAME:
2752 		case BPF_CGROUP_UNIX_GETPEERNAME:
2753 		case BPF_CGROUP_INET4_GETSOCKNAME:
2754 		case BPF_CGROUP_INET6_GETSOCKNAME:
2755 		case BPF_CGROUP_UNIX_GETSOCKNAME:
2756 			return NULL;
2757 		default:
2758 			return &bpf_set_retval_proto;
2759 		}
2760 	default:
2761 		return NULL;
2762 	}
2763 }
2764