// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_lsm.h>
#include <linux/bpf_verifier.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

/*
 * cgroup bpf destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions. Use a separate workqueue so that cgroup bpf
 * destruction work items don't end up filling up max_active of system_percpu_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_bpf_destroy_wq;
static int __init cgroup_bpf_wq_init(void)
{
	cgroup_bpf_destroy_wq = alloc_workqueue("cgroup_bpf_destroy",
						WQ_PERCPU, 1);
	if (!cgroup_bpf_destroy_wq)
		panic("Failed to alloc workqueue for cgroup bpf destroy.\n");
	return 0;
}
core_initcall(cgroup_bpf_wq_init);

static int cgroup_bpf_lifetime_notify(struct notifier_block *nb,
				      unsigned long action, void *data);

static struct notifier_block cgroup_bpf_lifetime_nb = {
	.notifier_call = cgroup_bpf_lifetime_notify,
};

void __init cgroup_bpf_lifetime_notifier_init(void)
{
	BUG_ON(blocking_notifier_chain_register(&cgroup_lifetime_notifier,
						&cgroup_bpf_lifetime_nb));
}

/* __always_inline is necessary to prevent indirect call through run_prog
 * function pointer.
 */
static __always_inline int
bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
		      enum cgroup_bpf_attach_type atype,
		      const void *ctx, bpf_prog_run_fn run_prog,
		      int retval, u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 func_ret;

	run_ctx.retval = retval;
	rcu_read_lock_dont_migrate();
	array = rcu_dereference(cgrp->effective[atype]);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
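	/*
	 * Each program's return value carries the allow/deny verdict in
	 * bit 0; when the caller passes @ret_flags, the remaining high
	 * bits are OR'ed into it (e.g. BPF_RET_SET_CN for egress).
	 */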
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		if (ret_flags) {
			*(ret_flags) |= (func_ret >> 1);
			func_ret &= 1;
		}
		if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
			run_ctx.retval = -EPERM;
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock_migrate();
	return run_ctx.retval;
}

unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct sock *sk;
	struct cgroup *cgrp;
	int ret = 0;
	u64 *args;

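	/*
	 * ctx is the u64 argument array built for the LSM hook; for socket
	 * hooks the first argument is the socket itself.
	 */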
	args = (u64 *)ctx;
	sk = (void *)(unsigned long)args[0];
	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}

unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct socket *sock;
	struct cgroup *cgrp;
	int ret = 0;
	u64 *args;

	args = (u64 *)ctx;
	sock = (void *)(unsigned long)args[0];
	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	cgrp = sock_cgroup_ptr(&sock->sk->sk_cgrp_data);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}

unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct cgroup *cgrp;
	int ret = 0;

	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	/* We rely on trampoline's __bpf_prog_enter_lsm_cgroup to grab RCU read lock. */
	cgrp = task_dfl_cgroup(current);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}

#ifdef CONFIG_BPF_LSM
struct cgroup_lsm_atype {
	u32 attach_btf_id;
	int refcnt;
};

static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM];

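/*
 * Map an LSM hook's attach_btf_id to one of the CGROUP_LSM_NUM shared
 * attach-type slots; a slot becomes reusable once its refcnt drops to zero.
 */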
static enum cgroup_bpf_attach_type
bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
{
	int i;

	lockdep_assert_held(&cgroup_mutex);

	if (attach_type != BPF_LSM_CGROUP)
		return to_cgroup_bpf_attach_type(attach_type);

	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
		if (cgroup_lsm_atype[i].attach_btf_id == attach_btf_id)
			return CGROUP_LSM_START + i;

	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
		if (cgroup_lsm_atype[i].attach_btf_id == 0)
			return CGROUP_LSM_START + i;

	return -E2BIG;
}

void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype)
{
	int i = cgroup_atype - CGROUP_LSM_START;

	lockdep_assert_held(&cgroup_mutex);

	WARN_ON_ONCE(cgroup_lsm_atype[i].attach_btf_id &&
		     cgroup_lsm_atype[i].attach_btf_id != attach_btf_id);

	cgroup_lsm_atype[i].attach_btf_id = attach_btf_id;
	cgroup_lsm_atype[i].refcnt++;
}

void bpf_cgroup_atype_put(int cgroup_atype)
{
	int i = cgroup_atype - CGROUP_LSM_START;

	cgroup_lock();
	if (--cgroup_lsm_atype[i].refcnt <= 0)
		cgroup_lsm_atype[i].attach_btf_id = 0;
	WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
	cgroup_unlock();
}
#else
static enum cgroup_bpf_attach_type
bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
{
	if (attach_type != BPF_LSM_CGROUP)
		return to_cgroup_bpf_attach_type(attach_type);
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_LSM */

static void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;

	unsigned int atype;

	cgroup_lock();

	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
		struct hlist_head *progs = &cgrp->bpf.progs[atype];
		struct bpf_prog_list *pl;
		struct hlist_node *pltmp;

		hlist_for_each_entry_safe(pl, pltmp, progs, node) {
			hlist_del(&pl->node);
			if (pl->prog) {
				if (pl->prog->expected_attach_type == BPF_LSM_CGROUP)
					bpf_trampoline_unlink_cgroup_shim(pl->prog);
				bpf_prog_put(pl->prog);
			}
			if (pl->link) {
				if (pl->link->link.prog->expected_attach_type == BPF_LSM_CGROUP)
					bpf_trampoline_unlink_cgroup_shim(pl->link->link.prog);
				bpf_cgroup_link_auto_detach(pl->link);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	cgroup_unlock();

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(cgroup_bpf_destroy_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct hlist_head *head, int *preorder_cnt)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	hlist_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		if (preorder_cnt && (pl->flags & BPF_F_PREORDER))
			(*preorder_cnt)++;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum cgroup_bpf_attach_type atype)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[atype];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[atype], NULL);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int i, j, cnt = 0, preorder_cnt = 0, fstart, bstart, init_bstart;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[atype], &preorder_cnt);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	fstart = preorder_cnt;
	bstart = preorder_cnt - 1;
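	/*
	 * Layout: items[0..preorder_cnt - 1] hold BPF_F_PREORDER progs,
	 * the rest follow from items[preorder_cnt]. Preorder entries are
	 * filled backwards via bstart and re-reversed per cgroup level
	 * below; regular entries grow forwards via fstart.
	 */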
	do {
		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			continue;

		init_bstart = bstart;
		hlist_for_each_entry(pl, &p->bpf.progs[atype], node) {
			if (!prog_list_prog(pl))
				continue;

			if (pl->flags & BPF_F_PREORDER) {
				item = &progs->items[bstart];
				bstart--;
			} else {
				item = &progs->items[fstart];
				fstart++;
			}
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}

		/* reverse pre-ordering progs at this cgroup level */
		for (i = bstart + 1, j = init_bstart; i < j; i++, j--)
			swap(progs->items[i], progs->items[j]);

	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum cgroup_bpf_attach_type atype,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
static int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_HLIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

static int cgroup_bpf_lifetime_notify(struct notifier_block *nb,
				      unsigned long action, void *data)
{
	struct cgroup *cgrp = data;
	int ret = 0;

	if (cgrp->root != &cgrp_dfl_root)
		return NOTIFY_OK;

	switch (action) {
	case CGROUP_LIFETIME_ONLINE:
		ret = cgroup_bpf_inherit(cgrp);
		break;
	case CGROUP_LIFETIME_OFFLINE:
		cgroup_bpf_offline(cgrp);
		break;
	}

	return notifier_from_errno(ret);
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

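		/* cgroup bpf is already being torn down, skip it */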
		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, atype, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct hlist_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (hlist_empty(progs))
			return NULL;
		return hlist_entry(progs->first, typeof(*pl), node);
	}

	hlist_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		hlist_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

static struct bpf_link *bpf_get_anchor_link(u32 flags, u32 id_or_fd)
{
	struct bpf_link *link = ERR_PTR(-EINVAL);

	if (flags & BPF_F_ID)
		link = bpf_link_by_id(id_or_fd);
	else if (id_or_fd)
		link = bpf_link_get_from_fd(id_or_fd);
	return link;
}

static struct bpf_prog *bpf_get_anchor_prog(u32 flags, u32 id_or_fd)
{
	struct bpf_prog *prog = ERR_PTR(-EINVAL);

	if (flags & BPF_F_ID)
		prog = bpf_prog_by_id(id_or_fd);
	else if (id_or_fd)
		prog = bpf_prog_get(id_or_fd);
	return prog;
}

static struct bpf_prog_list *get_prog_list(struct hlist_head *progs, struct bpf_prog *prog,
					   struct bpf_cgroup_link *link, u32 flags, u32 id_or_fd)
{
	bool is_link = flags & BPF_F_LINK, is_id = flags & BPF_F_ID;
	struct bpf_prog_list *pltmp, *pl = ERR_PTR(-EINVAL);
	bool preorder = flags & BPF_F_PREORDER;
	struct bpf_link *anchor_link = NULL;
	struct bpf_prog *anchor_prog = NULL;
	bool is_before, is_after;

	is_before = flags & BPF_F_BEFORE;
	is_after = flags & BPF_F_AFTER;
	if (is_link || is_id || id_or_fd) {
		/* flags must have either BPF_F_BEFORE or BPF_F_AFTER */
		if (is_before == is_after)
			return ERR_PTR(-EINVAL);
		if ((is_link && !link) || (!is_link && !prog))
			return ERR_PTR(-EINVAL);
	} else if (!hlist_empty(progs)) {
		/* flags cannot have both BPF_F_BEFORE and BPF_F_AFTER */
		if (is_before && is_after)
			return ERR_PTR(-EINVAL);
	}

	if (is_link) {
		anchor_link = bpf_get_anchor_link(flags, id_or_fd);
		if (IS_ERR(anchor_link))
			return ERR_CAST(anchor_link);
	} else if (is_id || id_or_fd) {
		anchor_prog = bpf_get_anchor_prog(flags, id_or_fd);
		if (IS_ERR(anchor_prog))
			return ERR_CAST(anchor_prog);
	}

	if (!anchor_prog && !anchor_link) {
		/* if there is no anchor_prog/anchor_link, then BPF_F_PREORDER
		 * doesn't matter since either prepend or append to a combined
		 * list of progs will end up with correct result.
		 */
		hlist_for_each_entry(pltmp, progs, node) {
			if (is_before)
				return pltmp;
			if (pltmp->node.next)
				continue;
			return pltmp;
		}
		return NULL;
	}

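	/* an anchor is only valid if its BPF_F_PREORDER setting matches the
	 * prog being inserted
	 */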
	hlist_for_each_entry(pltmp, progs, node) {
		if ((anchor_prog && anchor_prog == pltmp->prog) ||
		    (anchor_link && anchor_link == &pltmp->link->link)) {
			if (!!(pltmp->flags & BPF_F_PREORDER) != preorder)
				goto out;
			pl = pltmp;
			goto out;
		}
	}

	pl = ERR_PTR(-ENOENT);
out:
	if (anchor_link)
		bpf_link_put(anchor_link);
	else
		bpf_prog_put(anchor_prog);
	return pl;
}

static int insert_pl_to_hlist(struct bpf_prog_list *pl, struct hlist_head *progs,
			      struct bpf_prog *prog, struct bpf_cgroup_link *link,
			      u32 flags, u32 id_or_fd)
{
	struct bpf_prog_list *pltmp;

	pltmp = get_prog_list(progs, prog, link, flags, id_or_fd);
	if (IS_ERR(pltmp))
		return PTR_ERR(pltmp);

	if (!pltmp)
		hlist_add_head(&pl->node, progs);
	else if (flags & BPF_F_BEFORE)
		hlist_add_before(&pl->node, &pltmp->node);
	else
		hlist_add_behind(&pl->node, &pltmp->node);

	return 0;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 * @id_or_fd: Relative prog id or fd
 * @revision: bpf_prog_list revision
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_attach(struct cgroup *cgrp,
			       struct bpf_prog *prog, struct bpf_prog *replace_prog,
			       struct bpf_cgroup_link *link,
			       enum bpf_attach_type type, u32 flags, u32 id_or_fd,
			       u64 revision)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog *new_prog = prog ? : link->link.prog;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if ((flags & BPF_F_REPLACE) && (flags & (BPF_F_BEFORE | BPF_F_AFTER)))
		/* only either replace or insertion with before/after */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	atype = bpf_cgroup_atype_find(type, new_prog->aux->attach_btf_id);
	if (atype < 0)
		return -EINVAL;
	if (revision && revision != cgrp->bpf.revisions[atype])
		return -ESTALE;

	progs = &cgrp->bpf.progs[atype];

	if (!hierarchy_allows_attach(cgrp, atype))
		return -EPERM;

	if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs, NULL) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		pl = kmalloc_obj(*pl);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}

		err = insert_pl_to_hlist(pl, progs, prog, link, flags, id_or_fd);
		if (err) {
			kfree(pl);
			bpf_cgroup_storages_free(new_storage);
			return err;
		}
	}

	pl->prog = prog;
	pl->link = link;
	pl->flags = flags;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[atype] = saved_flags;

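	/*
	 * BPF_LSM_CGROUP progs are invoked through a trampoline shim; link
	 * the shim before the prog shows up in any effective array.
	 */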
	if (type == BPF_LSM_CGROUP) {
		err = bpf_trampoline_link_cgroup_shim(new_prog, atype, type);
		if (err)
			goto cleanup;
	}

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup_trampoline;

	cgrp->bpf.revisions[atype] += 1;
	if (old_prog) {
		if (type == BPF_LSM_CGROUP)
			bpf_trampoline_unlink_cgroup_shim(old_prog);
		bpf_prog_put(old_prog);
	} else {
		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
	}
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup_trampoline:
	if (type == BPF_LSM_CGROUP)
		bpf_trampoline_unlink_cgroup_shim(new_prog);

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		hlist_del(&pl->node);
		kfree(pl);
	}
	return err;
}

static int cgroup_bpf_attach(struct cgroup *cgrp,
			     struct bpf_prog *prog, struct bpf_prog *replace_prog,
			     struct bpf_cgroup_link *link,
			     enum bpf_attach_type type,
			     u32 flags, u32 id_or_fd, u64 revision)
{
	int ret;

	cgroup_lock();
	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags,
				  id_or_fd, revision);
	cgroup_unlock();
	return ret;
}

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct hlist_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			hlist_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: &struct bpf_prog for the target BPF program with its refcnt
 *            incremented
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	bool found = false;

	atype = bpf_cgroup_atype_find(link->link.attach_type, new_prog->aux->attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	hlist_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	cgrp->bpf.revisions[atype] += 1;
	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, atype, link);
	bpf_prog_put(old_prog);
	return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	cgroup_lock();
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	cgroup_unlock();
	return ret;
}

static struct bpf_prog_list *find_detach_entry(struct hlist_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (hlist_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return hlist_entry(progs->first, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	hlist_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * purge_effective_progs() - After compute_effective_progs fails to alloc new
 *                           cgrp->bpf.inactive table we can recover by
 *                           recomputing the array in place.
 *
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @atype: Type of detach operation
 */
static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
				  struct bpf_cgroup_link *link,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct hlist_head *head;
	struct cgroup *cg;
	int pos;

	/* recompute effective prog array in place */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link or prog in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			hlist_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->prog == prog && pl->link == link)
					goto found;
				pos++;
			}
		}

		/* no link or prog match, skip the cgroup of this layer */
		continue;
found:
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));

		/* Remove the program from the array */
		WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
			  "Failed to purge a prog from array at index %d", pos);
	}
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 * @revision: bpf_prog_list revision
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			       struct bpf_cgroup_link *link, enum bpf_attach_type type,
			       u64 revision)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	u32 attach_btf_id = 0;
	u32 flags;

	if (prog)
		attach_btf_id = prog->aux->attach_btf_id;
	if (link)
		attach_btf_id = link->link.prog->aux->attach_btf_id;

	atype = bpf_cgroup_atype_find(type, attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	if (revision && revision != cgrp->bpf.revisions[atype])
		return -ESTALE;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	if (update_effective_progs(cgrp, atype)) {
		/* if update effective array failed replace the prog with a dummy prog */
		pl->prog = old_prog;
		pl->link = link;
		purge_effective_progs(cgrp, old_prog, link, atype);
	}

	/* now can actually delete it from this cgroup list */
	hlist_del(&pl->node);
	cgrp->bpf.revisions[atype] += 1;

	kfree(pl);
	if (hlist_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[atype] = 0;
	if (old_prog) {
		if (type == BPF_LSM_CGROUP)
			bpf_trampoline_unlink_cgroup_shim(old_prog);
		bpf_prog_put(old_prog);
	}
	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
	return 0;
}

static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			     enum bpf_attach_type type, u64 revision)
{
	int ret;

	cgroup_lock();
	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type, revision);
	cgroup_unlock();
	return ret;
}

/* Must be called with cgroup_mutex held to avoid races. */
static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			      union bpf_attr __user *uattr)
{
	__u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
	bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE;
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type from_atype, to_atype;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
	int cnt, ret = 0, i;
	int total_cnt = 0;
	u64 revision = 0;
	u32 flags;

	if (effective_query && prog_attach_flags)
		return -EINVAL;

	if (type == BPF_LSM_CGROUP) {
		if (!effective_query && attr->query.prog_cnt &&
		    prog_ids && !prog_attach_flags)
			return -EINVAL;

		from_atype = CGROUP_LSM_START;
		to_atype = CGROUP_LSM_END;
		flags = 0;
	} else {
		from_atype = to_cgroup_bpf_attach_type(type);
		if (from_atype < 0)
			return -EINVAL;
		to_atype = from_atype;
		flags = cgrp->bpf.flags[from_atype];
	}

	for (atype = from_atype; atype <= to_atype; atype++) {
		if (effective_query) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			total_cnt += bpf_prog_array_length(effective);
		} else {
			total_cnt += prog_list_length(&cgrp->bpf.progs[atype], NULL);
		}
	}

	/* always output uattr->query.attach_flags as 0 during effective query */
	flags = effective_query ? 0 : flags;
	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
		return -EFAULT;
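	/* the revision is only well-defined when a single attach type is queried */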
	if (!effective_query && from_atype == to_atype)
		revision = cgrp->bpf.revisions[from_atype];
	if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
		/* return early if user requested only program count + flags */
		return 0;

	if (attr->query.prog_cnt < total_cnt) {
		total_cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
		if (effective_query) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
			ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
		} else {
			struct hlist_head *progs;
			struct bpf_prog_list *pl;
			struct bpf_prog *prog;
			u32 id;

			progs = &cgrp->bpf.progs[atype];
			cnt = min_t(int, prog_list_length(progs, NULL), total_cnt);
			i = 0;
			hlist_for_each_entry(pl, progs, node) {
				prog = prog_list_prog(pl);
				id = prog->aux->id;
				if (copy_to_user(prog_ids + i, &id, sizeof(id)))
					return -EFAULT;
				if (++i == cnt)
					break;
			}

			if (prog_attach_flags) {
				flags = cgrp->bpf.flags[atype];

				for (i = 0; i < cnt; i++)
					if (copy_to_user(prog_attach_flags + i,
							 &flags, sizeof(flags)))
						return -EFAULT;
				prog_attach_flags += cnt;
			}
		}

		prog_ids += cnt;
		total_cnt -= cnt;
	}
	return ret;
}

static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	int ret;

	cgroup_lock();
	ret = __cgroup_bpf_query(cgrp, attr, uattr);
	cgroup_unlock();
	return ret;
}

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags,
				attr->relative_fd, attr->expected_revision);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

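	/*
	 * An invalid program fd is tolerated: legacy detach of non-MULTI
	 * attachments works without a program fd (see find_detach_entry()).
	 */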
	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, attr->expected_revision);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	struct cgroup *cg;

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	cgroup_lock();

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		cgroup_unlock();
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    link->attach_type, 0));
	if (link->attach_type == BPF_LSM_CGROUP)
		bpf_trampoline_unlink_cgroup_shim(cg_link->link.prog);

	cg = cg_link->cgroup;
	cg_link->cgroup = NULL;

	cgroup_unlock();

	cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
	bpf_cgroup_link_release(link);

	return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	cgroup_lock();
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	cgroup_unlock();

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   link->attach_type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	cgroup_lock();
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	cgroup_unlock();

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = link->attach_type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.detach = bpf_cgroup_link_detach,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};

#define BPF_F_LINK_ATTACH_MASK	\
	(BPF_F_ID |		\
	 BPF_F_BEFORE |		\
	 BPF_F_AFTER |		\
	 BPF_F_PREORDER |	\
	 BPF_F_LINK)

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags & (~BPF_F_LINK_ATTACH_MASK))
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc_obj(*link, GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog, attr->link_create.attach_type);
	link->cgroup = cgrp;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
				link->link.attach_type, BPF_F_ALLOW_MULTI | attr->link_create.flags,
				attr->link_create.cgroup.relative_fd,
				attr->link_create.cgroup.expected_revision);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @atype: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @atype must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -err			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype)
{
	unsigned int offset = -skb_network_offset(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
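	/*
	 * Temporarily rewind skb->data to the network header so the program
	 * sees the packet from the IP header on; undone by __skb_pull()
	 * before returning.
	 */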
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (atype == CGROUP_INET_EGRESS) {
		u32 flags = 0;
		bool cn;

		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
					    __bpf_prog_run_save_cb, 0, &flags);

		/* Return values of CGROUP EGRESS BPF programs are:
		 *   0: drop packet
		 *   1: keep packet
		 *   2: drop packet and cn
		 *   3: keep packet and cn
		 *
		 * The returned value is then converted to one of the NET_XMIT
		 * or an error code that is then interpreted as drop packet
		 * (and no cn):
		 *   0: NET_XMIT_SUCCESS  skb should be transmitted
		 *   1: NET_XMIT_DROP     skb should be dropped and cn
		 *   2: NET_XMIT_CN       skb should be transmitted and cn
		 *   3: -err              skb should be dropped
		 */

		cn = flags & BPF_RET_SET_CN;
		if (ret && !IS_ERR_VALUE((long)ret))
			ret = -EFAULT;
		if (!ret)
			ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);
		else
			ret = (cn ? NET_XMIT_DROP : ret);
	} else {
		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
					    skb, __bpf_prog_run_save_cb, 0,
					    NULL);
		if (ret && !IS_ERR_VALUE((long)ret))
			ret = -EFAULT;
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
				     NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr provided by user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @uaddrlen: Pointer to the size of the sockaddr struct provided by user. It is
 *            read-only for AF_INET[6] uaddr but can be modified for AF_UNIX
 *            uaddr.
 * @atype: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * The socket is expected to be of type INET, INET6 or UNIX.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr_unsized *uaddr,
				      int *uaddrlen,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage storage;
	struct cgroup *cgrp;
	int ret;

	if (!sk_is_inet(sk) && !sk_is_unix(sk))
		return 0;

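	/*
	 * Some callers pass no address; give the program a zeroed scratch
	 * sockaddr_storage in that case so it always has a valid buffer to
	 * work on.
	 */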
	if (!ctx.uaddr) {
		memset(&storage, 0, sizeof(storage));
		ctx.uaddr = (struct sockaddr_unsized *)&storage;
		ctx.uaddrlen = 0;
	} else {
		ctx.uaddrlen = *uaddrlen;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
				    0, flags);

	if (!ret && uaddr)
		*uaddrlen = ctx.uaddrlen;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
__cgroup_bpf_run_filter_sock_ops(struct sock * sk,struct bpf_sock_ops_kern * sock_ops,enum cgroup_bpf_attach_type atype)1721 int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
1722 struct bpf_sock_ops_kern *sock_ops,
1723 enum cgroup_bpf_attach_type atype)
1724 {
1725 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1726
1727 return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
1728 0, NULL);
1729 }
1730 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
1731
__cgroup_bpf_check_dev_permission(short dev_type,u32 major,u32 minor,short access,enum cgroup_bpf_attach_type atype)1732 int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
1733 short access, enum cgroup_bpf_attach_type atype)
1734 {
1735 struct cgroup *cgrp;
1736 struct bpf_cgroup_dev_ctx ctx = {
1737 .access_type = (access << 16) | dev_type,
1738 .major = major,
1739 .minor = minor,
1740 };
1741 int ret;
1742
1743 rcu_read_lock();
1744 cgrp = task_dfl_cgroup(current);
1745 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
1746 NULL);
1747 rcu_read_unlock();
1748
1749 return ret;
1750 }
1751
BPF_CALL_2(bpf_get_local_storage,struct bpf_map *,map,u64,flags)1752 BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
1753 {
1754 /* flags argument is not used now,
1755 * but provides an ability to extend the API.
1756 * verifier checks that its value is correct.
1757 */
1758 enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
1759 struct bpf_cgroup_storage *storage;
1760 struct bpf_cg_run_ctx *ctx;
1761 void *ptr;
1762
1763 /* get current cgroup storage from BPF run context */
1764 ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1765 storage = ctx->prog_item->cgroup_storage[stype];
1766
1767 if (stype == BPF_CGROUP_STORAGE_SHARED)
1768 ptr = &READ_ONCE(storage->buf)->data[0];
1769 else
1770 ptr = this_cpu_ptr(storage->percpu_buf);
1771
1772 return (unsigned long)ptr;
1773 }
1774
1775 const struct bpf_func_proto bpf_get_local_storage_proto = {
1776 .func = bpf_get_local_storage,
1777 .gpl_only = false,
1778 .ret_type = RET_PTR_TO_MAP_VALUE,
1779 .arg1_type = ARG_CONST_MAP_PTR,
1780 .arg2_type = ARG_ANYTHING,
1781 };
1782
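/*
 * Sketch of how a program might reach this helper (libbpf conventions;
 * the map and program names are hypothetical):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
 *		__type(key, struct bpf_cgroup_storage_key);
 *		__type(value, __u64);
 *	} cg_counter SEC(".maps");
 *
 *	SEC("cgroup_skb/egress")
 *	int count_egress(struct __sk_buff *skb)
 *	{
 *		__u64 *cnt = bpf_get_local_storage(&cg_counter, 0);
 *
 *		__sync_fetch_and_add(cnt, 1);	// shared storage: use atomics
 *		return 1;
 *	}
 */
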
1783 BPF_CALL_0(bpf_get_retval)
1784 {
1785 struct bpf_cg_run_ctx *ctx =
1786 container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1787
1788 return ctx->retval;
1789 }
1790
1791 const struct bpf_func_proto bpf_get_retval_proto = {
1792 .func = bpf_get_retval,
1793 .gpl_only = false,
1794 .ret_type = RET_INTEGER,
1795 };
1796
1797 BPF_CALL_1(bpf_set_retval, int, retval)
1798 {
1799 struct bpf_cg_run_ctx *ctx =
1800 container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1801
1802 ctx->retval = retval;
1803 return 0;
1804 }
1805
1806 const struct bpf_func_proto bpf_set_retval_proto = {
1807 .func = bpf_set_retval,
1808 .gpl_only = false,
1809 .ret_type = RET_INTEGER,
1810 .arg1_type = ARG_ANYTHING,
1811 };
1812
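/*
 * Together these helpers let a program override the errno surfaced by
 * bpf_prog_run_array_cg(): once retval holds an error value, the run loop
 * no longer forces -EPERM. A hedged sketch (hypothetical name; 13 is
 * EACCES, assuming no errno header is included on the BPF side):
 *
 *	SEC("cgroup/sock_create")
 *	int no_raw_sockets(struct bpf_sock *ctx)
 *	{
 *		if (ctx->type == SOCK_RAW) {
 *			bpf_set_retval(-13);	// caller sees -EACCES
 *			return 0;		// reject the socket
 *		}
 *		return 1;
 *	}
 */
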
1813 static const struct bpf_func_proto *
1814 cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1815 {
1816 const struct bpf_func_proto *func_proto;
1817
1818 func_proto = cgroup_common_func_proto(func_id, prog);
1819 if (func_proto)
1820 return func_proto;
1821
1822 switch (func_id) {
1823 case BPF_FUNC_perf_event_output:
1824 return &bpf_event_output_data_proto;
1825 default:
1826 return bpf_base_func_proto(func_id, prog);
1827 }
1828 }
1829
1830 static bool cgroup_dev_is_valid_access(int off, int size,
1831 enum bpf_access_type type,
1832 const struct bpf_prog *prog,
1833 struct bpf_insn_access_aux *info)
1834 {
1835 const int size_default = sizeof(__u32);
1836
1837 if (type == BPF_WRITE)
1838 return false;
1839
1840 if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
1841 return false;
1842 /* The verifier guarantees that size > 0. */
1843 if (off % size != 0)
1844 return false;
1845
1846 switch (off) {
1847 case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
1848 bpf_ctx_record_field_size(info, size_default);
1849 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
1850 return false;
1851 break;
1852 default:
1853 if (size != size_default)
1854 return false;
1855 }
1856
1857 return true;
1858 }
1859
1860 const struct bpf_prog_ops cg_dev_prog_ops = {
1861 };
1862
1863 const struct bpf_verifier_ops cg_dev_verifier_ops = {
1864 .get_func_proto = cgroup_dev_func_proto,
1865 .is_valid_access = cgroup_dev_is_valid_access,
1866 };
1867
1868 /**
1869 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
1870 *
1871 * @head: sysctl table header
1872 * @table: sysctl table
1873 * @write: sysctl is being read (= 0) or written (= 1)
1874 * @buf: pointer to buffer (in and out)
1875 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
1876 * result is size of the new value if the program set one, initial value
1877 * otherwise
1878 * @ppos: value-result argument: value is position at which read from or write
1879 * to sysctl is happening, result is new position if program overrode it,
1880 * initial value otherwise
1881 * @atype: type of program to be executed
1882 *
1883 * Program is run when sysctl is being accessed, either read or written, and
1884 * can allow or deny such access.
1885 *
1886 * This function will return %-EPERM if an attached program is found and
1887 * returned value != 1 during execution. In all other cases 0 is returned.
1888 */
1889 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
1890 const struct ctl_table *table, int write,
1891 char **buf, size_t *pcount, loff_t *ppos,
1892 enum cgroup_bpf_attach_type atype)
1893 {
1894 struct bpf_sysctl_kern ctx = {
1895 .head = head,
1896 .table = table,
1897 .write = write,
1898 .ppos = ppos,
1899 .cur_val = NULL,
1900 .cur_len = PAGE_SIZE,
1901 .new_val = NULL,
1902 .new_len = 0,
1903 .new_updated = 0,
1904 };
1905 struct cgroup *cgrp;
1906 loff_t pos = 0;
1907 int ret;
1908
1909 ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
1910 if (!ctx.cur_val ||
1911 table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
1912 /* Let BPF program decide how to proceed. */
1913 ctx.cur_len = 0;
1914 }
1915
1916 if (write && *buf && *pcount) {
1917 		/* The BPF program should be able to override the new value
1918 		 * with a buffer bigger than the one provided by the user.
1919 		 */
1920 ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
1921 ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
1922 if (ctx.new_val) {
1923 memcpy(ctx.new_val, *buf, ctx.new_len);
1924 } else {
1925 /* Let BPF program decide how to proceed. */
1926 ctx.new_len = 0;
1927 }
1928 }
1929
1930 rcu_read_lock();
1931 cgrp = task_dfl_cgroup(current);
1932 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
1933 NULL);
1934 rcu_read_unlock();
1935
1936 kfree(ctx.cur_val);
1937
1938 if (ret == 1 && ctx.new_updated) {
1939 kfree(*buf);
1940 *buf = ctx.new_val;
1941 *pcount = ctx.new_len;
1942 } else {
1943 kfree(ctx.new_val);
1944 }
1945
1946 return ret;
1947 }
1948
1949 #ifdef CONFIG_NET
1950 static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
1951 struct bpf_sockopt_buf *buf)
1952 {
1953 if (unlikely(max_optlen < 0))
1954 return -EINVAL;
1955
1956 if (unlikely(max_optlen > PAGE_SIZE)) {
1957 /* We don't expose optvals that are greater than PAGE_SIZE
1958 * to the BPF program.
1959 */
1960 max_optlen = PAGE_SIZE;
1961 }
1962
1963 if (max_optlen <= sizeof(buf->data)) {
1964 		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
1965 		 * bytes, avoid the cost of kzalloc.
1966 		 */
1967 ctx->optval = buf->data;
1968 ctx->optval_end = ctx->optval + max_optlen;
1969 return max_optlen;
1970 }
1971
1972 ctx->optval = kzalloc(max_optlen, GFP_USER);
1973 if (!ctx->optval)
1974 return -ENOMEM;
1975
1976 ctx->optval_end = ctx->optval + max_optlen;
1977
1978 return max_optlen;
1979 }
1980
1981 static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
1982 struct bpf_sockopt_buf *buf)
1983 {
1984 if (ctx->optval == buf->data)
1985 return;
1986 kfree(ctx->optval);
1987 }
1988
1989 static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
1990 struct bpf_sockopt_buf *buf)
1991 {
1992 return ctx->optval != buf->data;
1993 }
1994
1995 int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
1996 int *optname, sockptr_t optval,
1997 int *optlen, char **kernel_optval)
1998 {
1999 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
2000 struct bpf_sockopt_buf buf = {};
2001 struct bpf_sockopt_kern ctx = {
2002 .sk = sk,
2003 .level = *level,
2004 .optname = *optname,
2005 };
2006 int ret, max_optlen;
2007
2008 	/* Allocate a bit more than the initial user buffer for the
2009 	 * BPF program. The canonical use case is overriding
2010 	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
2011 	 */
2012 max_optlen = max_t(int, 16, *optlen);
2013 max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
2014 if (max_optlen < 0)
2015 return max_optlen;
2016
2017 ctx.optlen = *optlen;
2018
2019 if (copy_from_sockptr(ctx.optval, optval,
2020 min(*optlen, max_optlen))) {
2021 ret = -EFAULT;
2022 goto out;
2023 }
2024
2025 lock_sock(sk);
2026 ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
2027 &ctx, bpf_prog_run, 0, NULL);
2028 release_sock(sk);
2029
2030 if (ret)
2031 goto out;
2032
2033 if (ctx.optlen == -1) {
2034 /* optlen set to -1, bypass kernel */
2035 ret = 1;
2036 } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
2037 /* optlen is out of bounds */
2038 if (*optlen > PAGE_SIZE && ctx.optlen >= 0) {
2039 pr_info_once("bpf setsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
2040 ctx.optlen, max_optlen);
2041 ret = 0;
2042 goto out;
2043 }
2044 ret = -EFAULT;
2045 } else {
2046 /* optlen within bounds, run kernel handler */
2047 ret = 0;
2048
2049 /* export any potential modifications */
2050 *level = ctx.level;
2051 *optname = ctx.optname;
2052
2053 /* optlen == 0 from BPF indicates that we should
2054 * use original userspace data.
2055 */
2056 if (ctx.optlen != 0) {
2057 *optlen = ctx.optlen;
2058 /* We've used bpf_sockopt_kern->buf as an intermediary
2059 * storage, but the BPF program indicates that we need
2060 * to pass this data to the kernel setsockopt handler.
2061 * No way to export on-stack buf, have to allocate a
2062 * new buffer.
2063 */
2064 if (!sockopt_buf_allocated(&ctx, &buf)) {
2065 void *p = kmalloc(ctx.optlen, GFP_USER);
2066
2067 if (!p) {
2068 ret = -ENOMEM;
2069 goto out;
2070 }
2071 memcpy(p, ctx.optval, ctx.optlen);
2072 *kernel_optval = p;
2073 } else {
2074 *kernel_optval = ctx.optval;
2075 }
2076 /* export and don't free sockopt buf */
2077 return 0;
2078 }
2079 }
2080
2081 out:
2082 sockopt_free_buf(&ctx, &buf);
2083 return ret;
2084 }
2085
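/*
 * Sketch of the optlen convention handled above, seen from the BPF side
 * (libbpf conventions; program name is hypothetical, and IPPROTO_IP/IP_TTL
 * are assumed to come from the usual uapi headers). Returning 1 with
 * optlen == -1 skips the kernel handler entirely, while leaving optlen
 * untouched lets the (possibly modified) buffer through:
 *
 *	SEC("cgroup/setsockopt")
 *	int swallow_ip_ttl(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == IPPROTO_IP && ctx->optname == IP_TTL)
 *			ctx->optlen = -1;	// bypass kernel handler
 *		return 1;
 *	}
 */
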
2086 int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
2087 int optname, sockptr_t optval,
2088 sockptr_t optlen, int max_optlen,
2089 int retval)
2090 {
2091 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
2092 struct bpf_sockopt_buf buf = {};
2093 struct bpf_sockopt_kern ctx = {
2094 .sk = sk,
2095 .level = level,
2096 .optname = optname,
2097 .current_task = current,
2098 };
2099 int orig_optlen;
2100 int ret;
2101
2102 orig_optlen = max_optlen;
2103 ctx.optlen = max_optlen;
2104 max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
2105 if (max_optlen < 0)
2106 return max_optlen;
2107
2108 if (!retval) {
2109 /* If kernel getsockopt finished successfully,
2110 * copy whatever was returned to the user back
2111 * into our temporary buffer. Set optlen to the
2112 * one that kernel returned as well to let
2113 * BPF programs inspect the value.
2114 */
2115 if (copy_from_sockptr(&ctx.optlen, optlen,
2116 sizeof(ctx.optlen))) {
2117 ret = -EFAULT;
2118 goto out;
2119 }
2120
2121 if (ctx.optlen < 0) {
2122 ret = -EFAULT;
2123 goto out;
2124 }
2125 orig_optlen = ctx.optlen;
2126
2127 if (copy_from_sockptr(ctx.optval, optval,
2128 min(ctx.optlen, max_optlen))) {
2129 ret = -EFAULT;
2130 goto out;
2131 }
2132 }
2133
2134 lock_sock(sk);
2135 ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
2136 &ctx, bpf_prog_run, retval, NULL);
2137 release_sock(sk);
2138
2139 if (ret < 0)
2140 goto out;
2141
2142 if (!sockptr_is_null(optval) &&
2143 (ctx.optlen > max_optlen || ctx.optlen < 0)) {
2144 if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
2145 pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
2146 ctx.optlen, max_optlen);
2147 ret = retval;
2148 goto out;
2149 }
2150 ret = -EFAULT;
2151 goto out;
2152 }
2153
2154 if (ctx.optlen != 0) {
2155 if (!sockptr_is_null(optval) &&
2156 copy_to_sockptr(optval, ctx.optval, ctx.optlen)) {
2157 ret = -EFAULT;
2158 goto out;
2159 }
2160 if (copy_to_sockptr(optlen, &ctx.optlen, sizeof(ctx.optlen))) {
2161 ret = -EFAULT;
2162 goto out;
2163 }
2164 }
2165
2166 out:
2167 sockopt_free_buf(&ctx, &buf);
2168 return ret;
2169 }
2170
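/*
 * Sketch of a getsockopt filter as run by the function above (libbpf
 * conventions; name is hypothetical, SOL_SOCKET/SO_MARK assumed from the
 * usual headers). The bounds check against optval_end is required by the
 * verifier before touching the buffer:
 *
 *	SEC("cgroup/getsockopt")
 *	int hide_so_mark(struct bpf_sockopt *ctx)
 *	{
 *		int *val = ctx->optval;
 *
 *		if (ctx->level == SOL_SOCKET && ctx->optname == SO_MARK &&
 *		    (void *)(val + 1) <= ctx->optval_end) {
 *			*val = 0;	// mask the kernel's answer
 *			ctx->retval = 0;
 *		}
 *		return 1;
 *	}
 */
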
2171 int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
2172 int optname, void *optval,
2173 int *optlen, int retval)
2174 {
2175 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
2176 struct bpf_sockopt_kern ctx = {
2177 .sk = sk,
2178 .level = level,
2179 .optname = optname,
2180 .optlen = *optlen,
2181 .optval = optval,
2182 .optval_end = optval + *optlen,
2183 .current_task = current,
2184 };
2185 int ret;
2186
2187 	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
2188 	 * user data back into the BPF buffer when retval != 0. This is
2189 	 * done as an optimization to avoid an extra copy, assuming the
2190 	 * kernel won't populate the data in case of an error.
2191 	 * Here we always pass the data and memset() should
2192 	 * be called if that data shouldn't be "exported".
2193 	 */
2194
2195 ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
2196 &ctx, bpf_prog_run, retval, NULL);
2197 if (ret < 0)
2198 return ret;
2199
2200 if (ctx.optlen > *optlen)
2201 return -EFAULT;
2202
2203 	/* BPF programs can shrink the buffer; export the modifications.
2204 	 */
2205 if (ctx.optlen != 0)
2206 *optlen = ctx.optlen;
2207
2208 return ret;
2209 }
2210 #endif
2211
2212 static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
2213 size_t *lenp)
2214 {
2215 ssize_t tmp_ret = 0, ret;
2216
2217 if (dir->header.parent) {
2218 tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
2219 if (tmp_ret < 0)
2220 return tmp_ret;
2221 }
2222
2223 ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
2224 if (ret < 0)
2225 return ret;
2226 *bufp += ret;
2227 *lenp -= ret;
2228 ret += tmp_ret;
2229
2230 /* Avoid leading slash. */
2231 if (!ret)
2232 return ret;
2233
2234 tmp_ret = strscpy(*bufp, "/", *lenp);
2235 if (tmp_ret < 0)
2236 return tmp_ret;
2237 *bufp += tmp_ret;
2238 *lenp -= tmp_ret;
2239
2240 return ret + tmp_ret;
2241 }
2242
2243 BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
2244 size_t, buf_len, u64, flags)
2245 {
2246 ssize_t tmp_ret = 0, ret;
2247
2248 if (!buf)
2249 return -EINVAL;
2250
2251 if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
2252 if (!ctx->head)
2253 return -EINVAL;
2254 tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
2255 if (tmp_ret < 0)
2256 return tmp_ret;
2257 }
2258
2259 ret = strscpy(buf, ctx->table->procname, buf_len);
2260
2261 return ret < 0 ? ret : tmp_ret + ret;
2262 }
2263
2264 static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
2265 .func = bpf_sysctl_get_name,
2266 .gpl_only = false,
2267 .ret_type = RET_INTEGER,
2268 .arg1_type = ARG_PTR_TO_CTX,
2269 .arg2_type = ARG_PTR_TO_MEM | MEM_WRITE,
2270 .arg3_type = ARG_CONST_SIZE,
2271 .arg4_type = ARG_ANYTHING,
2272 };
2273
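/*
 * Note that sysctl_cpy_dir() above joins path components with '/', so a
 * program sees e.g. "net/ipv4/tcp_mem". A hedged sketch of a consumer
 * (libbpf conventions, hypothetical name):
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_guard(struct bpf_sysctl *ctx)
 *	{
 *		char name[64] = {};
 *
 *		if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *			return 0;
 *		// deny writes outside the "net/" subtree
 *		if (ctx->write && __builtin_memcmp(name, "net/", 4))
 *			return 0;
 *		return 1;
 *	}
 */
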
2274 static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
2275 size_t src_len)
2276 {
2277 if (!dst)
2278 return -EINVAL;
2279
2280 if (!dst_len)
2281 return -E2BIG;
2282
2283 if (!src || !src_len) {
2284 memset(dst, 0, dst_len);
2285 return -EINVAL;
2286 }
2287
2288 memcpy(dst, src, min(dst_len, src_len));
2289
2290 if (dst_len > src_len) {
2291 memset(dst + src_len, '\0', dst_len - src_len);
2292 return src_len;
2293 }
2294
2295 dst[dst_len - 1] = '\0';
2296
2297 return -E2BIG;
2298 }
2299
2300 BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
2301 char *, buf, size_t, buf_len)
2302 {
2303 return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
2304 }
2305
2306 static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
2307 .func = bpf_sysctl_get_current_value,
2308 .gpl_only = false,
2309 .ret_type = RET_INTEGER,
2310 .arg1_type = ARG_PTR_TO_CTX,
2311 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
2312 .arg3_type = ARG_CONST_SIZE,
2313 };
2314
2315 BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
2316 size_t, buf_len)
2317 {
2318 if (!ctx->write) {
2319 if (buf && buf_len)
2320 memset(buf, '\0', buf_len);
2321 return -EINVAL;
2322 }
2323 return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
2324 }
2325
2326 static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
2327 .func = bpf_sysctl_get_new_value,
2328 .gpl_only = false,
2329 .ret_type = RET_INTEGER,
2330 .arg1_type = ARG_PTR_TO_CTX,
2331 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
2332 .arg3_type = ARG_CONST_SIZE,
2333 };
2334
2335 BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
2336 const char *, buf, size_t, buf_len)
2337 {
2338 if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
2339 return -EINVAL;
2340
2341 if (buf_len > PAGE_SIZE - 1)
2342 return -E2BIG;
2343
2344 memcpy(ctx->new_val, buf, buf_len);
2345 ctx->new_len = buf_len;
2346 ctx->new_updated = 1;
2347
2348 return 0;
2349 }
2350
2351 static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
2352 .func = bpf_sysctl_set_new_value,
2353 .gpl_only = false,
2354 .ret_type = RET_INTEGER,
2355 .arg1_type = ARG_PTR_TO_CTX,
2356 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
2357 .arg3_type = ARG_CONST_SIZE,
2358 };
2359
2360 static const struct bpf_func_proto *
2361 sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2362 {
2363 const struct bpf_func_proto *func_proto;
2364
2365 func_proto = cgroup_common_func_proto(func_id, prog);
2366 if (func_proto)
2367 return func_proto;
2368
2369 switch (func_id) {
2370 case BPF_FUNC_sysctl_get_name:
2371 return &bpf_sysctl_get_name_proto;
2372 case BPF_FUNC_sysctl_get_current_value:
2373 return &bpf_sysctl_get_current_value_proto;
2374 case BPF_FUNC_sysctl_get_new_value:
2375 return &bpf_sysctl_get_new_value_proto;
2376 case BPF_FUNC_sysctl_set_new_value:
2377 return &bpf_sysctl_set_new_value_proto;
2378 case BPF_FUNC_ktime_get_coarse_ns:
2379 return &bpf_ktime_get_coarse_ns_proto;
2380 case BPF_FUNC_perf_event_output:
2381 return &bpf_event_output_data_proto;
2382 default:
2383 return bpf_base_func_proto(func_id, prog);
2384 }
2385 }
2386
2387 static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
2388 const struct bpf_prog *prog,
2389 struct bpf_insn_access_aux *info)
2390 {
2391 const int size_default = sizeof(__u32);
2392
2393 if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
2394 return false;
2395
2396 switch (off) {
2397 case bpf_ctx_range(struct bpf_sysctl, write):
2398 if (type != BPF_READ)
2399 return false;
2400 bpf_ctx_record_field_size(info, size_default);
2401 return bpf_ctx_narrow_access_ok(off, size, size_default);
2402 case bpf_ctx_range(struct bpf_sysctl, file_pos):
2403 if (type == BPF_READ) {
2404 bpf_ctx_record_field_size(info, size_default);
2405 return bpf_ctx_narrow_access_ok(off, size, size_default);
2406 } else {
2407 return size == size_default;
2408 }
2409 default:
2410 return false;
2411 }
2412 }
2413
2414 static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
2415 const struct bpf_insn *si,
2416 struct bpf_insn *insn_buf,
2417 struct bpf_prog *prog, u32 *target_size)
2418 {
2419 struct bpf_insn *insn = insn_buf;
2420 u32 read_size;
2421
2422 switch (si->off) {
2423 case offsetof(struct bpf_sysctl, write):
2424 *insn++ = BPF_LDX_MEM(
2425 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
2426 bpf_target_off(struct bpf_sysctl_kern, write,
2427 sizeof_field(struct bpf_sysctl_kern,
2428 write),
2429 target_size));
2430 break;
2431 case offsetof(struct bpf_sysctl, file_pos):
2432 		/* ppos is a pointer, so it should be accessed via indirect
2433 		 * loads and stores. Also, for stores an additional temporary
2434 		 * register is used since neither src_reg nor dst_reg can be
2435 		 * overridden.
2436 */
2437 if (type == BPF_WRITE) {
2438 int treg = BPF_REG_9;
2439
2440 if (si->src_reg == treg || si->dst_reg == treg)
2441 --treg;
2442 if (si->src_reg == treg || si->dst_reg == treg)
2443 --treg;
2444 *insn++ = BPF_STX_MEM(
2445 BPF_DW, si->dst_reg, treg,
2446 offsetof(struct bpf_sysctl_kern, tmp_reg));
2447 *insn++ = BPF_LDX_MEM(
2448 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2449 treg, si->dst_reg,
2450 offsetof(struct bpf_sysctl_kern, ppos));
2451 *insn++ = BPF_RAW_INSN(
2452 BPF_CLASS(si->code) | BPF_MEM | BPF_SIZEOF(u32),
2453 treg, si->src_reg,
2454 bpf_ctx_narrow_access_offset(
2455 0, sizeof(u32), sizeof(loff_t)),
2456 si->imm);
2457 *insn++ = BPF_LDX_MEM(
2458 BPF_DW, treg, si->dst_reg,
2459 offsetof(struct bpf_sysctl_kern, tmp_reg));
2460 } else {
2461 *insn++ = BPF_LDX_MEM(
2462 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2463 si->dst_reg, si->src_reg,
2464 offsetof(struct bpf_sysctl_kern, ppos));
2465 read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
2466 *insn++ = BPF_LDX_MEM(
2467 BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
2468 bpf_ctx_narrow_access_offset(
2469 0, read_size, sizeof(loff_t)));
2470 }
2471 *target_size = sizeof(u32);
2472 break;
2473 }
2474
2475 return insn - insn_buf;
2476 }
2477
2478 const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
2479 .get_func_proto = sysctl_func_proto,
2480 .is_valid_access = sysctl_is_valid_access,
2481 .convert_ctx_access = sysctl_convert_ctx_access,
2482 };
2483
2484 const struct bpf_prog_ops cg_sysctl_prog_ops = {
2485 };
2486
2487 #ifdef CONFIG_NET
2488 BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
2489 {
2490 const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;
2491
2492 return net->net_cookie;
2493 }
2494
2495 static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
2496 .func = bpf_get_netns_cookie_sockopt,
2497 .gpl_only = false,
2498 .ret_type = RET_INTEGER,
2499 .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
2500 };
2501 #endif
2502
2503 static const struct bpf_func_proto *
2504 cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2505 {
2506 const struct bpf_func_proto *func_proto;
2507
2508 func_proto = cgroup_common_func_proto(func_id, prog);
2509 if (func_proto)
2510 return func_proto;
2511
2512 switch (func_id) {
2513 #ifdef CONFIG_NET
2514 case BPF_FUNC_get_netns_cookie:
2515 return &bpf_get_netns_cookie_sockopt_proto;
2516 case BPF_FUNC_sk_storage_get:
2517 return &bpf_sk_storage_get_proto;
2518 case BPF_FUNC_sk_storage_delete:
2519 return &bpf_sk_storage_delete_proto;
2520 case BPF_FUNC_setsockopt:
2521 if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2522 return &bpf_sk_setsockopt_proto;
2523 return NULL;
2524 case BPF_FUNC_getsockopt:
2525 if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2526 return &bpf_sk_getsockopt_proto;
2527 return NULL;
2528 #endif
2529 #ifdef CONFIG_INET
2530 case BPF_FUNC_tcp_sock:
2531 return &bpf_tcp_sock_proto;
2532 #endif
2533 case BPF_FUNC_perf_event_output:
2534 return &bpf_event_output_data_proto;
2535 default:
2536 return bpf_base_func_proto(func_id, prog);
2537 }
2538 }
2539
2540 static bool cg_sockopt_is_valid_access(int off, int size,
2541 enum bpf_access_type type,
2542 const struct bpf_prog *prog,
2543 struct bpf_insn_access_aux *info)
2544 {
2545 const int size_default = sizeof(__u32);
2546
2547 if (off < 0 || off >= sizeof(struct bpf_sockopt))
2548 return false;
2549
2550 if (off % size != 0)
2551 return false;
2552
2553 if (type == BPF_WRITE) {
2554 switch (off) {
2555 case offsetof(struct bpf_sockopt, retval):
2556 if (size != size_default)
2557 return false;
2558 return prog->expected_attach_type ==
2559 BPF_CGROUP_GETSOCKOPT;
2560 case offsetof(struct bpf_sockopt, optname):
2561 fallthrough;
2562 case offsetof(struct bpf_sockopt, level):
2563 if (size != size_default)
2564 return false;
2565 return prog->expected_attach_type ==
2566 BPF_CGROUP_SETSOCKOPT;
2567 case offsetof(struct bpf_sockopt, optlen):
2568 return size == size_default;
2569 default:
2570 return false;
2571 }
2572 }
2573
2574 switch (off) {
2575 case bpf_ctx_range_ptr(struct bpf_sockopt, sk):
2576 if (size != sizeof(__u64))
2577 return false;
2578 info->reg_type = PTR_TO_SOCKET;
2579 break;
2580 case bpf_ctx_range_ptr(struct bpf_sockopt, optval):
2581 if (size != sizeof(__u64))
2582 return false;
2583 info->reg_type = PTR_TO_PACKET;
2584 break;
2585 case bpf_ctx_range_ptr(struct bpf_sockopt, optval_end):
2586 if (size != sizeof(__u64))
2587 return false;
2588 info->reg_type = PTR_TO_PACKET_END;
2589 break;
2590 case bpf_ctx_range(struct bpf_sockopt, retval):
2591 if (size != size_default)
2592 return false;
2593 return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
2594 default:
2595 if (size != size_default)
2596 return false;
2597 break;
2598 }
2599 return true;
2600 }
2601
2602 #define CG_SOCKOPT_READ_FIELD(F) \
2603 BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F), \
2604 si->dst_reg, si->src_reg, \
2605 offsetof(struct bpf_sockopt_kern, F))
2606
2607 #define CG_SOCKOPT_WRITE_FIELD(F) \
2608 BPF_RAW_INSN((BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F) | \
2609 BPF_MEM | BPF_CLASS(si->code)), \
2610 si->dst_reg, si->src_reg, \
2611 offsetof(struct bpf_sockopt_kern, F), \
2612 si->imm)
2613
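/*
 * For instance, CG_SOCKOPT_READ_FIELD(level) expands (roughly) to a single
 * load through the context pointer:
 *
 *	BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
 *		    offsetof(struct bpf_sockopt_kern, level));
 *
 * BPF_FIELD_SIZEOF() picks BPF_W here because level is a 4-byte int.
 */
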
2614 static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
2615 const struct bpf_insn *si,
2616 struct bpf_insn *insn_buf,
2617 struct bpf_prog *prog,
2618 u32 *target_size)
2619 {
2620 struct bpf_insn *insn = insn_buf;
2621
2622 switch (si->off) {
2623 case offsetof(struct bpf_sockopt, sk):
2624 *insn++ = CG_SOCKOPT_READ_FIELD(sk);
2625 break;
2626 case offsetof(struct bpf_sockopt, level):
2627 if (type == BPF_WRITE)
2628 *insn++ = CG_SOCKOPT_WRITE_FIELD(level);
2629 else
2630 *insn++ = CG_SOCKOPT_READ_FIELD(level);
2631 break;
2632 case offsetof(struct bpf_sockopt, optname):
2633 if (type == BPF_WRITE)
2634 *insn++ = CG_SOCKOPT_WRITE_FIELD(optname);
2635 else
2636 *insn++ = CG_SOCKOPT_READ_FIELD(optname);
2637 break;
2638 case offsetof(struct bpf_sockopt, optlen):
2639 if (type == BPF_WRITE)
2640 *insn++ = CG_SOCKOPT_WRITE_FIELD(optlen);
2641 else
2642 *insn++ = CG_SOCKOPT_READ_FIELD(optlen);
2643 break;
2644 case offsetof(struct bpf_sockopt, retval):
2645 BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);
2646
2647 if (type == BPF_WRITE) {
2648 int treg = BPF_REG_9;
2649
2650 if (si->src_reg == treg || si->dst_reg == treg)
2651 --treg;
2652 if (si->src_reg == treg || si->dst_reg == treg)
2653 --treg;
2654 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg,
2655 offsetof(struct bpf_sockopt_kern, tmp_reg));
2656 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2657 treg, si->dst_reg,
2658 offsetof(struct bpf_sockopt_kern, current_task));
2659 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2660 treg, treg,
2661 offsetof(struct task_struct, bpf_ctx));
2662 *insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_MEM |
2663 BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2664 treg, si->src_reg,
2665 offsetof(struct bpf_cg_run_ctx, retval),
2666 si->imm);
2667 *insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
2668 offsetof(struct bpf_sockopt_kern, tmp_reg));
2669 } else {
2670 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2671 si->dst_reg, si->src_reg,
2672 offsetof(struct bpf_sockopt_kern, current_task));
2673 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2674 si->dst_reg, si->dst_reg,
2675 offsetof(struct task_struct, bpf_ctx));
2676 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2677 si->dst_reg, si->dst_reg,
2678 offsetof(struct bpf_cg_run_ctx, retval));
2679 }
2680 break;
2681 case offsetof(struct bpf_sockopt, optval):
2682 *insn++ = CG_SOCKOPT_READ_FIELD(optval);
2683 break;
2684 case offsetof(struct bpf_sockopt, optval_end):
2685 *insn++ = CG_SOCKOPT_READ_FIELD(optval_end);
2686 break;
2687 }
2688
2689 return insn - insn_buf;
2690 }
2691
2692 static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
2693 bool direct_write,
2694 const struct bpf_prog *prog)
2695 {
2696 	/* Nothing to do for the sockopt argument. The data is kzalloc()'ed.
2697 	 */
2698 return 0;
2699 }
2700
2701 const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
2702 .get_func_proto = cg_sockopt_func_proto,
2703 .is_valid_access = cg_sockopt_is_valid_access,
2704 .convert_ctx_access = cg_sockopt_convert_ctx_access,
2705 .gen_prologue = cg_sockopt_get_prologue,
2706 };
2707
2708 const struct bpf_prog_ops cg_sockopt_prog_ops = {
2709 };
2710
2711 /* Common helpers for cgroup hooks. */
2712 const struct bpf_func_proto *
2713 cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2714 {
2715 switch (func_id) {
2716 case BPF_FUNC_get_local_storage:
2717 return &bpf_get_local_storage_proto;
2718 case BPF_FUNC_get_retval:
2719 switch (prog->expected_attach_type) {
2720 case BPF_CGROUP_INET_INGRESS:
2721 case BPF_CGROUP_INET_EGRESS:
2722 case BPF_CGROUP_SOCK_OPS:
2723 case BPF_CGROUP_UDP4_RECVMSG:
2724 case BPF_CGROUP_UDP6_RECVMSG:
2725 case BPF_CGROUP_UNIX_RECVMSG:
2726 case BPF_CGROUP_INET4_GETPEERNAME:
2727 case BPF_CGROUP_INET6_GETPEERNAME:
2728 case BPF_CGROUP_UNIX_GETPEERNAME:
2729 case BPF_CGROUP_INET4_GETSOCKNAME:
2730 case BPF_CGROUP_INET6_GETSOCKNAME:
2731 case BPF_CGROUP_UNIX_GETSOCKNAME:
2732 return NULL;
2733 default:
2734 return &bpf_get_retval_proto;
2735 }
2736 case BPF_FUNC_set_retval:
2737 switch (prog->expected_attach_type) {
2738 case BPF_CGROUP_INET_INGRESS:
2739 case BPF_CGROUP_INET_EGRESS:
2740 case BPF_CGROUP_SOCK_OPS:
2741 case BPF_CGROUP_UDP4_RECVMSG:
2742 case BPF_CGROUP_UDP6_RECVMSG:
2743 case BPF_CGROUP_UNIX_RECVMSG:
2744 case BPF_CGROUP_INET4_GETPEERNAME:
2745 case BPF_CGROUP_INET6_GETPEERNAME:
2746 case BPF_CGROUP_UNIX_GETPEERNAME:
2747 case BPF_CGROUP_INET4_GETSOCKNAME:
2748 case BPF_CGROUP_INET6_GETSOCKNAME:
2749 case BPF_CGROUP_UNIX_GETSOCKNAME:
2750 return NULL;
2751 default:
2752 return &bpf_set_retval_proto;
2753 }
2754 default:
2755 return NULL;
2756 }
2757 }
2758
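/*
 * From userspace, programs reach all of these hooks via BPF_PROG_ATTACH or
 * a bpf link on a cgroup fd. A minimal libbpf-style sketch (the cgroup
 * path and error handler are hypothetical):
 *
 *	int cg_fd = open("/sys/fs/cgroup/app", O_RDONLY);
 *	struct bpf_link *link = bpf_program__attach_cgroup(prog, cg_fd);
 *
 *	if (!link)
 *		handle_error();	// legacy alternative: bpf_prog_attach()
 */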