syscall.c @ 50501936288d6a29d7ef78f25d00e33240fad45f (old) → syscall.c @ e420bed025071a623d2720a92bc2245c84757ecb (new)
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 */
4#include <linux/bpf.h>
5#include <linux/bpf-cgroup.h>
6#include <linux/bpf_trace.h>
7#include <linux/bpf_lirc.h>
8#include <linux/bpf_verifier.h>

--- 23 unchanged lines hidden ---

32#include <linux/poll.h>
33#include <linux/sort.h>
34#include <linux/bpf-netns.h>
35#include <linux/rcupdate_trace.h>
36#include <linux/memcontrol.h>
37#include <linux/trace_events.h>
38#include <net/netfilter/nf_bpf_link.h>
39
40#include <net/tcx.h>
41
42#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
43 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
44 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
45#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
46#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
47#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
48 IS_FD_HASH(map))
49

--- 56 unchanged lines hidden ---

106const struct bpf_map_ops bpf_map_offload_ops = {
107 .map_meta_equal = bpf_map_meta_equal,
108 .map_alloc = bpf_map_offload_map_alloc,
109 .map_free = bpf_map_offload_map_free,
110 .map_check_btf = map_check_no_btf,
111 .map_mem_usage = bpf_map_offload_map_mem_usage,
112};
113
112static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
113{
114 const struct bpf_map_ops *ops;
115 u32 type = attr->map_type;
116 struct bpf_map *map;
117 int err;
118
119 if (type >= ARRAY_SIZE(bpf_map_types))
120 return ERR_PTR(-EINVAL);
121 type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
122 ops = bpf_map_types[type];
123 if (!ops)
124 return ERR_PTR(-EINVAL);
125
126 if (ops->map_alloc_check) {
127 err = ops->map_alloc_check(attr);
128 if (err)
129 return ERR_PTR(err);
130 }
131 if (attr->map_ifindex)
132 ops = &bpf_map_offload_ops;
133 if (!ops->map_mem_usage)
134 return ERR_PTR(-EINVAL);
135 map = ops->map_alloc(attr);
136 if (IS_ERR(map))
137 return map;
138 map->ops = ops;
139 map->map_type = type;
140 return map;
141}
114static void bpf_map_write_active_inc(struct bpf_map *map)
115{
116 atomic64_inc(&map->writecnt);
117}
118
119static void bpf_map_write_active_dec(struct bpf_map *map)
120{
121 atomic64_dec(&map->writecnt);

--- 971 unchanged lines hidden ---

1093 bpf_map_free_record(map);
1094 return ret;
1095}
1096
1097#define BPF_MAP_CREATE_LAST_FIELD map_extra
1098/* called via syscall */
1099static int map_create(union bpf_attr *attr)
1100{
1101 const struct bpf_map_ops *ops;
1102 int numa_node = bpf_map_attr_numa_node(attr);
1103 u32 map_type = attr->map_type;
1104 struct bpf_map *map;
1105 int f_flags;
1106 int err;
1107
1108 err = CHECK_ATTR(BPF_MAP_CREATE);
1109 if (err)
1110 return -EINVAL;
1111

--- 14 unchanged lines hidden ---

1126 return f_flags;
1127
1128 if (numa_node != NUMA_NO_NODE &&
1129 ((unsigned int)numa_node >= nr_node_ids ||
1130 !node_online(numa_node)))
1131 return -EINVAL;
1132
1133 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
1161 map = find_and_alloc_map(attr);
1134 map_type = attr->map_type;
1135 if (map_type >= ARRAY_SIZE(bpf_map_types))
1136 return -EINVAL;
1137 map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
1138 ops = bpf_map_types[map_type];
1139 if (!ops)
1140 return -EINVAL;
1141
1142 if (ops->map_alloc_check) {
1143 err = ops->map_alloc_check(attr);
1144 if (err)
1145 return err;
1146 }
1147 if (attr->map_ifindex)
1148 ops = &bpf_map_offload_ops;
1149 if (!ops->map_mem_usage)
1150 return -EINVAL;
1151
1152 /* Intent here is for unprivileged_bpf_disabled to block BPF map
1153 * creation for unprivileged users; other actions depend
1154 * on fd availability and access to bpffs, so are dependent on
1155 * object creation success. Even with unprivileged BPF disabled,
1156 * capability checks are still carried out.
1157 */
1158 if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
1159 return -EPERM;
1160
1161 /* check privileged map type permissions */
1162 switch (map_type) {
1163 case BPF_MAP_TYPE_ARRAY:
1164 case BPF_MAP_TYPE_PERCPU_ARRAY:
1165 case BPF_MAP_TYPE_PROG_ARRAY:
1166 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1167 case BPF_MAP_TYPE_CGROUP_ARRAY:
1168 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1169 case BPF_MAP_TYPE_HASH:
1170 case BPF_MAP_TYPE_PERCPU_HASH:
1171 case BPF_MAP_TYPE_HASH_OF_MAPS:
1172 case BPF_MAP_TYPE_RINGBUF:
1173 case BPF_MAP_TYPE_USER_RINGBUF:
1174 case BPF_MAP_TYPE_CGROUP_STORAGE:
1175 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
1176 /* unprivileged */
1177 break;
1178 case BPF_MAP_TYPE_SK_STORAGE:
1179 case BPF_MAP_TYPE_INODE_STORAGE:
1180 case BPF_MAP_TYPE_TASK_STORAGE:
1181 case BPF_MAP_TYPE_CGRP_STORAGE:
1182 case BPF_MAP_TYPE_BLOOM_FILTER:
1183 case BPF_MAP_TYPE_LPM_TRIE:
1184 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
1185 case BPF_MAP_TYPE_STACK_TRACE:
1186 case BPF_MAP_TYPE_QUEUE:
1187 case BPF_MAP_TYPE_STACK:
1188 case BPF_MAP_TYPE_LRU_HASH:
1189 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
1190 case BPF_MAP_TYPE_STRUCT_OPS:
1191 case BPF_MAP_TYPE_CPUMAP:
1192 if (!bpf_capable())
1193 return -EPERM;
1194 break;
1195 case BPF_MAP_TYPE_SOCKMAP:
1196 case BPF_MAP_TYPE_SOCKHASH:
1197 case BPF_MAP_TYPE_DEVMAP:
1198 case BPF_MAP_TYPE_DEVMAP_HASH:
1199 case BPF_MAP_TYPE_XSKMAP:
1200 if (!capable(CAP_NET_ADMIN))
1201 return -EPERM;
1202 break;
1203 default:
1204 WARN(1, "unsupported map type %d", map_type);
1205 return -EPERM;
1206 }
1207
1208 map = ops->map_alloc(attr);
1209 if (IS_ERR(map))
1210 return PTR_ERR(map);
1211 map->ops = ops;
1212 map->map_type = map_type;
1213
1214 err = bpf_obj_name_cpy(map->name, attr->map_name,
1215 sizeof(attr->map_name));
1216 if (err < 0)
1217 goto free_map;
1218
1219 atomic64_set(&map->refcnt, 1);
1220 atomic64_set(&map->usercnt, 1);

--- 754 unchanged lines hidden ---
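The reworked map_create() above now carries the unprivileged-BPF gate and the per-map-type capability switch itself (the old copy of this check in __sys_bpf() is dropped further down). A hedged user-space sketch of what that means for callers, raw bpf(2) only; with kernel.unprivileged_bpf_disabled set, an unprivileged BPF_MAP_CREATE now fails with EPERM inside map_create():

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;	/* "unprivileged" bucket in the switch above */
	attr.key_size = sizeof(__u32);		/* arrays require 4-byte keys */
	attr.value_size = sizeof(__u64);
	attr.max_entries = 1;

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		perror("BPF_MAP_CREATE");	/* EPERM when unprivileged BPF is disabled */
	else
		printf("map fd: %d\n", fd);
	return 0;
}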

1975 if (IS_ERR(map))
1976 return PTR_ERR(map);
1977
1978 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
1979 fdput(f);
1980 return -ENOTSUPP;
1981 }
1982
1983 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1984 fdput(f);
1985 return -EPERM;
1986 }
1987
1988 mutex_lock(&map->freeze_mutex);
1989 if (bpf_map_write_active(map)) {
1990 err = -EBUSY;
1991 goto err_put;
1992 }
1993 if (READ_ONCE(map->frozen)) {
1994 err = -EBUSY;
1995 goto err_put;
1996 }
1943 if (!bpf_capable()) {
1944 err = -EPERM;
1945 goto err_put;
1946 }
1997
1998 WRITE_ONCE(map->frozen, true);
1999err_put:
2000 mutex_unlock(&map->freeze_mutex);
2001 fdput(f);
2002 return err;
2003}
2004

--- 546 unchanged lines hidden ---

2551
2552static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2553{
2554 enum bpf_prog_type type = attr->prog_type;
2555 struct bpf_prog *prog, *dst_prog = NULL;
2556 struct btf *attach_btf = NULL;
2557 int err;
2558 char license[128];
2509 bool is_gpl;
2559
2560 if (CHECK_ATTR(BPF_PROG_LOAD))
2561 return -EINVAL;
2562
2563 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2564 BPF_F_ANY_ALIGNMENT |
2565 BPF_F_TEST_STATE_FREQ |
2566 BPF_F_SLEEPABLE |
2567 BPF_F_TEST_RND_HI32 |
2568 BPF_F_XDP_HAS_FRAGS |
2569 BPF_F_XDP_DEV_BOUND_ONLY))
2570 return -EINVAL;
2571
2572 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2573 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2574 !bpf_capable())
2575 return -EPERM;
2576
2528 /* copy eBPF program license from user space */
2529 if (strncpy_from_bpfptr(license,
2530 make_bpfptr(attr->license, uattr.is_kernel),
2531 sizeof(license) - 1) < 0)
2532 return -EFAULT;
2533 license[sizeof(license) - 1] = 0;
2577 /* Intent here is for unprivileged_bpf_disabled to block BPF program
2578 * creation for unprivileged users; other actions depend
2579 * on fd availability and access to bpffs, so are dependent on
2580 * object creation success. Even with unprivileged BPF disabled,
2581 * capability checks are still carried out for these
2582 * and other operations.
2583 */
2584 if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
2585 return -EPERM;
2586
2535 /* eBPF programs must be GPL compatible to use GPL-ed functions */
2536 is_gpl = license_is_gpl_compatible(license);
2537
2587 if (attr->insn_cnt == 0 ||
2588 attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2589 return -E2BIG;
2590 if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2591 type != BPF_PROG_TYPE_CGROUP_SKB &&
2592 !bpf_capable())
2593 return -EPERM;
2594

--- 66 unchanged lines hidden ---

2661 prog->aux->user = get_current_user();
2662 prog->len = attr->insn_cnt;
2663
2664 err = -EFAULT;
2665 if (copy_from_bpfptr(prog->insns,
2666 make_bpfptr(attr->insns, uattr.is_kernel),
2667 bpf_prog_insn_size(prog)) != 0)
2668 goto free_prog_sec;
2669 /* copy eBPF program license from user space */
2670 if (strncpy_from_bpfptr(license,
2671 make_bpfptr(attr->license, uattr.is_kernel),
2672 sizeof(license) - 1) < 0)
2673 goto free_prog_sec;
2674 license[sizeof(license) - 1] = 0;
2675
2676 /* eBPF programs must be GPL compatible to use GPL-ed functions */
2677 prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2678
2679 prog->orig_prog = NULL;
2680 prog->jited = 0;
2681
2682 atomic64_set(&prog->aux->refcnt, 1);
2625 prog->gpl_compatible = is_gpl ? 1 : 0;
2683
2684 if (bpf_prog_is_dev_bound(prog->aux)) {
2685 err = bpf_prog_dev_bound_init(prog, attr);
2686 if (err)
2687 goto free_prog_sec;
2688 }
2689
2690 if (type == BPF_PROG_TYPE_EXT && dst_prog &&

--- 62 unchanged lines hidden ---

2753 security_bpf_prog_free(prog->aux);
2754free_prog:
2755 if (prog->aux->attach_btf)
2756 btf_put(prog->aux->attach_btf);
2757 bpf_prog_free(prog);
2758 return err;
2759}
2760
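For orientation, a hedged user-space sketch of the load path above, raw bpf(2) only; the helper name and the two-instruction "return 0" program are illustrative, and BPF_PROG_TYPE_SOCKET_FILTER is chosen because it is one of the two types loadable without bpf_capable() per the check above:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int load_return0_prog(void)
{
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },	/* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },		/* exit   */
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
	attr.license = (__u64)(unsigned long)"GPL";	/* copied and checked as above */
	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}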
2704#define BPF_OBJ_LAST_FIELD file_flags
2761#define BPF_OBJ_LAST_FIELD path_fd
2762
2763static int bpf_obj_pin(const union bpf_attr *attr)
2764{
2708 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2765 int path_fd;
2766
2767 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
2768 return -EINVAL;
2769
2711 return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2770 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2771 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2772 return -EINVAL;
2773
2774 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2775 return bpf_obj_pin_user(attr->bpf_fd, path_fd,
2776 u64_to_user_ptr(attr->pathname));
2777}
2778
2779static int bpf_obj_get(const union bpf_attr *attr)
2780{
2781 int path_fd;
2782
2783 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2717 attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2784 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
2785 return -EINVAL;
2786
2720 return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2787 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2788 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2789 return -EINVAL;
2790
2791 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2792 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
2793 attr->file_flags);
2794}
2795
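A hedged sketch of the new pinning mode added above; pin_at() is an illustrative helper, not a kernel or libbpf API, and the dir_fd/name split mirrors openat(2)-style relative lookups:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

/* Pin bpf_fd at "name" relative to dir_fd (e.g. an fd for a bpffs
 * directory), using the BPF_F_PATH_FD flag introduced above. */
static int pin_at(int bpf_fd, int dir_fd, const char *name)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)name;
	attr.bpf_fd = bpf_fd;
	attr.file_flags = BPF_F_PATH_FD;
	attr.path_fd = dir_fd;
	return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}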
2796void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2797 const struct bpf_link_ops *ops, struct bpf_prog *prog)
2798{
2799 atomic64_set(&link->refcnt, 1);
2800 link->type = type;

--- 47 unchanged lines hidden ---

2848
2849static void bpf_link_put_deferred(struct work_struct *work)
2850{
2851 struct bpf_link *link = container_of(work, struct bpf_link, work);
2852
2853 bpf_link_free(link);
2854}
2855
2784/* bpf_link_put can be called from atomic context, but ensures that resources
2785 * are freed from process context
2786 */
2856/* bpf_link_put might be called from atomic context. It needs to be called
2857 * from sleepable context in order to acquire sleeping locks during the process.
2858 */
2859void bpf_link_put(struct bpf_link *link)
2860{
2861 if (!atomic64_dec_and_test(&link->refcnt))
2862 return;
2863
2792 if (in_atomic()) {
2793 INIT_WORK(&link->work, bpf_link_put_deferred);
2794 schedule_work(&link->work);
2795 } else {
2796 bpf_link_free(link);
2797 }
2864 INIT_WORK(&link->work, bpf_link_put_deferred);
2865 schedule_work(&link->work);
2866}
2867EXPORT_SYMBOL(bpf_link_put);
2868
2869static void bpf_link_put_direct(struct bpf_link *link)
2870{
2871 if (!atomic64_dec_and_test(&link->refcnt))
2872 return;
2873 bpf_link_free(link);
2874}
2875
2876static int bpf_link_release(struct inode *inode, struct file *filp)
2877{
2878 struct bpf_link *link = filp->private_data;
2879
2805 bpf_link_put(link);
2880 bpf_link_put_direct(link);
2881 return 0;
2882}
2883
2884#ifdef CONFIG_PROC_FS
2885#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2886#define BPF_MAP_TYPE(_id, _ops)
2887#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2888static const char *bpf_link_type_strs[] = {

--- 153 unchanged lines hidden ---

3042 kfree(tr_link);
3043}
3044
3045static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3046 struct seq_file *seq)
3047{
3048 struct bpf_tracing_link *tr_link =
3049 container_of(link, struct bpf_tracing_link, link.link);
3050 u32 target_btf_id, target_obj_id;
3051
3052 bpf_trampoline_unpack_key(tr_link->trampoline->key,
3053 &target_obj_id, &target_btf_id);
3054 seq_printf(seq,
2977 "attach_type:\t%d\n",
2978 tr_link->attach_type);
3055 "attach_type:\t%d\n"
3056 "target_obj_id:\t%u\n"
3057 "target_btf_id:\t%u\n",
3058 tr_link->attach_type,
3059 target_obj_id,
3060 target_btf_id);
3061}
3062
3063static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3064 struct bpf_link_info *info)
3065{
3066 struct bpf_tracing_link *tr_link =
3067 container_of(link, struct bpf_tracing_link, link.link);
3068

--- 223 unchanged lines hidden ---

3292 struct bpf_raw_tp_link *raw_tp_link =
3293 container_of(link, struct bpf_raw_tp_link, link);
3294
3295 seq_printf(seq,
3296 "tp_name:\t%s\n",
3297 raw_tp_link->btp->tp->name);
3298}
3299
3300static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3301 u32 len)
3302{
3303 if (ulen >= len + 1) {
3304 if (copy_to_user(ubuf, buf, len + 1))
3305 return -EFAULT;
3306 } else {
3307 char zero = '\0';
3308
3309 if (copy_to_user(ubuf, buf, ulen - 1))
3310 return -EFAULT;
3311 if (put_user(zero, ubuf + ulen - 1))
3312 return -EFAULT;
3313 return -ENOSPC;
3314 }
3315
3316 return 0;
3317}
3318
3319static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3320 struct bpf_link_info *info)
3321{
3322 struct bpf_raw_tp_link *raw_tp_link =
3323 container_of(link, struct bpf_raw_tp_link, link);
3324 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3325 const char *tp_name = raw_tp_link->btp->tp->name;
3326 u32 ulen = info->raw_tracepoint.tp_name_len;
3327 size_t tp_len = strlen(tp_name);
3328
3329 if (!ulen ^ !ubuf)
3330 return -EINVAL;
3331
3332 info->raw_tracepoint.tp_name_len = tp_len + 1;
3333
3334 if (!ubuf)
3335 return 0;
3336
3236 if (ulen >= tp_len + 1) {
3237 if (copy_to_user(ubuf, tp_name, tp_len + 1))
3238 return -EFAULT;
3239 } else {
3240 char zero = '\0';
3241
3242 if (copy_to_user(ubuf, tp_name, ulen - 1))
3243 return -EFAULT;
3244 if (put_user(zero, ubuf + ulen - 1))
3245 return -EFAULT;
3246 return -ENOSPC;
3247 }
3248
3249 return 0;
3337 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3338}
3339
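bpf_copy_to_user() above implements the usual two-call contract: the kernel writes the required size back through the length field and NUL-truncates a short buffer, returning -ENOSPC. A hedged user-space sketch against the raw_tp fill above, using libbpf's bpf_obj_get_info_by_fd():

#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>

static int print_tp_name(int link_fd)
{
	struct bpf_link_info info;
	__u32 info_len = sizeof(info);
	char name[128];

	memset(&info, 0, sizeof(info));
	info.raw_tracepoint.tp_name = (__u64)(unsigned long)name;
	info.raw_tracepoint.tp_name_len = sizeof(name);
	if (bpf_obj_get_info_by_fd(link_fd, &info, &info_len))
		return -1;	/* a short buffer still holds a NUL-truncated name */
	printf("tp_name: %s (kernel reports %u bytes needed)\n",
	       name, info.raw_tracepoint.tp_name_len);
	return 0;
}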
3340static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3341 .release = bpf_raw_tp_link_release,
3342 .dealloc = bpf_raw_tp_link_dealloc,
3343 .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3344 .fill_link_info = bpf_raw_tp_link_fill_link_info,
3345};

--- 15 unchanged lines hidden ---

3361
3362static void bpf_perf_link_dealloc(struct bpf_link *link)
3363{
3364 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3365
3366 kfree(perf_link);
3367}
3368
3369static int bpf_perf_link_fill_common(const struct perf_event *event,
3370 char __user *uname, u32 ulen,
3371 u64 *probe_offset, u64 *probe_addr,
3372 u32 *fd_type)
3373{
3374 const char *buf;
3375 u32 prog_id;
3376 size_t len;
3377 int err;
3378
3379 if (!ulen ^ !uname)
3380 return -EINVAL;
3381 if (!uname)
3382 return 0;
3383
3384 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3385 probe_offset, probe_addr);
3386 if (err)
3387 return err;
3388
3389 if (buf) {
3390 len = strlen(buf);
3391 err = bpf_copy_to_user(uname, buf, ulen, len);
3392 if (err)
3393 return err;
3394 } else {
3395 char zero = '\0';
3396
3397 if (put_user(zero, uname))
3398 return -EFAULT;
3399 }
3400 return 0;
3401}
3402
3403#ifdef CONFIG_KPROBE_EVENTS
3404static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3405 struct bpf_link_info *info)
3406{
3407 char __user *uname;
3408 u64 addr, offset;
3409 u32 ulen, type;
3410 int err;
3411
3412 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3413 ulen = info->perf_event.kprobe.name_len;
3414 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3415 &type);
3416 if (err)
3417 return err;
3418 if (type == BPF_FD_TYPE_KRETPROBE)
3419 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3420 else
3421 info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3422
3423 info->perf_event.kprobe.offset = offset;
3424 if (!kallsyms_show_value(current_cred()))
3425 addr = 0;
3426 info->perf_event.kprobe.addr = addr;
3427 return 0;
3428}
3429#endif
3430
3431#ifdef CONFIG_UPROBE_EVENTS
3432static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3433 struct bpf_link_info *info)
3434{
3435 char __user *uname;
3436 u64 addr, offset;
3437 u32 ulen, type;
3438 int err;
3439
3440 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3441 ulen = info->perf_event.uprobe.name_len;
3442 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3443 &type);
3444 if (err)
3445 return err;
3446
3447 if (type == BPF_FD_TYPE_URETPROBE)
3448 info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3449 else
3450 info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3451 info->perf_event.uprobe.offset = offset;
3452 return 0;
3453}
3454#endif
3455
3456static int bpf_perf_link_fill_probe(const struct perf_event *event,
3457 struct bpf_link_info *info)
3458{
3459#ifdef CONFIG_KPROBE_EVENTS
3460 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3461 return bpf_perf_link_fill_kprobe(event, info);
3462#endif
3463#ifdef CONFIG_UPROBE_EVENTS
3464 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3465 return bpf_perf_link_fill_uprobe(event, info);
3466#endif
3467 return -EOPNOTSUPP;
3468}
3469
3470static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3471 struct bpf_link_info *info)
3472{
3473 char __user *uname;
3474 u32 ulen;
3475
3476 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3477 ulen = info->perf_event.tracepoint.name_len;
3478 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3479 return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL);
3480}
3481
3482static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3483 struct bpf_link_info *info)
3484{
3485 info->perf_event.event.type = event->attr.type;
3486 info->perf_event.event.config = event->attr.config;
3487 info->perf_event.type = BPF_PERF_EVENT_EVENT;
3488 return 0;
3489}
3490
3491static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3492 struct bpf_link_info *info)
3493{
3494 struct bpf_perf_link *perf_link;
3495 const struct perf_event *event;
3496
3497 perf_link = container_of(link, struct bpf_perf_link, link);
3498 event = perf_get_event(perf_link->perf_file);
3499 if (IS_ERR(event))
3500 return PTR_ERR(event);
3501
3502 switch (event->prog->type) {
3503 case BPF_PROG_TYPE_PERF_EVENT:
3504 return bpf_perf_link_fill_perf_event(event, info);
3505 case BPF_PROG_TYPE_TRACEPOINT:
3506 return bpf_perf_link_fill_tracepoint(event, info);
3507 case BPF_PROG_TYPE_KPROBE:
3508 return bpf_perf_link_fill_probe(event, info);
3509 default:
3510 return -EOPNOTSUPP;
3511 }
3512}
3513
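A hedged sketch reading the perf-event link info filled in above; the field names follow the bpf_link_info uapi this code writes to, everything else is illustrative:

#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>

static int print_kprobe_link(int link_fd)
{
	struct bpf_link_info info;
	__u32 info_len = sizeof(info);
	char func[128];

	memset(&info, 0, sizeof(info));
	info.perf_event.kprobe.func_name = (__u64)(unsigned long)func;
	info.perf_event.kprobe.name_len = sizeof(func);
	if (bpf_obj_get_info_by_fd(link_fd, &info, &info_len))
		return -1;
	if (info.perf_event.type != BPF_PERF_EVENT_KPROBE &&
	    info.perf_event.type != BPF_PERF_EVENT_KRETPROBE)
		return -1;	/* tracepoint/uprobe/event flavors not handled here */
	printf("%s+0x%x\n", func, info.perf_event.kprobe.offset);
	return 0;
}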
3514static const struct bpf_link_ops bpf_perf_link_lops = {
3515 .release = bpf_perf_link_release,
3516 .dealloc = bpf_perf_link_dealloc,
3517 .fill_link_info = bpf_perf_link_fill_link_info,
3518};
3519
3520static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3521{
3522 struct bpf_link_primer link_primer;
3523 struct bpf_perf_link *link;
3524 struct perf_event *event;
3525 struct file *perf_file;

--- 211 unchanged lines hidden ---

3737 case BPF_LSM_MAC:
3738 return BPF_PROG_TYPE_LSM;
3739 case BPF_SK_LOOKUP:
3740 return BPF_PROG_TYPE_SK_LOOKUP;
3741 case BPF_XDP:
3742 return BPF_PROG_TYPE_XDP;
3743 case BPF_LSM_CGROUP:
3744 return BPF_PROG_TYPE_LSM;
3745 case BPF_TCX_INGRESS:
3746 case BPF_TCX_EGRESS:
3747 return BPF_PROG_TYPE_SCHED_CLS;
3748 default:
3749 return BPF_PROG_TYPE_UNSPEC;
3750 }
3751}
3752
3516#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
3753#define BPF_PROG_ATTACH_LAST_FIELD expected_revision
3754
3518#define BPF_F_ATTACH_MASK \
3519 (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
3755#define BPF_F_ATTACH_MASK_BASE \
3756 (BPF_F_ALLOW_OVERRIDE | \
3757 BPF_F_ALLOW_MULTI | \
3758 BPF_F_REPLACE)
3759
3760#define BPF_F_ATTACH_MASK_MPROG \
3761 (BPF_F_REPLACE | \
3762 BPF_F_BEFORE | \
3763 BPF_F_AFTER | \
3764 BPF_F_ID | \
3765 BPF_F_LINK)
3766
3767static int bpf_prog_attach(const union bpf_attr *attr)
3768{
3769 enum bpf_prog_type ptype;
3770 struct bpf_prog *prog;
3771 u32 mask;
3772 int ret;
3773
3774 if (CHECK_ATTR(BPF_PROG_ATTACH))
3775 return -EINVAL;
3776
3530 if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
3531 return -EINVAL;
3532
3777 ptype = attach_type_to_prog_type(attr->attach_type);
3778 if (ptype == BPF_PROG_TYPE_UNSPEC)
3779 return -EINVAL;
3780 mask = bpf_mprog_supported(ptype) ?
3781 BPF_F_ATTACH_MASK_MPROG : BPF_F_ATTACH_MASK_BASE;
3782 if (attr->attach_flags & ~mask)
3783 return -EINVAL;
3784
3785 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3786 if (IS_ERR(prog))
3787 return PTR_ERR(prog);
3788
3789 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3790 bpf_prog_put(prog);
3791 return -EINVAL;

--- 19 unchanged lines hidden ---

3811 case BPF_PROG_TYPE_SOCK_OPS:
3812 case BPF_PROG_TYPE_LSM:
3813 if (ptype == BPF_PROG_TYPE_LSM &&
3814 prog->expected_attach_type != BPF_LSM_CGROUP)
3815 ret = -EINVAL;
3816 else
3817 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3818 break;
3819 case BPF_PROG_TYPE_SCHED_CLS:
3820 ret = tcx_prog_attach(attr, prog);
3821 break;
3822 default:
3823 ret = -EINVAL;
3824 }
3825
3826 if (ret)
3827 bpf_prog_put(prog);
3828 return ret;
3829}
3830
3580#define BPF_PROG_DETACH_LAST_FIELD attach_type
3831#define BPF_PROG_DETACH_LAST_FIELD expected_revision
3832
3833static int bpf_prog_detach(const union bpf_attr *attr)
3834{
3835 struct bpf_prog *prog = NULL;
3836 enum bpf_prog_type ptype;
3837 int ret;
3838
3839 if (CHECK_ATTR(BPF_PROG_DETACH))
3840 return -EINVAL;
3841
3842 ptype = attach_type_to_prog_type(attr->attach_type);
3843 if (bpf_mprog_supported(ptype)) {
3844 if (ptype == BPF_PROG_TYPE_UNSPEC)
3845 return -EINVAL;
3846 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
3847 return -EINVAL;
3848 if (attr->attach_bpf_fd) {
3849 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3850 if (IS_ERR(prog))
3851 return PTR_ERR(prog);
3852 }
3853 }
3854
3855 switch (ptype) {
3856 case BPF_PROG_TYPE_SK_MSG:
3857 case BPF_PROG_TYPE_SK_SKB:
3594 return sock_map_prog_detach(attr, ptype);
3858 ret = sock_map_prog_detach(attr, ptype);
3859 break;
3860 case BPF_PROG_TYPE_LIRC_MODE2:
3596 return lirc_prog_detach(attr);
3861 ret = lirc_prog_detach(attr);
3862 break;
3863 case BPF_PROG_TYPE_FLOW_DISSECTOR:
3598 return netns_bpf_prog_detach(attr, ptype);
3864 ret = netns_bpf_prog_detach(attr, ptype);
3865 break;
3866 case BPF_PROG_TYPE_CGROUP_DEVICE:
3867 case BPF_PROG_TYPE_CGROUP_SKB:
3868 case BPF_PROG_TYPE_CGROUP_SOCK:
3869 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3870 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3871 case BPF_PROG_TYPE_CGROUP_SYSCTL:
3872 case BPF_PROG_TYPE_SOCK_OPS:
3873 case BPF_PROG_TYPE_LSM:
3607 return cgroup_bpf_prog_detach(attr, ptype);
3874 ret = cgroup_bpf_prog_detach(attr, ptype);
3875 break;
3876 case BPF_PROG_TYPE_SCHED_CLS:
3877 ret = tcx_prog_detach(attr, prog);
3878 break;
3879 default:
3609 return -EINVAL;
3880 ret = -EINVAL;
3881 }
3882
3883 if (prog)
3884 bpf_prog_put(prog);
3885 return ret;
3886}
3887
3613#define BPF_PROG_QUERY_LAST_FIELD query.prog_attach_flags
3888#define BPF_PROG_QUERY_LAST_FIELD query.link_attach_flags
3889
3890static int bpf_prog_query(const union bpf_attr *attr,
3891 union bpf_attr __user *uattr)
3892{
3893 if (!capable(CAP_NET_ADMIN))
3894 return -EPERM;
3895 if (CHECK_ATTR(BPF_PROG_QUERY))
3896 return -EINVAL;

--- 31 unchanged lines hidden ---

3928 case BPF_FLOW_DISSECTOR:
3929 case BPF_SK_LOOKUP:
3930 return netns_bpf_prog_query(attr, uattr);
3931 case BPF_SK_SKB_STREAM_PARSER:
3932 case BPF_SK_SKB_STREAM_VERDICT:
3933 case BPF_SK_MSG_VERDICT:
3934 case BPF_SK_SKB_VERDICT:
3935 return sock_map_bpf_prog_query(attr, uattr);
3936 case BPF_TCX_INGRESS:
3937 case BPF_TCX_EGRESS:
3938 return tcx_prog_query(attr, uattr);
3939 default:
3940 return -EINVAL;
3941 }
3942}
3943
3944#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
3945
3946static int bpf_prog_test_run(const union bpf_attr *attr,

--- 946 unchanged lines hidden ---

4893 break;
4894 case BPF_PROG_TYPE_KPROBE:
4895 if (attr->link_create.attach_type != BPF_PERF_EVENT &&
4896 attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) {
4897 ret = -EINVAL;
4898 goto out;
4899 }
4900 break;
4901 case BPF_PROG_TYPE_SCHED_CLS:
4902 if (attr->link_create.attach_type != BPF_TCX_INGRESS &&
4903 attr->link_create.attach_type != BPF_TCX_EGRESS) {
4904 ret = -EINVAL;
4905 goto out;
4906 }
4907 break;
4908 default:
4909 ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4910 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4911 ret = -EINVAL;
4912 goto out;
4913 }
4914 break;
4915 }

--- 35 unchanged lines hidden ---

4951 case BPF_PROG_TYPE_FLOW_DISSECTOR:
4952 case BPF_PROG_TYPE_SK_LOOKUP:
4953 ret = netns_bpf_link_create(attr, prog);
4954 break;
4955#ifdef CONFIG_NET
4956 case BPF_PROG_TYPE_XDP:
4957 ret = bpf_xdp_link_attach(attr, prog);
4958 break;
4959 case BPF_PROG_TYPE_SCHED_CLS:
4960 ret = tcx_link_attach(attr, prog);
4961 break;
4962 case BPF_PROG_TYPE_NETFILTER:
4963 ret = bpf_nf_link_attach(attr, prog);
4964 break;
4965#endif
4966 case BPF_PROG_TYPE_PERF_EVENT:
4967 case BPF_PROG_TYPE_TRACEPOINT:
4968 ret = bpf_perf_link_attach(attr, prog);
4969 break;

--- 91 unchanged lines hidden ---
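For the tcx attach points added in this diff, a hedged sketch creating a tcx link with raw bpf(2); link_create.target_ifindex is the pre-existing ifindex alias in union bpf_attr, and error handling is elided:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int tcx_link_create(int prog_fd, int ifindex)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_ifindex = ifindex;
	attr.link_create.attach_type = BPF_TCX_INGRESS;	/* or BPF_TCX_EGRESS */
	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}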

5061 ret = -EINVAL;
5062
5063out_put_progs:
5064 if (old_prog)
5065 bpf_prog_put(old_prog);
5066 if (ret)
5067 bpf_prog_put(new_prog);
5068out_put_link:
4781 bpf_link_put(link);
5069 bpf_link_put_direct(link);
5070 return ret;
5071}
5072
5073#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
5074
5075static int link_detach(union bpf_attr *attr)
5076{
5077 struct bpf_link *link;

--- 6 unchanged lines hidden ---

5084 if (IS_ERR(link))
5085 return PTR_ERR(link);
5086
5087 if (link->ops->detach)
5088 ret = link->ops->detach(link);
5089 else
5090 ret = -EOPNOTSUPP;
5091
4804 bpf_link_put(link);
5092 bpf_link_put_direct(link);
5093 return ret;
5094}
5095
5096static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
5097{
5098 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
5099}
5100

--- 53 unchanged lines hidden ---

5154 return -EPERM;
5155
5156 link = bpf_link_by_id(id);
5157 if (IS_ERR(link))
5158 return PTR_ERR(link);
5159
5160 fd = bpf_link_new_fd(link);
5161 if (fd < 0)
4874 bpf_link_put(link);
5162 bpf_link_put_direct(link);
5163
5164 return fd;
5165}
5166
5167DEFINE_MUTEX(bpf_stats_enabled_mutex);
5168
5169static int bpf_stats_release(struct inode *inode, struct file *file)
5170{

--- 60 unchanged lines hidden ---

5231 if (attr->iter_create.flags)
5232 return -EINVAL;
5233
5234 link = bpf_link_get_from_fd(attr->iter_create.link_fd);
5235 if (IS_ERR(link))
5236 return PTR_ERR(link);
5237
5238 err = bpf_iter_new_fd(link);
4951 bpf_link_put(link);
5239 bpf_link_put_direct(link);
5240
5241 return err;
5242}
5243
5244#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
5245
5246static int bpf_prog_bind_map(union bpf_attr *attr)
5247{

--- 53 unchanged lines hidden ---

5301out_prog_put:
5302 bpf_prog_put(prog);
5303 return ret;
5304}
5305
5306static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
5307{
5308 union bpf_attr attr;
5021 bool capable;
5309 int err;
5310
5024 capable = bpf_capable() || !sysctl_unprivileged_bpf_disabled;
5025
5026 /* Intent here is for unprivileged_bpf_disabled to block key object
5027 * creation commands for unprivileged users; other actions depend
5028 * of fd availability and access to bpffs, so are dependent on
5029 * object creation success. Capabilities are later verified for
5030 * operations such as load and map create, so even with unprivileged
5031 * BPF disabled, capability checks are still carried out for these
5032 * and other operations.
5033 */
5034 if (!capable &&
5035 (cmd == BPF_MAP_CREATE || cmd == BPF_PROG_LOAD))
5036 return -EPERM;
5037
5311 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
5312 if (err)
5313 return err;
5314 size = min_t(u32, size, sizeof(attr));
5315
5316 /* copy attributes from user space, may be less than sizeof(bpf_attr) */
5317 memset(&attr, 0, sizeof(attr));
5318 if (copy_from_bpfptr(&attr, uattr, size) != 0)

--- 343 unchanged lines hidden ---

5662 tmp.data = &unpriv_enable;
5663 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5664 if (write && !ret) {
5665 if (locked_state && unpriv_enable != 1)
5666 return -EPERM;
5667 *(int *)table->data = unpriv_enable;
5668 }
5669
5397 unpriv_ebpf_notify(unpriv_enable);
5670 if (write)
5671 unpriv_ebpf_notify(unpriv_enable);
5672
5673 return ret;
5674}
5675
5676static struct ctl_table bpf_syscall_table[] = {
5677 {
5678 .procname = "unprivileged_bpf_disabled",
5679 .data = &sysctl_unprivileged_bpf_disabled,

--- 22 unchanged lines hidden ---