xref: /linux/kernel/bpf/syscall.c (revision 509db2833e0ddac7faf6e7d2dd6e7f85c98fbee0)
199c55f7dSAlexei Starovoitov /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
299c55f7dSAlexei Starovoitov  *
399c55f7dSAlexei Starovoitov  * This program is free software; you can redistribute it and/or
499c55f7dSAlexei Starovoitov  * modify it under the terms of version 2 of the GNU General Public
599c55f7dSAlexei Starovoitov  * License as published by the Free Software Foundation.
699c55f7dSAlexei Starovoitov  *
799c55f7dSAlexei Starovoitov  * This program is distributed in the hope that it will be useful, but
899c55f7dSAlexei Starovoitov  * WITHOUT ANY WARRANTY; without even the implied warranty of
999c55f7dSAlexei Starovoitov  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1099c55f7dSAlexei Starovoitov  * General Public License for more details.
1199c55f7dSAlexei Starovoitov  */
1299c55f7dSAlexei Starovoitov #include <linux/bpf.h>
13a67edbf4SDaniel Borkmann #include <linux/bpf_trace.h>
14f4364dcfSSean Young #include <linux/bpf_lirc.h>
15f56a653cSMartin KaFai Lau #include <linux/btf.h>
1699c55f7dSAlexei Starovoitov #include <linux/syscalls.h>
1799c55f7dSAlexei Starovoitov #include <linux/slab.h>
183f07c014SIngo Molnar #include <linux/sched/signal.h>
19d407bd25SDaniel Borkmann #include <linux/vmalloc.h>
20d407bd25SDaniel Borkmann #include <linux/mmzone.h>
2199c55f7dSAlexei Starovoitov #include <linux/anon_inodes.h>
2241bdc4b4SYonghong Song #include <linux/fdtable.h>
23db20fd2bSAlexei Starovoitov #include <linux/file.h>
2441bdc4b4SYonghong Song #include <linux/fs.h>
2509756af4SAlexei Starovoitov #include <linux/license.h>
2609756af4SAlexei Starovoitov #include <linux/filter.h>
272541517cSAlexei Starovoitov #include <linux/version.h>
28535e7b4bSMickaël Salaün #include <linux/kernel.h>
29dc4bb0e2SMartin KaFai Lau #include <linux/idr.h>
30cb4d2b3fSMartin KaFai Lau #include <linux/cred.h>
31cb4d2b3fSMartin KaFai Lau #include <linux/timekeeping.h>
32cb4d2b3fSMartin KaFai Lau #include <linux/ctype.h>
339ef09e35SMark Rutland #include <linux/nospec.h>
3499c55f7dSAlexei Starovoitov 
/* Map types whose values are file descriptors resolved into kernel objects
 * (programs, perf events, cgroups, inner maps).  Array flavours first, the
 * hash-of-maps flavour separately, and IS_FD_MAP() covers both.
 */
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

/* Only these access-mode flags are accepted on object-pinning/open paths
 * (see bpf_get_file_flag() below).
 */
#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
436e71b04aSChenbo Feng 
/* Per-CPU recursion flag: non-zero while a BPF program/map operation is
 * active on this CPU (checked by callers elsewhere in the tree).
 */
DEFINE_PER_CPU(int, bpf_prog_active);

/* Global ID allocators for programs and maps, each with its own spinlock.
 * IDs let user space reference objects without holding an fd.
 */
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

/* sysctl knob; when set, unprivileged bpf(2) use is disabled (enforced by
 * the syscall entry path, not shown in this chunk).
 */
int sysctl_unprivileged_bpf_disabled __read_mostly;
511be7f75dSAlexei Starovoitov 
/* Ops table indexed by BPF_MAP_TYPE_* id, generated from
 * <linux/bpf_types.h>.  BPF_PROG_TYPE() entries expand to nothing here;
 * map types not built into this kernel leave NULL holes.
 */
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
6099c55f7dSAlexei Starovoitov 
61752ba56fSMickaël Salaün /*
62752ba56fSMickaël Salaün  * If we're handed a bigger struct than we know of, ensure all the unknown bits
63752ba56fSMickaël Salaün  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
64752ba56fSMickaël Salaün  * we don't know about yet.
65752ba56fSMickaël Salaün  *
66752ba56fSMickaël Salaün  * There is a ToCToU between this function call and the following
67752ba56fSMickaël Salaün  * copy_from_user() call. However, this is not a concern since this function is
68752ba56fSMickaël Salaün  * meant to be a future-proofing of bits.
69752ba56fSMickaël Salaün  */
70dcab51f1SMartin KaFai Lau int bpf_check_uarg_tail_zero(void __user *uaddr,
7158291a74SMickaël Salaün 			     size_t expected_size,
7258291a74SMickaël Salaün 			     size_t actual_size)
7358291a74SMickaël Salaün {
7458291a74SMickaël Salaün 	unsigned char __user *addr;
7558291a74SMickaël Salaün 	unsigned char __user *end;
7658291a74SMickaël Salaün 	unsigned char val;
7758291a74SMickaël Salaün 	int err;
7858291a74SMickaël Salaün 
79752ba56fSMickaël Salaün 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
80752ba56fSMickaël Salaün 		return -E2BIG;
81752ba56fSMickaël Salaün 
82752ba56fSMickaël Salaün 	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
83752ba56fSMickaël Salaün 		return -EFAULT;
84752ba56fSMickaël Salaün 
8558291a74SMickaël Salaün 	if (actual_size <= expected_size)
8658291a74SMickaël Salaün 		return 0;
8758291a74SMickaël Salaün 
8858291a74SMickaël Salaün 	addr = uaddr + expected_size;
8958291a74SMickaël Salaün 	end  = uaddr + actual_size;
9058291a74SMickaël Salaün 
9158291a74SMickaël Salaün 	for (; addr < end; addr++) {
9258291a74SMickaël Salaün 		err = get_user(val, addr);
9358291a74SMickaël Salaün 		if (err)
9458291a74SMickaël Salaün 			return err;
9558291a74SMickaël Salaün 		if (val)
9658291a74SMickaël Salaün 			return -E2BIG;
9758291a74SMickaël Salaün 	}
9858291a74SMickaël Salaün 
9958291a74SMickaël Salaün 	return 0;
10058291a74SMickaël Salaün }
10158291a74SMickaël Salaün 
/* Ops for maps living on an offload device: only alloc/free are handled
 * here, and BTF association is rejected via map_check_no_btf().
 */
const struct bpf_map_ops bpf_map_offload_ops = {
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};
107a3884572SJakub Kicinski 
/* Resolve attr->map_type to its ops table entry and allocate the map.
 * Returns the new map (with ->ops and ->map_type initialized here; the
 * rest is filled by the type's map_alloc()) or an ERR_PTR().
 */
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	/* Clamp the user-controlled index under speculation (Spectre v1)
	 * before it is used to load from the table.
	 */
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	/* Device-offloaded maps use dedicated ops regardless of type,
	 * but only after the type's own alloc_check above has passed.
	 */
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}
13699c55f7dSAlexei Starovoitov 
13796eabe7aSMartin KaFai Lau void *bpf_map_area_alloc(size_t size, int numa_node)
138d407bd25SDaniel Borkmann {
139d407bd25SDaniel Borkmann 	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
140d407bd25SDaniel Borkmann 	 * trigger under memory pressure as we really just want to
141d407bd25SDaniel Borkmann 	 * fail instead.
142d407bd25SDaniel Borkmann 	 */
143d407bd25SDaniel Borkmann 	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
144d407bd25SDaniel Borkmann 	void *area;
145d407bd25SDaniel Borkmann 
146d407bd25SDaniel Borkmann 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
14796eabe7aSMartin KaFai Lau 		area = kmalloc_node(size, GFP_USER | flags, numa_node);
148d407bd25SDaniel Borkmann 		if (area != NULL)
149d407bd25SDaniel Borkmann 			return area;
150d407bd25SDaniel Borkmann 	}
151d407bd25SDaniel Borkmann 
15296eabe7aSMartin KaFai Lau 	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
15396eabe7aSMartin KaFai Lau 					   __builtin_return_address(0));
154d407bd25SDaniel Borkmann }
155d407bd25SDaniel Borkmann 
/* Free memory from bpf_map_area_alloc(); kvfree() handles both the
 * kmalloc and the vmalloc case.
 */
void bpf_map_area_free(void *area)
{
	kvfree(area);
}
160d407bd25SDaniel Borkmann 
/* Copy the creation attributes common to all map types into @map. */
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = attr->map_flags;
	map->numa_node = bpf_map_attr_numa_node(attr);
}
170bd475643SJakub Kicinski 
1716c905981SAlexei Starovoitov int bpf_map_precharge_memlock(u32 pages)
1726c905981SAlexei Starovoitov {
1736c905981SAlexei Starovoitov 	struct user_struct *user = get_current_user();
1746c905981SAlexei Starovoitov 	unsigned long memlock_limit, cur;
1756c905981SAlexei Starovoitov 
1766c905981SAlexei Starovoitov 	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1776c905981SAlexei Starovoitov 	cur = atomic_long_read(&user->locked_vm);
1786c905981SAlexei Starovoitov 	free_uid(user);
1796c905981SAlexei Starovoitov 	if (cur + pages > memlock_limit)
1806c905981SAlexei Starovoitov 		return -EPERM;
1816c905981SAlexei Starovoitov 	return 0;
1826c905981SAlexei Starovoitov }
1836c905981SAlexei Starovoitov 
1840a4c58f5SRoman Gushchin static int bpf_charge_memlock(struct user_struct *user, u32 pages)
185aaac3ba9SAlexei Starovoitov {
1860a4c58f5SRoman Gushchin 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
187aaac3ba9SAlexei Starovoitov 
1880a4c58f5SRoman Gushchin 	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
1890a4c58f5SRoman Gushchin 		atomic_long_sub(pages, &user->locked_vm);
190aaac3ba9SAlexei Starovoitov 		return -EPERM;
191aaac3ba9SAlexei Starovoitov 	}
192aaac3ba9SAlexei Starovoitov 	return 0;
193aaac3ba9SAlexei Starovoitov }
194aaac3ba9SAlexei Starovoitov 
/* Return @pages previously charged with bpf_charge_memlock(). */
static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
{
	atomic_long_sub(pages, &user->locked_vm);
}
1990a4c58f5SRoman Gushchin 
2000a4c58f5SRoman Gushchin static int bpf_map_init_memlock(struct bpf_map *map)
2010a4c58f5SRoman Gushchin {
2020a4c58f5SRoman Gushchin 	struct user_struct *user = get_current_user();
2030a4c58f5SRoman Gushchin 	int ret;
2040a4c58f5SRoman Gushchin 
2050a4c58f5SRoman Gushchin 	ret = bpf_charge_memlock(user, map->pages);
2060a4c58f5SRoman Gushchin 	if (ret) {
2070a4c58f5SRoman Gushchin 		free_uid(user);
2080a4c58f5SRoman Gushchin 		return ret;
2090a4c58f5SRoman Gushchin 	}
2100a4c58f5SRoman Gushchin 	map->user = user;
2110a4c58f5SRoman Gushchin 	return ret;
2120a4c58f5SRoman Gushchin }
2130a4c58f5SRoman Gushchin 
2140a4c58f5SRoman Gushchin static void bpf_map_release_memlock(struct bpf_map *map)
215aaac3ba9SAlexei Starovoitov {
216aaac3ba9SAlexei Starovoitov 	struct user_struct *user = map->user;
2170a4c58f5SRoman Gushchin 	bpf_uncharge_memlock(user, map->pages);
218aaac3ba9SAlexei Starovoitov 	free_uid(user);
219aaac3ba9SAlexei Starovoitov }
220aaac3ba9SAlexei Starovoitov 
2210a4c58f5SRoman Gushchin int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
2220a4c58f5SRoman Gushchin {
2230a4c58f5SRoman Gushchin 	int ret;
2240a4c58f5SRoman Gushchin 
2250a4c58f5SRoman Gushchin 	ret = bpf_charge_memlock(map->user, pages);
2260a4c58f5SRoman Gushchin 	if (ret)
2270a4c58f5SRoman Gushchin 		return ret;
2280a4c58f5SRoman Gushchin 	map->pages += pages;
2290a4c58f5SRoman Gushchin 	return ret;
2300a4c58f5SRoman Gushchin }
2310a4c58f5SRoman Gushchin 
/* Release @pages previously added via bpf_map_charge_memlock(). */
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
{
	bpf_uncharge_memlock(map->user, pages);
	map->pages -= pages;
}
2370a4c58f5SRoman Gushchin 
/* Publish @map in map_idr under a fresh cyclic positive ID.
 * Returns 0 on success or the negative errno from idr_alloc_cyclic().
 * idr_preload() lets the GFP_ATOMIC allocation under the BH-safe lock
 * draw from a preallocated pool.
 */
static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	/* id == 0 is impossible with start == 1; warn if it ever happens */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}
255f3f1c054SMartin KaFai Lau 
/* Remove @map from map_idr and clear map->id.  @do_idr_lock is false when
 * the caller already holds map_idr_lock; the __acquire/__release pairs are
 * sparse annotations only and generate no code.
 */
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}
281f3f1c054SMartin KaFai Lau 
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	/* uncharge memlock and drop the uid ref before the LSM hook */
	bpf_map_release_memlock(map);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}
29299c55f7dSAlexei Starovoitov 
293c9da161cSDaniel Borkmann static void bpf_map_put_uref(struct bpf_map *map)
294c9da161cSDaniel Borkmann {
295c9da161cSDaniel Borkmann 	if (atomic_dec_and_test(&map->usercnt)) {
296ba6b8de4SJohn Fastabend 		if (map->ops->map_release_uref)
297ba6b8de4SJohn Fastabend 			map->ops->map_release_uref(map);
298c9da161cSDaniel Borkmann 	}
299c9da161cSDaniel Borkmann }
300c9da161cSDaniel Borkmann 
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		/* defer the actual free: map_free() may sleep */
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}
31499c55f7dSAlexei Starovoitov 
/* Drop a map reference, taking map_idr_lock internally if this is the
 * last one.
 */
void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);
320bd5f5f4eSMartin KaFai Lau 
/* Drop both the user reference and the main reference. */
void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
326c9da161cSDaniel Borkmann 
/* file_operations ->release for map fds: notify the map type, then drop
 * the references the fd held.
 */
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}
33799c55f7dSAlexei Starovoitov 
#ifdef CONFIG_PROC_FS
/* Emit map details for /proc/<pid>/fdinfo/<fd>.  For prog arrays also show
 * the owning program type/JIT state that got latched into the array.
 */
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT,
		   map->id);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif
376f99bf205SDaniel Borkmann 
/* Dummy ->read so the fd carries FMODE_CAN_READ; reads always fail. */
static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}
3856e71b04aSChenbo Feng 
/* Dummy ->write so the fd carries FMODE_CAN_WRITE; writes always fail. */
static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}
3946e71b04aSChenbo Feng 
/* file_operations backing every map fd (see bpf_map_new_fd()). */
const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};
40399c55f7dSAlexei Starovoitov 
/* Install a new anon-inode fd for @map after the LSM allows it.
 * @flags are O_* access flags (see bpf_get_file_flag()); O_CLOEXEC is
 * forced.  Returns the fd or a negative errno.
 */
int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}
4156e71b04aSChenbo Feng 
4166e71b04aSChenbo Feng int bpf_get_file_flag(int flags)
4176e71b04aSChenbo Feng {
4186e71b04aSChenbo Feng 	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
4196e71b04aSChenbo Feng 		return -EINVAL;
4206e71b04aSChenbo Feng 	if (flags & BPF_F_RDONLY)
4216e71b04aSChenbo Feng 		return O_RDONLY;
4226e71b04aSChenbo Feng 	if (flags & BPF_F_WRONLY)
4236e71b04aSChenbo Feng 		return O_WRONLY;
4246e71b04aSChenbo Feng 	return O_RDWR;
425aa79781bSDaniel Borkmann }
426aa79781bSDaniel Borkmann 
/* helper macro to check that unused fields 'union bpf_attr' are zero;
 * evaluates to non-zero (error) if any byte past CMD's last field is set
 */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
43499c55f7dSAlexei Starovoitov 
435cb4d2b3fSMartin KaFai Lau /* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
436cb4d2b3fSMartin KaFai Lau  * Return 0 on success and < 0 on error.
437cb4d2b3fSMartin KaFai Lau  */
438cb4d2b3fSMartin KaFai Lau static int bpf_obj_name_cpy(char *dst, const char *src)
439cb4d2b3fSMartin KaFai Lau {
440cb4d2b3fSMartin KaFai Lau 	const char *end = src + BPF_OBJ_NAME_LEN;
441cb4d2b3fSMartin KaFai Lau 
442473d9734SMartin KaFai Lau 	memset(dst, 0, BPF_OBJ_NAME_LEN);
443473d9734SMartin KaFai Lau 
444cb4d2b3fSMartin KaFai Lau 	/* Copy all isalnum() and '_' char */
445cb4d2b3fSMartin KaFai Lau 	while (src < end && *src) {
446cb4d2b3fSMartin KaFai Lau 		if (!isalnum(*src) && *src != '_')
447cb4d2b3fSMartin KaFai Lau 			return -EINVAL;
448cb4d2b3fSMartin KaFai Lau 		*dst++ = *src++;
449cb4d2b3fSMartin KaFai Lau 	}
450cb4d2b3fSMartin KaFai Lau 
451cb4d2b3fSMartin KaFai Lau 	/* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
452cb4d2b3fSMartin KaFai Lau 	if (src == end)
453cb4d2b3fSMartin KaFai Lau 		return -EINVAL;
454cb4d2b3fSMartin KaFai Lau 
455cb4d2b3fSMartin KaFai Lau 	return 0;
456cb4d2b3fSMartin KaFai Lau }
457cb4d2b3fSMartin KaFai Lau 
/* map_check_btf callback for map types that do not support BTF. */
int map_check_no_btf(const struct bpf_map *map,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}
464e8d2bec0SDaniel Borkmann 
465e8d2bec0SDaniel Borkmann static int map_check_btf(const struct bpf_map *map, const struct btf *btf,
466e8d2bec0SDaniel Borkmann 			 u32 btf_key_id, u32 btf_value_id)
467e8d2bec0SDaniel Borkmann {
468e8d2bec0SDaniel Borkmann 	const struct btf_type *key_type, *value_type;
469e8d2bec0SDaniel Borkmann 	u32 key_size, value_size;
470e8d2bec0SDaniel Borkmann 	int ret = 0;
471e8d2bec0SDaniel Borkmann 
472e8d2bec0SDaniel Borkmann 	key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
473e8d2bec0SDaniel Borkmann 	if (!key_type || key_size != map->key_size)
474e8d2bec0SDaniel Borkmann 		return -EINVAL;
475e8d2bec0SDaniel Borkmann 
476e8d2bec0SDaniel Borkmann 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
477e8d2bec0SDaniel Borkmann 	if (!value_type || value_size != map->value_size)
478e8d2bec0SDaniel Borkmann 		return -EINVAL;
479e8d2bec0SDaniel Borkmann 
480e8d2bec0SDaniel Borkmann 	if (map->ops->map_check_btf)
481e8d2bec0SDaniel Borkmann 		ret = map->ops->map_check_btf(map, key_type, value_type);
482e8d2bec0SDaniel Borkmann 
483e8d2bec0SDaniel Borkmann 	return ret;
484e8d2bec0SDaniel Borkmann }
485e8d2bec0SDaniel Borkmann 
#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
/* called via syscall
 *
 * Allocate a map from attr, attach optional BTF, charge memlock, publish
 * an ID, and return a new fd.  Error paths unwind in strict reverse order
 * of the setup steps via the goto chain at the bottom.
 */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name);
	if (err)
		goto free_map_nouncharge;

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	if (attr->btf_key_type_id || attr->btf_value_type_id) {
		struct btf *btf;

		/* either both type ids are given or neither */
		if (!attr->btf_key_type_id || !attr->btf_value_type_id) {
			err = -EINVAL;
			goto free_map_nouncharge;
		}

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map_nouncharge;
		}

		err = map_check_btf(map, btf, attr->btf_key_type_id,
				    attr->btf_value_type_id);
		if (err) {
			btf_put(btf);
			goto free_map_nouncharge;
		}

		/* map now owns the btf reference (dropped in __bpf_map_put) */
		map->btf = btf;
		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_init_memlock(map);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	return err;

free_map:
	bpf_map_release_memlock(map);
free_map_sec:
	security_bpf_map_free(map);
free_map_nouncharge:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}
58199c55f7dSAlexei Starovoitov 
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	/* reject fds that are not bpf map fds */
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
596c2101297SDaniel Borkmann 
/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

/* Take an extra reference on @map (and optionally a user reference).
 * Fails with -EBUSY when the refcount limit is exceeded; the speculative
 * increment is rolled back in that case.
 */
struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc);
611c9da161cSDaniel Borkmann 
/* Look up the map behind @ufd and take both a refcnt and a usercnt
 * reference on it.  Returns the map or an ERR_PTR; the fd itself is
 * released before returning either way.
 */
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}
626db20fd2bSAlexei Starovoitov 
/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	/* Take a reference only if refcnt is non-zero; refold is the
	 * value observed before the (conditional) increment.
	 */
	refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		/* Our increment pushed it over the cap: undo via put. */
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		/* refcnt already hit zero: map is being destroyed */
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}
648bd5f5f4eSMartin KaFai Lau 
/* Weak fallback used when no stronger bpf_stackmap_copy() definition is
 * linked in (i.e. stack-map support is not built); lookups on stack
 * trace maps then report -ENOTSUPP.
 */
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
653b8cdc051SAlexei Starovoitov 
654db20fd2bSAlexei Starovoitov /* last field in 'union bpf_attr' used by this command */
655db20fd2bSAlexei Starovoitov #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
656db20fd2bSAlexei Starovoitov 
/* Handler for the BPF_MAP_LOOKUP_ELEM syscall command: copy the key in
 * from user space, look the element up (dispatching on map type), and
 * copy the value back out.  Returns 0 or a negative errno.
 */
static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	/* Reject attrs with garbage past the last field of this command. */
	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* The fd must have been opened with read permission. */
	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	/* Per-cpu maps return one value slot per possible CPU, each
	 * rounded up to 8 bytes; fd-based maps return a 32-bit id.
	 */
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	/* Dispatch on map type; special-cased maps copy into 'value'
	 * themselves, the generic path memcpy()s under RCU.
	 */
	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			/* element may be freed after rcu_read_unlock(),
			 * so copy while still inside the read section
			 */
			memcpy(value, ptr, value_size);
		}
		rcu_read_unlock();
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
750db20fd2bSAlexei Starovoitov 
7513274f520SAlexei Starovoitov #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
752db20fd2bSAlexei Starovoitov 
/* Handler for the BPF_MAP_UPDATE_ELEM syscall command: copy key and
 * value in from user space and insert/update the element, dispatching
 * on map type.  Returns 0 or a negative errno.
 */
static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* The fd must have been opened with write permission. */
	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	/* Per-cpu maps take one value slot per possible CPU, each
	 * rounded up to 8 bytes.
	 */
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
		goto out;
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		/* these map types may sleep, so skip the bpf_prog_active
		 * protection below and call the op directly
		 */
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		goto out;
	}

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       attr->flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 attr->flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
854db20fd2bSAlexei Starovoitov 
855db20fd2bSAlexei Starovoitov #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
856db20fd2bSAlexei Starovoitov 
/* Handler for the BPF_MAP_DELETE_ELEM syscall command: copy the key in
 * from user space and delete the matching element.  Returns 0 or a
 * negative errno.
 */
static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Deleting requires an fd opened with write permission. */
	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	/* Device-bound maps delete through the offload path. */
	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	}

	/* Block kprobe+bpf recursion into map ops (see map_update_elem). */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
903db20fd2bSAlexei Starovoitov 
904db20fd2bSAlexei Starovoitov /* last field in 'union bpf_attr' used by this command */
905db20fd2bSAlexei Starovoitov #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
906db20fd2bSAlexei Starovoitov 
/* Handler for the BPF_MAP_GET_NEXT_KEY syscall command: given a key (or
 * NULL to start iteration), write the next key in the map to user
 * space.  Returns 0 or a negative errno.
 */
static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	/* NULL ukey asks for the first key in the map. */
	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
971db20fd2bSAlexei Starovoitov 
/* Table mapping each bpf program type id to its bpf_prog_ops, generated
 * by X-macro expansion of <linux/bpf_types.h>: BPF_PROG_TYPE entries
 * become designated initializers, BPF_MAP_TYPE entries expand to
 * nothing here.  Unlisted ids stay NULL.
 */
static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
9807de16e3aSJakub Kicinski 
/* Validate the user-supplied program type and attach the matching ops
 * to @prog.  Returns 0 on success, -EINVAL for unknown/unsupported
 * types.
 */
static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	/* clamp the index under speculation (Spectre v1 mitigation) */
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	/* device-bound programs use the offload ops instead */
	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}
99909756af4SAlexei Starovoitov 
/* drop refcnt on maps used by eBPF program and free auxilary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	enum bpf_cgroup_storage_type stype;
	int i;

	/* release any cgroup storage attached to this program */
	for_each_cgroup_storage_type(stype) {
		if (!aux->cgroup_storage[stype])
			continue;
		bpf_cgroup_storage_release(aux->prog,
					   aux->cgroup_storage[stype]);
	}

	/* drop the references the verifier took on used maps */
	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
101809756af4SAlexei Starovoitov 
10195ccb071eSDaniel Borkmann int __bpf_prog_charge(struct user_struct *user, u32 pages)
10205ccb071eSDaniel Borkmann {
10215ccb071eSDaniel Borkmann 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
10225ccb071eSDaniel Borkmann 	unsigned long user_bufs;
10235ccb071eSDaniel Borkmann 
10245ccb071eSDaniel Borkmann 	if (user) {
10255ccb071eSDaniel Borkmann 		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
10265ccb071eSDaniel Borkmann 		if (user_bufs > memlock_limit) {
10275ccb071eSDaniel Borkmann 			atomic_long_sub(pages, &user->locked_vm);
10285ccb071eSDaniel Borkmann 			return -EPERM;
10295ccb071eSDaniel Borkmann 		}
10305ccb071eSDaniel Borkmann 	}
10315ccb071eSDaniel Borkmann 
10325ccb071eSDaniel Borkmann 	return 0;
10335ccb071eSDaniel Borkmann }
10345ccb071eSDaniel Borkmann 
10355ccb071eSDaniel Borkmann void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
10365ccb071eSDaniel Borkmann {
10375ccb071eSDaniel Borkmann 	if (user)
10385ccb071eSDaniel Borkmann 		atomic_long_sub(pages, &user->locked_vm);
10395ccb071eSDaniel Borkmann }
10405ccb071eSDaniel Borkmann 
1041aaac3ba9SAlexei Starovoitov static int bpf_prog_charge_memlock(struct bpf_prog *prog)
1042aaac3ba9SAlexei Starovoitov {
1043aaac3ba9SAlexei Starovoitov 	struct user_struct *user = get_current_user();
10445ccb071eSDaniel Borkmann 	int ret;
1045aaac3ba9SAlexei Starovoitov 
10465ccb071eSDaniel Borkmann 	ret = __bpf_prog_charge(user, prog->pages);
10475ccb071eSDaniel Borkmann 	if (ret) {
1048aaac3ba9SAlexei Starovoitov 		free_uid(user);
10495ccb071eSDaniel Borkmann 		return ret;
1050aaac3ba9SAlexei Starovoitov 	}
10515ccb071eSDaniel Borkmann 
1052aaac3ba9SAlexei Starovoitov 	prog->aux->user = user;
1053aaac3ba9SAlexei Starovoitov 	return 0;
1054aaac3ba9SAlexei Starovoitov }
1055aaac3ba9SAlexei Starovoitov 
1056aaac3ba9SAlexei Starovoitov static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
1057aaac3ba9SAlexei Starovoitov {
1058aaac3ba9SAlexei Starovoitov 	struct user_struct *user = prog->aux->user;
1059aaac3ba9SAlexei Starovoitov 
10605ccb071eSDaniel Borkmann 	__bpf_prog_uncharge(user, prog->pages);
1061aaac3ba9SAlexei Starovoitov 	free_uid(user);
1062aaac3ba9SAlexei Starovoitov }
1063aaac3ba9SAlexei Starovoitov 
/* Allocate a cyclic id in [1, INT_MAX) for @prog and store it in
 * prog->aux->id.  Returns 0 on success or a negative errno from the
 * idr allocator.
 */
static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	/* preload outside the lock so the GFP_ATOMIC alloc can't fail
	 * for lack of preallocated idr nodes
	 */
	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}
1082dc4bb0e2SMartin KaFai Lau 
/* Remove @prog from the prog idr and clear its id.  @do_idr_lock is
 * false when the caller already holds prog_idr_lock; the
 * __acquire/__release calls are sparse-only annotations to keep the
 * static checker's lock balance happy in that case.
 */
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}
1106dc4bb0e2SMartin KaFai Lau 
/* RCU callback that performs the final teardown of a program once all
 * pre-existing RCU readers are done: drop map references, uncharge the
 * memlock accounting, free the LSM blob, then free the program itself.
 */
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	security_bpf_prog_free(aux);
	/* must come last: this frees the memory 'aux' lives in */
	bpf_prog_free(aux->prog);
}
1116abf2e7d6SAlexei Starovoitov 
/* Drop one reference on @prog; when it was the last, unlink the program
 * from the id store and kallsyms, then defer freeing to an RCU grace
 * period.  @do_idr_lock is passed through to bpf_prog_free_id().
 */
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del_all(prog);

		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
1127b16d9aa4SMartin KaFai Lau 
/* Public put: drop a reference on @prog, taking prog_idr_lock as needed
 * when the last reference is released.
 */
void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
113309756af4SAlexei Starovoitov 
113409756af4SAlexei Starovoitov static int bpf_prog_release(struct inode *inode, struct file *filp)
113509756af4SAlexei Starovoitov {
113609756af4SAlexei Starovoitov 	struct bpf_prog *prog = filp->private_data;
113709756af4SAlexei Starovoitov 
11381aacde3dSDaniel Borkmann 	bpf_prog_put(prog);
113909756af4SAlexei Starovoitov 	return 0;
114009756af4SAlexei Starovoitov }
114109756af4SAlexei Starovoitov 
#ifdef CONFIG_PROC_FS
/* Emit /proc/<pid>/fdinfo/<fd> lines for a bpf program fd: type, jited
 * flag, tag (hex), memlock footprint and id.
 */
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	/* two hex chars per tag byte plus NUL terminator */
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n"
		   "prog_id:\t%u\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT,
		   prog->aux->id);
}
#endif
11627bd509e3SDaniel Borkmann 
/* file_operations backing bpf program fds; reads/writes are rejected by
 * the dummy handlers, release drops the fd's program reference.
 */
const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};
117109756af4SAlexei Starovoitov 
1172b2197755SDaniel Borkmann int bpf_prog_new_fd(struct bpf_prog *prog)
1173aa79781bSDaniel Borkmann {
1174afdb09c7SChenbo Feng 	int ret;
1175afdb09c7SChenbo Feng 
1176afdb09c7SChenbo Feng 	ret = security_bpf_prog(prog);
1177afdb09c7SChenbo Feng 	if (ret < 0)
1178afdb09c7SChenbo Feng 		return ret;
1179afdb09c7SChenbo Feng 
1180aa79781bSDaniel Borkmann 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1181aa79781bSDaniel Borkmann 				O_RDWR | O_CLOEXEC);
1182aa79781bSDaniel Borkmann }
1183aa79781bSDaniel Borkmann 
1184113214beSDaniel Borkmann static struct bpf_prog *____bpf_prog_get(struct fd f)
118509756af4SAlexei Starovoitov {
118609756af4SAlexei Starovoitov 	if (!f.file)
118709756af4SAlexei Starovoitov 		return ERR_PTR(-EBADF);
118809756af4SAlexei Starovoitov 	if (f.file->f_op != &bpf_prog_fops) {
118909756af4SAlexei Starovoitov 		fdput(f);
119009756af4SAlexei Starovoitov 		return ERR_PTR(-EINVAL);
119109756af4SAlexei Starovoitov 	}
119209756af4SAlexei Starovoitov 
1193c2101297SDaniel Borkmann 	return f.file->private_data;
119409756af4SAlexei Starovoitov }
119509756af4SAlexei Starovoitov 
/* Take @i additional references on @prog, failing with -EBUSY if the
 * refcount would exceed BPF_MAX_REFCNT (the speculative add is undone).
 */
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);
120559d3656dSBrenden Blanco 
/* Drop @i references previously taken with bpf_prog_add(). */
void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);
1216c540594fSDaniel Borkmann 
/* Take a single additional reference on @prog (see bpf_prog_add()). */
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);
122292117d84SAlexei Starovoitov 
/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	/* Take a reference only if refcnt is non-zero; refold is the
	 * value observed before the (conditional) increment.
	 */
	refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		/* our increment pushed it over the cap: undo via put */
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		/* refcnt already hit zero: prog is being destroyed */
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1241b16d9aa4SMartin KaFai Lau 
1242040ee692SAl Viro bool bpf_prog_get_ok(struct bpf_prog *prog,
1243288b3de5SJakub Kicinski 			    enum bpf_prog_type *attach_type, bool attach_drv)
1244248f346fSJakub Kicinski {
1245288b3de5SJakub Kicinski 	/* not an attachment, just a refcount inc, always allow */
1246288b3de5SJakub Kicinski 	if (!attach_type)
1247288b3de5SJakub Kicinski 		return true;
1248248f346fSJakub Kicinski 
1249248f346fSJakub Kicinski 	if (prog->type != *attach_type)
1250248f346fSJakub Kicinski 		return false;
1251288b3de5SJakub Kicinski 	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1252248f346fSJakub Kicinski 		return false;
1253248f346fSJakub Kicinski 
1254248f346fSJakub Kicinski 	return true;
1255248f346fSJakub Kicinski }
1256248f346fSJakub Kicinski 
/* Resolve user fd @ufd to its bpf_prog and take one reference.
 * @attach_type: if non-NULL, the prog must be exactly this type.
 * @attach_drv: whether device-bound progs are acceptable.
 * Returns the prog or an ERR_PTR.
 */
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	/* NOTE(review): on error we return without fdput() here —
	 * presumably ____bpf_prog_get() drops the fd ref itself in
	 * that case; confirm against its definition above.
	 */
	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	/* May turn prog into ERR_PTR(-EBUSY) at the refcount limit. */
	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}
1276113214beSDaniel Borkmann 
/* Resolve @ufd and take a reference; no type restriction. */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}
1281113214beSDaniel Borkmann 
/* Resolve @ufd, requiring prog type @type; @attach_drv controls
 * whether device-bound progs are acceptable to the caller.
 */
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	return __bpf_prog_get(ufd, &type, attach_drv);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1288248f346fSJakub Kicinski 
1289aac3fc32SAndrey Ignatov /* Initially all BPF programs could be loaded w/o specifying
1290aac3fc32SAndrey Ignatov  * expected_attach_type. Later for some of them specifying expected_attach_type
1291aac3fc32SAndrey Ignatov  * at load time became required so that program could be validated properly.
1292aac3fc32SAndrey Ignatov  * Programs of types that are allowed to be loaded both w/ and w/o (for
1293aac3fc32SAndrey Ignatov  * backward compatibility) expected_attach_type, should have the default attach
1294aac3fc32SAndrey Ignatov  * type assigned to expected_attach_type for the latter case, so that it can be
1295aac3fc32SAndrey Ignatov  * validated later at attach time.
1296aac3fc32SAndrey Ignatov  *
1297aac3fc32SAndrey Ignatov  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1298aac3fc32SAndrey Ignatov  * prog type requires it but has some attach types that have to be backward
1299aac3fc32SAndrey Ignatov  * compatible.
1300aac3fc32SAndrey Ignatov  */
1301aac3fc32SAndrey Ignatov static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1302aac3fc32SAndrey Ignatov {
1303aac3fc32SAndrey Ignatov 	switch (attr->prog_type) {
1304aac3fc32SAndrey Ignatov 	case BPF_PROG_TYPE_CGROUP_SOCK:
1305aac3fc32SAndrey Ignatov 		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1306aac3fc32SAndrey Ignatov 		 * exist so checking for non-zero is the way to go here.
1307aac3fc32SAndrey Ignatov 		 */
1308aac3fc32SAndrey Ignatov 		if (!attr->expected_attach_type)
1309aac3fc32SAndrey Ignatov 			attr->expected_attach_type =
1310aac3fc32SAndrey Ignatov 				BPF_CGROUP_INET_SOCK_CREATE;
1311aac3fc32SAndrey Ignatov 		break;
1312aac3fc32SAndrey Ignatov 	}
1313aac3fc32SAndrey Ignatov }
1314aac3fc32SAndrey Ignatov 
/* Validate expected_attach_type supplied at prog load time. Only the
 * prog types listed below restrict the value; all other prog types
 * accept any expected_attach_type (including zero).
 */
static int
bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
				enum bpf_attach_type expected_attach_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
			return 0;
		default:
			return -EINVAL;
		}
	default:
		return 0;
	}
}
13455e43f899SAndrey Ignatov 
134609756af4SAlexei Starovoitov /* last field in 'union bpf_attr' used by this command */
13475e43f899SAndrey Ignatov #define	BPF_PROG_LOAD_LAST_FIELD expected_attach_type
134809756af4SAlexei Starovoitov 
134909756af4SAlexei Starovoitov static int bpf_prog_load(union bpf_attr *attr)
135009756af4SAlexei Starovoitov {
135109756af4SAlexei Starovoitov 	enum bpf_prog_type type = attr->prog_type;
135209756af4SAlexei Starovoitov 	struct bpf_prog *prog;
135309756af4SAlexei Starovoitov 	int err;
135409756af4SAlexei Starovoitov 	char license[128];
135509756af4SAlexei Starovoitov 	bool is_gpl;
135609756af4SAlexei Starovoitov 
135709756af4SAlexei Starovoitov 	if (CHECK_ATTR(BPF_PROG_LOAD))
135809756af4SAlexei Starovoitov 		return -EINVAL;
135909756af4SAlexei Starovoitov 
1360e07b98d9SDavid S. Miller 	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
1361e07b98d9SDavid S. Miller 		return -EINVAL;
1362e07b98d9SDavid S. Miller 
136309756af4SAlexei Starovoitov 	/* copy eBPF program license from user space */
1364535e7b4bSMickaël Salaün 	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
136509756af4SAlexei Starovoitov 			      sizeof(license) - 1) < 0)
136609756af4SAlexei Starovoitov 		return -EFAULT;
136709756af4SAlexei Starovoitov 	license[sizeof(license) - 1] = 0;
136809756af4SAlexei Starovoitov 
136909756af4SAlexei Starovoitov 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
137009756af4SAlexei Starovoitov 	is_gpl = license_is_gpl_compatible(license);
137109756af4SAlexei Starovoitov 
1372ef0915caSDaniel Borkmann 	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
1373ef0915caSDaniel Borkmann 		return -E2BIG;
137409756af4SAlexei Starovoitov 
13752541517cSAlexei Starovoitov 	if (type == BPF_PROG_TYPE_KPROBE &&
13762541517cSAlexei Starovoitov 	    attr->kern_version != LINUX_VERSION_CODE)
13772541517cSAlexei Starovoitov 		return -EINVAL;
13782541517cSAlexei Starovoitov 
137980b7d819SChenbo Feng 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
138080b7d819SChenbo Feng 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
138180b7d819SChenbo Feng 	    !capable(CAP_SYS_ADMIN))
13821be7f75dSAlexei Starovoitov 		return -EPERM;
13831be7f75dSAlexei Starovoitov 
1384aac3fc32SAndrey Ignatov 	bpf_prog_load_fixup_attach_type(attr);
13855e43f899SAndrey Ignatov 	if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
13865e43f899SAndrey Ignatov 		return -EINVAL;
13875e43f899SAndrey Ignatov 
138809756af4SAlexei Starovoitov 	/* plain bpf_prog allocation */
138909756af4SAlexei Starovoitov 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
139009756af4SAlexei Starovoitov 	if (!prog)
139109756af4SAlexei Starovoitov 		return -ENOMEM;
139209756af4SAlexei Starovoitov 
13935e43f899SAndrey Ignatov 	prog->expected_attach_type = attr->expected_attach_type;
13945e43f899SAndrey Ignatov 
13959a18eedbSJakub Kicinski 	prog->aux->offload_requested = !!attr->prog_ifindex;
13969a18eedbSJakub Kicinski 
1397afdb09c7SChenbo Feng 	err = security_bpf_prog_alloc(prog->aux);
1398aaac3ba9SAlexei Starovoitov 	if (err)
1399aaac3ba9SAlexei Starovoitov 		goto free_prog_nouncharge;
1400aaac3ba9SAlexei Starovoitov 
1401afdb09c7SChenbo Feng 	err = bpf_prog_charge_memlock(prog);
1402afdb09c7SChenbo Feng 	if (err)
1403afdb09c7SChenbo Feng 		goto free_prog_sec;
1404afdb09c7SChenbo Feng 
140509756af4SAlexei Starovoitov 	prog->len = attr->insn_cnt;
140609756af4SAlexei Starovoitov 
140709756af4SAlexei Starovoitov 	err = -EFAULT;
1408535e7b4bSMickaël Salaün 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
1409aafe6ae9SDaniel Borkmann 			   bpf_prog_insn_size(prog)) != 0)
141009756af4SAlexei Starovoitov 		goto free_prog;
141109756af4SAlexei Starovoitov 
141209756af4SAlexei Starovoitov 	prog->orig_prog = NULL;
1413a91263d5SDaniel Borkmann 	prog->jited = 0;
141409756af4SAlexei Starovoitov 
141509756af4SAlexei Starovoitov 	atomic_set(&prog->aux->refcnt, 1);
1416a91263d5SDaniel Borkmann 	prog->gpl_compatible = is_gpl ? 1 : 0;
141709756af4SAlexei Starovoitov 
14189a18eedbSJakub Kicinski 	if (bpf_prog_is_dev_bound(prog->aux)) {
1419ab3f0063SJakub Kicinski 		err = bpf_prog_offload_init(prog, attr);
1420ab3f0063SJakub Kicinski 		if (err)
1421ab3f0063SJakub Kicinski 			goto free_prog;
1422ab3f0063SJakub Kicinski 	}
1423ab3f0063SJakub Kicinski 
142409756af4SAlexei Starovoitov 	/* find program type: socket_filter vs tracing_filter */
142509756af4SAlexei Starovoitov 	err = find_prog_type(type, prog);
142609756af4SAlexei Starovoitov 	if (err < 0)
142709756af4SAlexei Starovoitov 		goto free_prog;
142809756af4SAlexei Starovoitov 
1429cb4d2b3fSMartin KaFai Lau 	prog->aux->load_time = ktime_get_boot_ns();
1430cb4d2b3fSMartin KaFai Lau 	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
1431cb4d2b3fSMartin KaFai Lau 	if (err)
1432cb4d2b3fSMartin KaFai Lau 		goto free_prog;
1433cb4d2b3fSMartin KaFai Lau 
143409756af4SAlexei Starovoitov 	/* run eBPF verifier */
14359bac3d6dSAlexei Starovoitov 	err = bpf_check(&prog, attr);
143609756af4SAlexei Starovoitov 	if (err < 0)
143709756af4SAlexei Starovoitov 		goto free_used_maps;
143809756af4SAlexei Starovoitov 
1439d1c55ab5SDaniel Borkmann 	prog = bpf_prog_select_runtime(prog, &err);
144004fd61abSAlexei Starovoitov 	if (err < 0)
144104fd61abSAlexei Starovoitov 		goto free_used_maps;
144209756af4SAlexei Starovoitov 
1443dc4bb0e2SMartin KaFai Lau 	err = bpf_prog_alloc_id(prog);
1444dc4bb0e2SMartin KaFai Lau 	if (err)
1445dc4bb0e2SMartin KaFai Lau 		goto free_used_maps;
1446dc4bb0e2SMartin KaFai Lau 
1447aa79781bSDaniel Borkmann 	err = bpf_prog_new_fd(prog);
1448b16d9aa4SMartin KaFai Lau 	if (err < 0) {
1449b16d9aa4SMartin KaFai Lau 		/* failed to allocate fd.
1450b16d9aa4SMartin KaFai Lau 		 * bpf_prog_put() is needed because the above
1451b16d9aa4SMartin KaFai Lau 		 * bpf_prog_alloc_id() has published the prog
1452b16d9aa4SMartin KaFai Lau 		 * to the userspace and the userspace may
1453b16d9aa4SMartin KaFai Lau 		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
1454b16d9aa4SMartin KaFai Lau 		 */
1455b16d9aa4SMartin KaFai Lau 		bpf_prog_put(prog);
1456b16d9aa4SMartin KaFai Lau 		return err;
1457b16d9aa4SMartin KaFai Lau 	}
145809756af4SAlexei Starovoitov 
145974451e66SDaniel Borkmann 	bpf_prog_kallsyms_add(prog);
146009756af4SAlexei Starovoitov 	return err;
146109756af4SAlexei Starovoitov 
146209756af4SAlexei Starovoitov free_used_maps:
14637d1982b4SDaniel Borkmann 	bpf_prog_kallsyms_del_subprogs(prog);
146409756af4SAlexei Starovoitov 	free_used_maps(prog->aux);
146509756af4SAlexei Starovoitov free_prog:
1466aaac3ba9SAlexei Starovoitov 	bpf_prog_uncharge_memlock(prog);
1467afdb09c7SChenbo Feng free_prog_sec:
1468afdb09c7SChenbo Feng 	security_bpf_prog_free(prog->aux);
1469aaac3ba9SAlexei Starovoitov free_prog_nouncharge:
147009756af4SAlexei Starovoitov 	bpf_prog_free(prog);
147109756af4SAlexei Starovoitov 	return err;
147209756af4SAlexei Starovoitov }
147309756af4SAlexei Starovoitov 
14746e71b04aSChenbo Feng #define BPF_OBJ_LAST_FIELD file_flags
1475b2197755SDaniel Borkmann 
1476b2197755SDaniel Borkmann static int bpf_obj_pin(const union bpf_attr *attr)
1477b2197755SDaniel Borkmann {
14786e71b04aSChenbo Feng 	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
1479b2197755SDaniel Borkmann 		return -EINVAL;
1480b2197755SDaniel Borkmann 
1481535e7b4bSMickaël Salaün 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
1482b2197755SDaniel Borkmann }
1483b2197755SDaniel Borkmann 
1484b2197755SDaniel Borkmann static int bpf_obj_get(const union bpf_attr *attr)
1485b2197755SDaniel Borkmann {
14866e71b04aSChenbo Feng 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
14876e71b04aSChenbo Feng 	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
1488b2197755SDaniel Borkmann 		return -EINVAL;
1489b2197755SDaniel Borkmann 
14906e71b04aSChenbo Feng 	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
14916e71b04aSChenbo Feng 				attr->file_flags);
1492b2197755SDaniel Borkmann }
1493b2197755SDaniel Borkmann 
/* Private state behind a bpf-raw-tracepoint anon inode fd. */
struct bpf_raw_tracepoint {
	struct bpf_raw_event_map *btp;	/* resolved raw tracepoint */
	struct bpf_prog *prog;		/* attached prog, NULL until registered */
};
1498c4f6699dSAlexei Starovoitov 
1499c4f6699dSAlexei Starovoitov static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
1500c4f6699dSAlexei Starovoitov {
1501c4f6699dSAlexei Starovoitov 	struct bpf_raw_tracepoint *raw_tp = filp->private_data;
1502c4f6699dSAlexei Starovoitov 
1503c4f6699dSAlexei Starovoitov 	if (raw_tp->prog) {
1504c4f6699dSAlexei Starovoitov 		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
1505c4f6699dSAlexei Starovoitov 		bpf_prog_put(raw_tp->prog);
1506c4f6699dSAlexei Starovoitov 	}
1507c4f6699dSAlexei Starovoitov 	kfree(raw_tp);
1508c4f6699dSAlexei Starovoitov 	return 0;
1509c4f6699dSAlexei Starovoitov }
1510c4f6699dSAlexei Starovoitov 
/* File operations for bpf-raw-tracepoint anon inodes; read/write are
 * backed by the bpf_dummy_* handlers defined elsewhere in this file,
 * only ->release() carries real work.
 */
static const struct file_operations bpf_raw_tp_fops = {
	.release	= bpf_raw_tracepoint_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};
1516c4f6699dSAlexei Starovoitov 
#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd

/* Handler for BPF_RAW_TRACEPOINT_OPEN: attach the given prog to the
 * named raw tracepoint and return an anon inode fd whose release
 * detaches the prog again.
 */
static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{
	struct bpf_raw_tracepoint *raw_tp;
	struct bpf_raw_event_map *btp;
	struct bpf_prog *prog;
	char tp_name[128];
	int tp_fd, err;

	/* Copy the tracepoint name from user space, forcing NUL
	 * termination at the buffer end.
	 */
	if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
			      sizeof(tp_name) - 1) < 0)
		return -EFAULT;
	tp_name[sizeof(tp_name) - 1] = 0;

	btp = bpf_find_raw_tracepoint(tp_name);
	if (!btp)
		return -ENOENT;

	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
	if (!raw_tp)
		return -ENOMEM;
	raw_tp->btp = btp;

	/* Only progs loaded as BPF_PROG_TYPE_RAW_TRACEPOINT may attach. */
	prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
				 BPF_PROG_TYPE_RAW_TRACEPOINT);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out_free_tp;
	}

	err = bpf_probe_register(raw_tp->btp, prog);
	if (err)
		goto out_put_prog;

	raw_tp->prog = prog;
	tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
				 O_CLOEXEC);
	if (tp_fd < 0) {
		/* No fd means ->release() will never run, so undo the
		 * registration by hand before the common cleanup.
		 */
		bpf_probe_unregister(raw_tp->btp, prog);
		err = tp_fd;
		goto out_put_prog;
	}
	return tp_fd;

out_put_prog:
	bpf_prog_put(prog);
out_free_tp:
	kfree(raw_tp);
	return err;
}
1568c4f6699dSAlexei Starovoitov 
156933491588SAnders Roxell static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
157033491588SAnders Roxell 					     enum bpf_attach_type attach_type)
157133491588SAnders Roxell {
157233491588SAnders Roxell 	switch (prog->type) {
157333491588SAnders Roxell 	case BPF_PROG_TYPE_CGROUP_SOCK:
157433491588SAnders Roxell 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
157533491588SAnders Roxell 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
157633491588SAnders Roxell 	default:
157733491588SAnders Roxell 		return 0;
157833491588SAnders Roxell 	}
157933491588SAnders Roxell }
158033491588SAnders Roxell 
#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

#define BPF_F_ATTACH_MASK \
	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)

/* Handler for BPF_PROG_ATTACH: map the requested attach point to the
 * prog type it accepts, take a reference on the prog fd and dispatch
 * to the subsystem-specific attach routine (CAP_NET_ADMIN only).
 */
static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
		return -EINVAL;

	/* Each attach point admits exactly one prog type. */
	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_MSG_VERDICT:
		ptype = BPF_PROG_TYPE_SK_MSG;
		break;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		ptype = BPF_PROG_TYPE_SK_SKB;
		break;
	case BPF_LIRC_MODE2:
		ptype = BPF_PROG_TYPE_LIRC_MODE2;
		break;
	case BPF_FLOW_DISSECTOR:
		ptype = BPF_PROG_TYPE_FLOW_DISSECTOR;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	/* Reject progs whose load-time expected_attach_type differs. */
	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	/* Sockmap, lirc and flow dissector attach outside the cgroup
	 * infrastructure; everything else goes through cgroup-bpf.
	 */
	switch (ptype) {
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		ret = sockmap_get_from_fd(attr, ptype, prog);
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		ret = lirc_prog_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		ret = skb_flow_dissector_bpf_prog_attach(attr, prog);
		break;
	default:
		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
	}

	/* On success the attach target owns our reference. */
	if (ret)
		bpf_prog_put(prog);
	return ret;
}
1670f4324551SDaniel Mack 
#define BPF_PROG_DETACH_LAST_FIELD attach_type

/* Handler for BPF_PROG_DETACH: undo a previous attach for the given
 * attach point (CAP_NET_ADMIN only). Non-cgroup targets (sockmap,
 * lirc, flow dissector) handle detach directly and return early;
 * cgroup targets fall through to cgroup_bpf_prog_detach().
 */
static int bpf_prog_detach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_MSG_VERDICT:
		/* NULL prog means detach in the sockmap helper. */
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL);
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL);
	case BPF_LIRC_MODE2:
		return lirc_prog_detach(attr);
	case BPF_FLOW_DISSECTOR:
		return skb_flow_dissector_bpf_prog_detach(attr);
	default:
		return -EINVAL;
	}

	return cgroup_bpf_prog_detach(attr, ptype);
}
172240304b2aSLawrence Brakmo 
#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt

/* Handler for BPF_PROG_QUERY: list progs attached at the given attach
 * point (CAP_NET_ADMIN only). Lirc queries are handled directly; all
 * other supported attach points are cgroup-based.
 */
static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
		break;
	case BPF_LIRC_MODE2:
		return lirc_prog_query(attr, uattr);
	default:
		return -EINVAL;
	}

	return cgroup_bpf_prog_query(attr, uattr);
}
1758f4324551SDaniel Mack 
17591cf1cae9SAlexei Starovoitov #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
17601cf1cae9SAlexei Starovoitov 
17611cf1cae9SAlexei Starovoitov static int bpf_prog_test_run(const union bpf_attr *attr,
17621cf1cae9SAlexei Starovoitov 			     union bpf_attr __user *uattr)
17631cf1cae9SAlexei Starovoitov {
17641cf1cae9SAlexei Starovoitov 	struct bpf_prog *prog;
17651cf1cae9SAlexei Starovoitov 	int ret = -ENOTSUPP;
17661cf1cae9SAlexei Starovoitov 
176761f3c964SAlexei Starovoitov 	if (!capable(CAP_SYS_ADMIN))
176861f3c964SAlexei Starovoitov 		return -EPERM;
17691cf1cae9SAlexei Starovoitov 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
17701cf1cae9SAlexei Starovoitov 		return -EINVAL;
17711cf1cae9SAlexei Starovoitov 
17721cf1cae9SAlexei Starovoitov 	prog = bpf_prog_get(attr->test.prog_fd);
17731cf1cae9SAlexei Starovoitov 	if (IS_ERR(prog))
17741cf1cae9SAlexei Starovoitov 		return PTR_ERR(prog);
17751cf1cae9SAlexei Starovoitov 
17761cf1cae9SAlexei Starovoitov 	if (prog->aux->ops->test_run)
17771cf1cae9SAlexei Starovoitov 		ret = prog->aux->ops->test_run(prog, attr, uattr);
17781cf1cae9SAlexei Starovoitov 
17791cf1cae9SAlexei Starovoitov 	bpf_prog_put(prog);
17801cf1cae9SAlexei Starovoitov 	return ret;
17811cf1cae9SAlexei Starovoitov }
17821cf1cae9SAlexei Starovoitov 
178334ad5580SMartin KaFai Lau #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
178434ad5580SMartin KaFai Lau 
178534ad5580SMartin KaFai Lau static int bpf_obj_get_next_id(const union bpf_attr *attr,
178634ad5580SMartin KaFai Lau 			       union bpf_attr __user *uattr,
178734ad5580SMartin KaFai Lau 			       struct idr *idr,
178834ad5580SMartin KaFai Lau 			       spinlock_t *lock)
178934ad5580SMartin KaFai Lau {
179034ad5580SMartin KaFai Lau 	u32 next_id = attr->start_id;
179134ad5580SMartin KaFai Lau 	int err = 0;
179234ad5580SMartin KaFai Lau 
179334ad5580SMartin KaFai Lau 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
179434ad5580SMartin KaFai Lau 		return -EINVAL;
179534ad5580SMartin KaFai Lau 
179634ad5580SMartin KaFai Lau 	if (!capable(CAP_SYS_ADMIN))
179734ad5580SMartin KaFai Lau 		return -EPERM;
179834ad5580SMartin KaFai Lau 
179934ad5580SMartin KaFai Lau 	next_id++;
180034ad5580SMartin KaFai Lau 	spin_lock_bh(lock);
180134ad5580SMartin KaFai Lau 	if (!idr_get_next(idr, &next_id))
180234ad5580SMartin KaFai Lau 		err = -ENOENT;
180334ad5580SMartin KaFai Lau 	spin_unlock_bh(lock);
180434ad5580SMartin KaFai Lau 
180534ad5580SMartin KaFai Lau 	if (!err)
180634ad5580SMartin KaFai Lau 		err = put_user(next_id, &uattr->next_id);
180734ad5580SMartin KaFai Lau 
180834ad5580SMartin KaFai Lau 	return err;
180934ad5580SMartin KaFai Lau }
181034ad5580SMartin KaFai Lau 
#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

/* Handler for BPF_PROG_GET_FD_BY_ID: look up a prog by its global id
 * and install a new fd referencing it (CAP_SYS_ADMIN only).
 */
static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Holding prog_idr_lock keeps the entry from disappearing
	 * between the lookup and taking our own reference;
	 * inc_not_zero rejects progs already being torn down.
	 */
	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	/* On fd failure, drop the reference we just took. */
	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}
1842b16d9aa4SMartin KaFai Lau 
#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags

/* BPF_MAP_GET_FD_BY_ID command: translate a globally visible map id into
 * a new fd referencing that map.  open_flags (masked by BPF_OBJ_FLAG_MASK)
 * selects the access mode for the new fd.  Restricted to CAP_SYS_ADMIN.
 */
static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Translate the BPF object flags into file O_* flags up front so
	 * we fail before taking any reference.
	 */
	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	/* Resolve id -> map and take a reference under the idr lock so the
	 * map cannot disappear in between; bpf_map_inc_not_zero() yields
	 * -ENOENT if the map is already being torn down.
	 */
	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Install the fd; on failure drop the reference we just took. */
	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}
1880bd5f5f4eSMartin KaFai Lau 
18817105e828SDaniel Borkmann static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
18827105e828SDaniel Borkmann 					      unsigned long addr)
18837105e828SDaniel Borkmann {
18847105e828SDaniel Borkmann 	int i;
18857105e828SDaniel Borkmann 
18867105e828SDaniel Borkmann 	for (i = 0; i < prog->aux->used_map_cnt; i++)
18877105e828SDaniel Borkmann 		if (prog->aux->used_maps[i] == (void *)addr)
18887105e828SDaniel Borkmann 			return prog->aux->used_maps[i];
18897105e828SDaniel Borkmann 	return NULL;
18907105e828SDaniel Borkmann }
18917105e828SDaniel Borkmann 
/* Allocate a copy of the program's instructions with kernel-internal
 * rewrites undone / pointers sanitized so it can be dumped to user space:
 *  - rewritten tail calls are converted back to the bpf_tail_call helper
 *    call form;
 *  - BPF_CALL_ARGS (internal interpreter variant) is shown as a plain
 *    BPF_CALL, and call immediates are cleared unless raw dumping is
 *    allowed (bpf_dump_raw_ok());
 *  - ld_imm64 instructions holding a map pointer are converted back to
 *    BPF_PSEUDO_MAP_FD form carrying the map id instead of the kernel
 *    address.
 * Returns NULL on allocation failure; the caller must kfree() the result.
 */
static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u64 imm;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok())
				insns[i].imm = 0;
			continue;
		}

		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		/* ld_imm64 spans two insns; reassemble the 64-bit immediate
		 * from the low half in insns[i] and high half in insns[i+1].
		 */
		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm);
		if (map) {
			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
			insns[i].imm = map->id;
			insns[i + 1].imm = 0;
			continue;
		}

		/* Scrub an embedded prog->aux pointer unless raw dumping is
		 * allowed — do not leak kernel addresses to user space.
		 */
		if (!bpf_dump_raw_ok() &&
		    imm == (unsigned long)prog->aux) {
			insns[i].imm = 0;
			insns[i + 1].imm = 0;
			continue;
		}
	}

	return insns;
}
19417105e828SDaniel Borkmann 
19421e270976SMartin KaFai Lau static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
19431e270976SMartin KaFai Lau 				   const union bpf_attr *attr,
19441e270976SMartin KaFai Lau 				   union bpf_attr __user *uattr)
19451e270976SMartin KaFai Lau {
19461e270976SMartin KaFai Lau 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
19471e270976SMartin KaFai Lau 	struct bpf_prog_info info = {};
19481e270976SMartin KaFai Lau 	u32 info_len = attr->info.info_len;
19491e270976SMartin KaFai Lau 	char __user *uinsns;
19501e270976SMartin KaFai Lau 	u32 ulen;
19511e270976SMartin KaFai Lau 	int err;
19521e270976SMartin KaFai Lau 
1953dcab51f1SMartin KaFai Lau 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
19541e270976SMartin KaFai Lau 	if (err)
19551e270976SMartin KaFai Lau 		return err;
19561e270976SMartin KaFai Lau 	info_len = min_t(u32, sizeof(info), info_len);
19571e270976SMartin KaFai Lau 
19581e270976SMartin KaFai Lau 	if (copy_from_user(&info, uinfo, info_len))
195989b09689SDaniel Borkmann 		return -EFAULT;
19601e270976SMartin KaFai Lau 
19611e270976SMartin KaFai Lau 	info.type = prog->type;
19621e270976SMartin KaFai Lau 	info.id = prog->aux->id;
1963cb4d2b3fSMartin KaFai Lau 	info.load_time = prog->aux->load_time;
1964cb4d2b3fSMartin KaFai Lau 	info.created_by_uid = from_kuid_munged(current_user_ns(),
1965cb4d2b3fSMartin KaFai Lau 					       prog->aux->user->uid);
1966b85fab0eSJiri Olsa 	info.gpl_compatible = prog->gpl_compatible;
19671e270976SMartin KaFai Lau 
19681e270976SMartin KaFai Lau 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
1969cb4d2b3fSMartin KaFai Lau 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
1970cb4d2b3fSMartin KaFai Lau 
1971cb4d2b3fSMartin KaFai Lau 	ulen = info.nr_map_ids;
1972cb4d2b3fSMartin KaFai Lau 	info.nr_map_ids = prog->aux->used_map_cnt;
1973cb4d2b3fSMartin KaFai Lau 	ulen = min_t(u32, info.nr_map_ids, ulen);
1974cb4d2b3fSMartin KaFai Lau 	if (ulen) {
1975721e08daSMartin KaFai Lau 		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
1976cb4d2b3fSMartin KaFai Lau 		u32 i;
1977cb4d2b3fSMartin KaFai Lau 
1978cb4d2b3fSMartin KaFai Lau 		for (i = 0; i < ulen; i++)
1979cb4d2b3fSMartin KaFai Lau 			if (put_user(prog->aux->used_maps[i]->id,
1980cb4d2b3fSMartin KaFai Lau 				     &user_map_ids[i]))
1981cb4d2b3fSMartin KaFai Lau 				return -EFAULT;
1982cb4d2b3fSMartin KaFai Lau 	}
19831e270976SMartin KaFai Lau 
19841e270976SMartin KaFai Lau 	if (!capable(CAP_SYS_ADMIN)) {
19851e270976SMartin KaFai Lau 		info.jited_prog_len = 0;
19861e270976SMartin KaFai Lau 		info.xlated_prog_len = 0;
1987dbecd738SSandipan Das 		info.nr_jited_ksyms = 0;
19881e270976SMartin KaFai Lau 		goto done;
19891e270976SMartin KaFai Lau 	}
19901e270976SMartin KaFai Lau 
19911e270976SMartin KaFai Lau 	ulen = info.xlated_prog_len;
19929975a54bSDaniel Borkmann 	info.xlated_prog_len = bpf_prog_insn_size(prog);
19931e270976SMartin KaFai Lau 	if (info.xlated_prog_len && ulen) {
19947105e828SDaniel Borkmann 		struct bpf_insn *insns_sanitized;
19957105e828SDaniel Borkmann 		bool fault;
19967105e828SDaniel Borkmann 
19977105e828SDaniel Borkmann 		if (prog->blinded && !bpf_dump_raw_ok()) {
19987105e828SDaniel Borkmann 			info.xlated_prog_insns = 0;
19997105e828SDaniel Borkmann 			goto done;
20007105e828SDaniel Borkmann 		}
20017105e828SDaniel Borkmann 		insns_sanitized = bpf_insn_prepare_dump(prog);
20027105e828SDaniel Borkmann 		if (!insns_sanitized)
20037105e828SDaniel Borkmann 			return -ENOMEM;
20041e270976SMartin KaFai Lau 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
20051e270976SMartin KaFai Lau 		ulen = min_t(u32, info.xlated_prog_len, ulen);
20067105e828SDaniel Borkmann 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
20077105e828SDaniel Borkmann 		kfree(insns_sanitized);
20087105e828SDaniel Borkmann 		if (fault)
20091e270976SMartin KaFai Lau 			return -EFAULT;
20101e270976SMartin KaFai Lau 	}
20111e270976SMartin KaFai Lau 
2012675fc275SJakub Kicinski 	if (bpf_prog_is_dev_bound(prog->aux)) {
2013675fc275SJakub Kicinski 		err = bpf_prog_offload_info_fill(&info, prog);
2014675fc275SJakub Kicinski 		if (err)
2015675fc275SJakub Kicinski 			return err;
2016fcfb126dSJiong Wang 		goto done;
2017fcfb126dSJiong Wang 	}
2018fcfb126dSJiong Wang 
2019fcfb126dSJiong Wang 	/* NOTE: the following code is supposed to be skipped for offload.
2020fcfb126dSJiong Wang 	 * bpf_prog_offload_info_fill() is the place to fill similar fields
2021fcfb126dSJiong Wang 	 * for offload.
2022fcfb126dSJiong Wang 	 */
2023fcfb126dSJiong Wang 	ulen = info.jited_prog_len;
20244d56a76eSSandipan Das 	if (prog->aux->func_cnt) {
20254d56a76eSSandipan Das 		u32 i;
20264d56a76eSSandipan Das 
20274d56a76eSSandipan Das 		info.jited_prog_len = 0;
20284d56a76eSSandipan Das 		for (i = 0; i < prog->aux->func_cnt; i++)
20294d56a76eSSandipan Das 			info.jited_prog_len += prog->aux->func[i]->jited_len;
20304d56a76eSSandipan Das 	} else {
2031fcfb126dSJiong Wang 		info.jited_prog_len = prog->jited_len;
20324d56a76eSSandipan Das 	}
20334d56a76eSSandipan Das 
2034fcfb126dSJiong Wang 	if (info.jited_prog_len && ulen) {
2035fcfb126dSJiong Wang 		if (bpf_dump_raw_ok()) {
2036fcfb126dSJiong Wang 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
2037fcfb126dSJiong Wang 			ulen = min_t(u32, info.jited_prog_len, ulen);
20384d56a76eSSandipan Das 
20394d56a76eSSandipan Das 			/* for multi-function programs, copy the JITed
20404d56a76eSSandipan Das 			 * instructions for all the functions
20414d56a76eSSandipan Das 			 */
20424d56a76eSSandipan Das 			if (prog->aux->func_cnt) {
20434d56a76eSSandipan Das 				u32 len, free, i;
20444d56a76eSSandipan Das 				u8 *img;
20454d56a76eSSandipan Das 
20464d56a76eSSandipan Das 				free = ulen;
20474d56a76eSSandipan Das 				for (i = 0; i < prog->aux->func_cnt; i++) {
20484d56a76eSSandipan Das 					len = prog->aux->func[i]->jited_len;
20494d56a76eSSandipan Das 					len = min_t(u32, len, free);
20504d56a76eSSandipan Das 					img = (u8 *) prog->aux->func[i]->bpf_func;
20514d56a76eSSandipan Das 					if (copy_to_user(uinsns, img, len))
20524d56a76eSSandipan Das 						return -EFAULT;
20534d56a76eSSandipan Das 					uinsns += len;
20544d56a76eSSandipan Das 					free -= len;
20554d56a76eSSandipan Das 					if (!free)
20564d56a76eSSandipan Das 						break;
20574d56a76eSSandipan Das 				}
20584d56a76eSSandipan Das 			} else {
2059fcfb126dSJiong Wang 				if (copy_to_user(uinsns, prog->bpf_func, ulen))
2060fcfb126dSJiong Wang 					return -EFAULT;
20614d56a76eSSandipan Das 			}
2062fcfb126dSJiong Wang 		} else {
2063fcfb126dSJiong Wang 			info.jited_prog_insns = 0;
2064fcfb126dSJiong Wang 		}
2065675fc275SJakub Kicinski 	}
2066675fc275SJakub Kicinski 
2067dbecd738SSandipan Das 	ulen = info.nr_jited_ksyms;
2068dbecd738SSandipan Das 	info.nr_jited_ksyms = prog->aux->func_cnt;
2069dbecd738SSandipan Das 	if (info.nr_jited_ksyms && ulen) {
2070dbecd738SSandipan Das 		if (bpf_dump_raw_ok()) {
2071dbecd738SSandipan Das 			u64 __user *user_ksyms;
2072dbecd738SSandipan Das 			ulong ksym_addr;
2073dbecd738SSandipan Das 			u32 i;
2074dbecd738SSandipan Das 
2075dbecd738SSandipan Das 			/* copy the address of the kernel symbol
2076dbecd738SSandipan Das 			 * corresponding to each function
2077dbecd738SSandipan Das 			 */
2078dbecd738SSandipan Das 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
2079dbecd738SSandipan Das 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
2080dbecd738SSandipan Das 			for (i = 0; i < ulen; i++) {
2081dbecd738SSandipan Das 				ksym_addr = (ulong) prog->aux->func[i]->bpf_func;
2082dbecd738SSandipan Das 				ksym_addr &= PAGE_MASK;
2083dbecd738SSandipan Das 				if (put_user((u64) ksym_addr, &user_ksyms[i]))
2084dbecd738SSandipan Das 					return -EFAULT;
2085dbecd738SSandipan Das 			}
2086dbecd738SSandipan Das 		} else {
2087dbecd738SSandipan Das 			info.jited_ksyms = 0;
2088dbecd738SSandipan Das 		}
2089dbecd738SSandipan Das 	}
2090dbecd738SSandipan Das 
2091815581c1SSandipan Das 	ulen = info.nr_jited_func_lens;
2092815581c1SSandipan Das 	info.nr_jited_func_lens = prog->aux->func_cnt;
2093815581c1SSandipan Das 	if (info.nr_jited_func_lens && ulen) {
2094815581c1SSandipan Das 		if (bpf_dump_raw_ok()) {
2095815581c1SSandipan Das 			u32 __user *user_lens;
2096815581c1SSandipan Das 			u32 func_len, i;
2097815581c1SSandipan Das 
2098815581c1SSandipan Das 			/* copy the JITed image lengths for each function */
2099815581c1SSandipan Das 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
2100815581c1SSandipan Das 			user_lens = u64_to_user_ptr(info.jited_func_lens);
2101815581c1SSandipan Das 			for (i = 0; i < ulen; i++) {
2102815581c1SSandipan Das 				func_len = prog->aux->func[i]->jited_len;
2103815581c1SSandipan Das 				if (put_user(func_len, &user_lens[i]))
2104815581c1SSandipan Das 					return -EFAULT;
2105815581c1SSandipan Das 			}
2106815581c1SSandipan Das 		} else {
2107815581c1SSandipan Das 			info.jited_func_lens = 0;
2108815581c1SSandipan Das 		}
2109815581c1SSandipan Das 	}
2110815581c1SSandipan Das 
21111e270976SMartin KaFai Lau done:
21121e270976SMartin KaFai Lau 	if (copy_to_user(uinfo, &info, info_len) ||
21131e270976SMartin KaFai Lau 	    put_user(info_len, &uattr->info.info_len))
21141e270976SMartin KaFai Lau 		return -EFAULT;
21151e270976SMartin KaFai Lau 
21161e270976SMartin KaFai Lau 	return 0;
21171e270976SMartin KaFai Lau }
21181e270976SMartin KaFai Lau 
/* BPF_OBJ_GET_INFO_BY_FD handler for map fds: fill a bpf_map_info with
 * the map's metadata (type, sizes, flags, name, BTF ids and, for
 * offloaded maps, device info) and copy it back to user space.
 * Returns 0 on success, -EFAULT on copy failures, or an offload error.
 */
static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	/* Any bytes beyond the kernel's known struct size must be zero;
	 * then fill only as much as the user buffer can hold.
	 */
	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	memcpy(info.name, map->name, sizeof(map->name));

	/* BTF ids are only meaningful when the map was loaded with BTF. */
	if (map->btf) {
		info.btf_id = btf_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}
21591e270976SMartin KaFai Lau 
216062dab84cSMartin KaFai Lau static int bpf_btf_get_info_by_fd(struct btf *btf,
216162dab84cSMartin KaFai Lau 				  const union bpf_attr *attr,
216262dab84cSMartin KaFai Lau 				  union bpf_attr __user *uattr)
216362dab84cSMartin KaFai Lau {
216462dab84cSMartin KaFai Lau 	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
216562dab84cSMartin KaFai Lau 	u32 info_len = attr->info.info_len;
216662dab84cSMartin KaFai Lau 	int err;
216762dab84cSMartin KaFai Lau 
2168dcab51f1SMartin KaFai Lau 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
216962dab84cSMartin KaFai Lau 	if (err)
217062dab84cSMartin KaFai Lau 		return err;
217162dab84cSMartin KaFai Lau 
217262dab84cSMartin KaFai Lau 	return btf_get_info_by_fd(btf, attr, uattr);
217362dab84cSMartin KaFai Lau }
217462dab84cSMartin KaFai Lau 
#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

/* BPF_OBJ_GET_INFO_BY_FD command: dispatch on the fd's file operations
 * table to determine whether the fd refers to a BPF program, a map or a
 * BTF object, and fill the corresponding info structure.  Returns
 * -EBADFD for an invalid fd and -EINVAL for a non-BPF fd.
 */
static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	/* The f_op table identifies which kind of BPF object this is. */
	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else if (f.file->f_op == &btf_fops)
		err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}
22051e270976SMartin KaFai Lau 
2206f56a653cSMartin KaFai Lau #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
2207f56a653cSMartin KaFai Lau 
2208f56a653cSMartin KaFai Lau static int bpf_btf_load(const union bpf_attr *attr)
2209f56a653cSMartin KaFai Lau {
2210f56a653cSMartin KaFai Lau 	if (CHECK_ATTR(BPF_BTF_LOAD))
2211f56a653cSMartin KaFai Lau 		return -EINVAL;
2212f56a653cSMartin KaFai Lau 
2213f56a653cSMartin KaFai Lau 	if (!capable(CAP_SYS_ADMIN))
2214f56a653cSMartin KaFai Lau 		return -EPERM;
2215f56a653cSMartin KaFai Lau 
2216f56a653cSMartin KaFai Lau 	return btf_new_fd(attr);
2217f56a653cSMartin KaFai Lau }
2218f56a653cSMartin KaFai Lau 
221978958fcaSMartin KaFai Lau #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
222078958fcaSMartin KaFai Lau 
222178958fcaSMartin KaFai Lau static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
222278958fcaSMartin KaFai Lau {
222378958fcaSMartin KaFai Lau 	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
222478958fcaSMartin KaFai Lau 		return -EINVAL;
222578958fcaSMartin KaFai Lau 
222678958fcaSMartin KaFai Lau 	if (!capable(CAP_SYS_ADMIN))
222778958fcaSMartin KaFai Lau 		return -EPERM;
222878958fcaSMartin KaFai Lau 
222978958fcaSMartin KaFai Lau 	return btf_get_fd_by_id(attr->btf_id);
223078958fcaSMartin KaFai Lau }
223178958fcaSMartin KaFai Lau 
/* Copy the result of a BPF_TASK_FD_QUERY back to user space.
 *
 * The queried name @buf (may be NULL) is copied into the user buffer
 * described by attr->task_fd_query.buf / buf_len; the string's true
 * length is always reported back via uattr->task_fd_query.buf_len so
 * the caller can detect truncation.  Returns -ENOSPC when the string
 * had to be truncated (a NUL-terminated prefix is still copied),
 * -EFAULT on copy failures, 0 otherwise.
 */
static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				    union bpf_attr __user *uattr,
				    u32 prog_id, u32 fd_type,
				    const char *buf, u64 probe_offset,
				    u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	/* Report the full string length regardless of the buffer size. */
	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NULL terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NULL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NULL terminator,
			 * do a partial copy with NULL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}
227841bdc4b4SYonghong Song 
#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

/* BPF_TASK_FD_QUERY command: given a pid and an fd number in that task's
 * fd table, report which BPF program is attached through that fd.
 * Supports raw-tracepoint fds directly and perf-event-backed fds via
 * bpf_get_perf_event_info().  Restricted to CAP_SYS_ADMIN.  Returns
 * -ENOENT if the task or its files struct cannot be found, -EBADF for a
 * bad fd, -ENOTSUPP for fd types this query does not understand.
 */
static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct files_struct *files;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* No flags are defined yet; reject anything non-zero. */
	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	if (!task)
		return -ENOENT;

	/* Hold the task's files struct, then drop the task reference —
	 * only the fd table is needed from here on.
	 */
	files = get_files_struct(task);
	put_task_struct(task);
	if (!files)
		return -ENOENT;

	/* Look up the fd under file_lock and take a file reference so it
	 * stays valid after we release the lock and the files struct.
	 */
	err = 0;
	spin_lock(&files->file_lock);
	file = fcheck_files(files, fd);
	if (!file)
		err = -EBADF;
	else
		get_file(file);
	spin_unlock(&files->file_lock);
	put_files_struct(files);

	if (err)
		goto out;

	/* Raw tracepoint fd: report the attached prog id and tp name. */
	if (file->f_op == &bpf_raw_tp_fops) {
		struct bpf_raw_tracepoint *raw_tp = file->private_data;
		struct bpf_raw_event_map *btp = raw_tp->btp;

		err = bpf_task_fd_query_copy(attr, uattr,
					     raw_tp->prog->aux->id,
					     BPF_FD_TYPE_RAW_TRACEPOINT,
					     btp->tp->name, 0, 0);
		goto put_file;
	}

	/* perf event fd: let the perf layer describe the attachment. */
	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

	err = -ENOTSUPP;
put_file:
	fput(file);
out:
	return err;
}
235741bdc4b4SYonghong Song 
/* The bpf(2) syscall: single entry point for all BPF commands.
 *
 * The user's bpf_attr is copied in after verifying that any bytes
 * beyond the size we understand are zero (forward/backward compat),
 * checked with the security_bpf() LSM hook, then dispatched to the
 * per-command handler.  Unprivileged use can be disabled entirely via
 * the sysctl_unprivileged_bpf_disabled knob.
 */
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	/* Give the LSM a chance to veto the command before dispatch. */
	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
2452