xref: /linux/kernel/bpf/syscall.c (revision c79c3c34f75d72a066e292b10aa50fc758c97c89)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/bpf_trace.h>
6 #include <linux/bpf_lirc.h>
7 #include <linux/bpf_verifier.h>
8 #include <linux/btf.h>
9 #include <linux/syscalls.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/vmalloc.h>
13 #include <linux/mmzone.h>
14 #include <linux/anon_inodes.h>
15 #include <linux/fdtable.h>
16 #include <linux/file.h>
17 #include <linux/fs.h>
18 #include <linux/license.h>
19 #include <linux/filter.h>
20 #include <linux/kernel.h>
21 #include <linux/idr.h>
22 #include <linux/cred.h>
23 #include <linux/timekeeping.h>
24 #include <linux/ctype.h>
25 #include <linux/nospec.h>
26 #include <linux/audit.h>
27 #include <uapi/linux/btf.h>
28 #include <linux/pgtable.h>
29 #include <linux/bpf_lsm.h>
30 #include <linux/poll.h>
31 #include <linux/bpf-netns.h>
32 #include <linux/rcupdate_trace.h>
33 #include <linux/memcontrol.h>
34 
35 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
36 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
37 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
38 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
39 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
40 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
41 			IS_FD_HASH(map))
42 
43 #define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
44 
45 DEFINE_PER_CPU(int, bpf_prog_active);
46 static DEFINE_IDR(prog_idr);
47 static DEFINE_SPINLOCK(prog_idr_lock);
48 static DEFINE_IDR(map_idr);
49 static DEFINE_SPINLOCK(map_idr_lock);
50 static DEFINE_IDR(link_idr);
51 static DEFINE_SPINLOCK(link_idr_lock);
52 
53 int sysctl_unprivileged_bpf_disabled __read_mostly;
54 
55 static const struct bpf_map_ops * const bpf_map_types[] = {
56 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
57 #define BPF_MAP_TYPE(_id, _ops) \
58 	[_id] = &_ops,
59 #define BPF_LINK_TYPE(_id, _name)
60 #include <linux/bpf_types.h>
61 #undef BPF_PROG_TYPE
62 #undef BPF_MAP_TYPE
63 #undef BPF_LINK_TYPE
64 };
65 
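/* For reference: after including <linux/bpf_types.h> above, only the
 * BPF_MAP_TYPE() entries survive and the initializer expands to entries
 * such as (abridged sketch, not the full list):
 *
 *	[BPF_MAP_TYPE_ARRAY] = &array_map_ops,
 *	[BPF_MAP_TYPE_HASH]  = &htab_map_ops,
 *
 * while BPF_PROG_TYPE() and BPF_LINK_TYPE() expand to nothing here.
 */
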
66 /*
67  * If we're handed a bigger struct than we know of, ensure all the unknown bits
68  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
69  * we don't know about yet.
70  *
71  * There is a ToCToU window between this function call and the following
72  * copy_from_user() call. However, this is not a concern since this function
73  * only serves to future-proof against unknown trailing bits.
74  */
75 int bpf_check_uarg_tail_zero(void __user *uaddr,
76 			     size_t expected_size,
77 			     size_t actual_size)
78 {
79 	unsigned char __user *addr = uaddr + expected_size;
80 	int res;
81 
82 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
83 		return -E2BIG;
84 
85 	if (actual_size <= expected_size)
86 		return 0;
87 
88 	res = check_zeroed_user(addr, actual_size - expected_size);
89 	if (res < 0)
90 		return res;
91 	return res ? 0 : -E2BIG;
92 }
93 
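/* Typical caller pattern (illustrative sketch, modeled on the
 * *_get_info_by_fd() handlers elsewhere in this file; "info", "uinfo" and
 * "info_len" are placeholder names, not definitions from this excerpt):
 *
 *	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
 *	if (err)
 *		return err;
 *	info_len = min_t(u32, sizeof(info), info_len);
 *	memset(&info, 0, sizeof(info));
 *	if (copy_from_user(&info, uinfo, info_len))
 *		return -EFAULT;
 */
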
94 const struct bpf_map_ops bpf_map_offload_ops = {
95 	.map_meta_equal = bpf_map_meta_equal,
96 	.map_alloc = bpf_map_offload_map_alloc,
97 	.map_free = bpf_map_offload_map_free,
98 	.map_check_btf = map_check_no_btf,
99 };
100 
101 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
102 {
103 	const struct bpf_map_ops *ops;
104 	u32 type = attr->map_type;
105 	struct bpf_map *map;
106 	int err;
107 
108 	if (type >= ARRAY_SIZE(bpf_map_types))
109 		return ERR_PTR(-EINVAL);
110 	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
111 	ops = bpf_map_types[type];
112 	if (!ops)
113 		return ERR_PTR(-EINVAL);
114 
115 	if (ops->map_alloc_check) {
116 		err = ops->map_alloc_check(attr);
117 		if (err)
118 			return ERR_PTR(err);
119 	}
120 	if (attr->map_ifindex)
121 		ops = &bpf_map_offload_ops;
122 	map = ops->map_alloc(attr);
123 	if (IS_ERR(map))
124 		return map;
125 	map->ops = ops;
126 	map->map_type = type;
127 	return map;
128 }
129 
130 static u32 bpf_map_value_size(const struct bpf_map *map)
131 {
132 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
133 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
134 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
135 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
136 		return round_up(map->value_size, 8) * num_possible_cpus();
137 	else if (IS_FD_MAP(map))
138 		return sizeof(u32);
139 	else
140 		return  map->value_size;
141 }
142 
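/* Worked example: a BPF_MAP_TYPE_PERCPU_ARRAY with value_size == 12 on a
 * system with 4 possible CPUs reports round_up(12, 8) * 4 == 64 bytes per
 * element to the syscall interface, since each CPU's copy is padded to an
 * 8-byte boundary.
 */
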
143 static void maybe_wait_bpf_programs(struct bpf_map *map)
144 {
145 	/* Wait for any running BPF programs to complete so that
146 	 * userspace, when we return to it, knows that all programs
147 	 * that could be running use the new map value.
148 	 */
149 	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
150 	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
151 		synchronize_rcu();
152 }
153 
154 static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
155 				void *value, __u64 flags)
156 {
157 	int err;
158 
159 	/* Need to create a kthread, thus must be able to sleep */
160 	if (bpf_map_is_dev_bound(map)) {
161 		return bpf_map_offload_update_elem(map, key, value, flags);
162 	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
163 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
164 		return map->ops->map_update_elem(map, key, value, flags);
165 	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
166 		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
167 		return sock_map_update_elem_sys(map, key, value, flags);
168 	} else if (IS_FD_PROG_ARRAY(map)) {
169 		return bpf_fd_array_map_update_elem(map, f.file, key, value,
170 						    flags);
171 	}
172 
173 	bpf_disable_instrumentation();
174 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
175 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
176 		err = bpf_percpu_hash_update(map, key, value, flags);
177 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
178 		err = bpf_percpu_array_update(map, key, value, flags);
179 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
180 		err = bpf_percpu_cgroup_storage_update(map, key, value,
181 						       flags);
182 	} else if (IS_FD_ARRAY(map)) {
183 		rcu_read_lock();
184 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
185 						   flags);
186 		rcu_read_unlock();
187 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
188 		rcu_read_lock();
189 		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
190 						  flags);
191 		rcu_read_unlock();
192 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
193 		/* rcu_read_lock() is not needed */
194 		err = bpf_fd_reuseport_array_update_elem(map, key, value,
195 							 flags);
196 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
197 		   map->map_type == BPF_MAP_TYPE_STACK) {
198 		err = map->ops->map_push_elem(map, value, flags);
199 	} else {
200 		rcu_read_lock();
201 		err = map->ops->map_update_elem(map, key, value, flags);
202 		rcu_read_unlock();
203 	}
204 	bpf_enable_instrumentation();
205 	maybe_wait_bpf_programs(map);
206 
207 	return err;
208 }
209 
210 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
211 			      __u64 flags)
212 {
213 	void *ptr;
214 	int err;
215 
216 	if (bpf_map_is_dev_bound(map))
217 		return bpf_map_offload_lookup_elem(map, key, value);
218 
219 	bpf_disable_instrumentation();
220 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
221 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
222 		err = bpf_percpu_hash_copy(map, key, value);
223 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
224 		err = bpf_percpu_array_copy(map, key, value);
225 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
226 		err = bpf_percpu_cgroup_storage_copy(map, key, value);
227 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
228 		err = bpf_stackmap_copy(map, key, value);
229 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
230 		err = bpf_fd_array_map_lookup_elem(map, key, value);
231 	} else if (IS_FD_HASH(map)) {
232 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
233 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
234 		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
235 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
236 		   map->map_type == BPF_MAP_TYPE_STACK) {
237 		err = map->ops->map_peek_elem(map, value);
238 	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
239 		/* struct_ops map requires directly updating "value" */
240 		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
241 	} else {
242 		rcu_read_lock();
243 		if (map->ops->map_lookup_elem_sys_only)
244 			ptr = map->ops->map_lookup_elem_sys_only(map, key);
245 		else
246 			ptr = map->ops->map_lookup_elem(map, key);
247 		if (IS_ERR(ptr)) {
248 			err = PTR_ERR(ptr);
249 		} else if (!ptr) {
250 			err = -ENOENT;
251 		} else {
252 			err = 0;
253 			if (flags & BPF_F_LOCK)
254 				/* lock 'ptr' and copy everything but lock */
255 				copy_map_value_locked(map, value, ptr, true);
256 			else
257 				copy_map_value(map, value, ptr);
258 			/* mask the lock, since the value wasn't zero-initialized */
259 			check_and_init_map_lock(map, value);
260 		}
261 		rcu_read_unlock();
262 	}
263 
264 	bpf_enable_instrumentation();
265 	maybe_wait_bpf_programs(map);
266 
267 	return err;
268 }
269 
270 /* Please do not use this function outside of the map creation path
271  * (e.g. in the map update path) without taking care of setting the active
272  * memory cgroup (see bpf_map_kmalloc_node() for an example).
273  */
274 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
275 {
276 	/* We really just want to fail instead of triggering the OOM killer
277 	 * under memory pressure, therefore we pass __GFP_NORETRY to kmalloc,
278 	 * which is used for lower-order allocation requests.
279 	 *
280 	 * It has been observed that higher order allocation requests done by
281 	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
282 	 * to reclaim memory from the page cache, thus we set
283 	 * __GFP_RETRY_MAYFAIL to avoid such situations.
284 	 */
285 
286 	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
287 	unsigned int flags = 0;
288 	unsigned long align = 1;
289 	void *area;
290 
291 	if (size >= SIZE_MAX)
292 		return NULL;
293 
294 	/* kmalloc()'ed memory can't be mmap()'ed */
295 	if (mmapable) {
296 		BUG_ON(!PAGE_ALIGNED(size));
297 		align = SHMLBA;
298 		flags = VM_USERMAP;
299 	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
300 		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
301 				    numa_node);
302 		if (area != NULL)
303 			return area;
304 	}
305 
306 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
307 			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
308 			flags, numa_node, __builtin_return_address(0));
309 }
310 
311 void *bpf_map_area_alloc(u64 size, int numa_node)
312 {
313 	return __bpf_map_area_alloc(size, numa_node, false);
314 }
315 
316 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
317 {
318 	return __bpf_map_area_alloc(size, numa_node, true);
319 }
320 
321 void bpf_map_area_free(void *area)
322 {
323 	kvfree(area);
324 }
325 
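/* Size policy of __bpf_map_area_alloc() in numbers, assuming 4 KiB pages
 * and PAGE_ALLOC_COSTLY_ORDER == 3: requests up to 32 KiB are first tried
 * with kmalloc_node(..., __GFP_NORETRY, ...); larger requests, kmalloc
 * failures and all mmapable allocations go through __vmalloc_node_range()
 * with __GFP_RETRY_MAYFAIL, since kmalloc()'ed memory cannot be mmap()'ed.
 */
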
326 static u32 bpf_map_flags_retain_permanent(u32 flags)
327 {
328 	/* Some map creation flags are not tied to the map object but
329 	 * rather to the map fd instead, so they have no meaning upon
330 	 * map object inspection since multiple file descriptors with
331 	 * different (access) properties can exist here. Thus, given
332  * this has zero meaning for the map itself, let's clear these
333 	 * from here.
334 	 */
335 	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
336 }
337 
338 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
339 {
340 	map->map_type = attr->map_type;
341 	map->key_size = attr->key_size;
342 	map->value_size = attr->value_size;
343 	map->max_entries = attr->max_entries;
344 	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
345 	map->numa_node = bpf_map_attr_numa_node(attr);
346 }
347 
348 static int bpf_map_alloc_id(struct bpf_map *map)
349 {
350 	int id;
351 
352 	idr_preload(GFP_KERNEL);
353 	spin_lock_bh(&map_idr_lock);
354 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
355 	if (id > 0)
356 		map->id = id;
357 	spin_unlock_bh(&map_idr_lock);
358 	idr_preload_end();
359 
360 	if (WARN_ON_ONCE(!id))
361 		return -ENOSPC;
362 
363 	return id > 0 ? 0 : id;
364 }
365 
366 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
367 {
368 	unsigned long flags;
369 
370 	/* Offloaded maps are removed from the IDR store when their device
371 	 * disappears - even if someone holds an fd to them they are unusable,
372 	 * the memory is gone, all ops will fail; they are simply waiting for
373 	 * refcnt to drop to be freed.
374 	 */
375 	if (!map->id)
376 		return;
377 
378 	if (do_idr_lock)
379 		spin_lock_irqsave(&map_idr_lock, flags);
380 	else
381 		__acquire(&map_idr_lock);
382 
383 	idr_remove(&map_idr, map->id);
384 	map->id = 0;
385 
386 	if (do_idr_lock)
387 		spin_unlock_irqrestore(&map_idr_lock, flags);
388 	else
389 		__release(&map_idr_lock);
390 }
391 
392 #ifdef CONFIG_MEMCG_KMEM
393 static void bpf_map_save_memcg(struct bpf_map *map)
394 {
395 	map->memcg = get_mem_cgroup_from_mm(current->mm);
396 }
397 
398 static void bpf_map_release_memcg(struct bpf_map *map)
399 {
400 	mem_cgroup_put(map->memcg);
401 }
402 
403 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
404 			   int node)
405 {
406 	struct mem_cgroup *old_memcg;
407 	void *ptr;
408 
409 	old_memcg = set_active_memcg(map->memcg);
410 	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
411 	set_active_memcg(old_memcg);
412 
413 	return ptr;
414 }
415 
416 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
417 {
418 	struct mem_cgroup *old_memcg;
419 	void *ptr;
420 
421 	old_memcg = set_active_memcg(map->memcg);
422 	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
423 	set_active_memcg(old_memcg);
424 
425 	return ptr;
426 }
427 
428 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
429 				    size_t align, gfp_t flags)
430 {
431 	struct mem_cgroup *old_memcg;
432 	void __percpu *ptr;
433 
434 	old_memcg = set_active_memcg(map->memcg);
435 	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
436 	set_active_memcg(old_memcg);
437 
438 	return ptr;
439 }
440 
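/* Usage sketch: allocations made on behalf of a map after creation time
 * should go through these wrappers so they are charged to the map's memcg,
 * e.g. (hypothetical call site, mirroring hashtab element allocation):
 *
 *	elem = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
 *				    GFP_ATOMIC | __GFP_NOWARN,
 *				    htab->map.numa_node);
 */
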
441 #else
442 static void bpf_map_save_memcg(struct bpf_map *map)
443 {
444 }
445 
446 static void bpf_map_release_memcg(struct bpf_map *map)
447 {
448 }
449 #endif
450 
451 /* called from workqueue */
452 static void bpf_map_free_deferred(struct work_struct *work)
453 {
454 	struct bpf_map *map = container_of(work, struct bpf_map, work);
455 
456 	security_bpf_map_free(map);
457 	bpf_map_release_memcg(map);
458 	/* implementation dependent freeing */
459 	/* implementation-dependent freeing */
460 }
461 
462 static void bpf_map_put_uref(struct bpf_map *map)
463 {
464 	if (atomic64_dec_and_test(&map->usercnt)) {
465 		if (map->ops->map_release_uref)
466 			map->ops->map_release_uref(map);
467 	}
468 }
469 
470 /* decrement map refcnt and schedule it for freeing via workqueue
471  * (underlying map implementation ops->map_free() might sleep)
472  */
473 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
474 {
475 	if (atomic64_dec_and_test(&map->refcnt)) {
476 		/* bpf_map_free_id() must be called first */
477 		bpf_map_free_id(map, do_idr_lock);
478 		btf_put(map->btf);
479 		INIT_WORK(&map->work, bpf_map_free_deferred);
480 		schedule_work(&map->work);
481 	}
482 }
483 
484 void bpf_map_put(struct bpf_map *map)
485 {
486 	__bpf_map_put(map, true);
487 }
488 EXPORT_SYMBOL_GPL(bpf_map_put);
489 
490 void bpf_map_put_with_uref(struct bpf_map *map)
491 {
492 	bpf_map_put_uref(map);
493 	bpf_map_put(map);
494 }
495 
496 static int bpf_map_release(struct inode *inode, struct file *filp)
497 {
498 	struct bpf_map *map = filp->private_data;
499 
500 	if (map->ops->map_release)
501 		map->ops->map_release(map, filp);
502 
503 	bpf_map_put_with_uref(map);
504 	return 0;
505 }
506 
507 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
508 {
509 	fmode_t mode = f.file->f_mode;
510 
511 	/* Our file permissions may have been overridden by global
512 	 * map permissions on the syscall side.
513 	 */
514 	if (READ_ONCE(map->frozen))
515 		mode &= ~FMODE_CAN_WRITE;
516 	return mode;
517 }
518 
519 #ifdef CONFIG_PROC_FS
520 /* Provides an approximation of the map's memory footprint.
521  * Used only to provide backward compatibility and to display
522  * reasonable "memlock" info.
523  */
524 static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
525 {
526 	unsigned long size;
527 
528 	size = round_up(map->key_size + bpf_map_value_size(map), 8);
529 
530 	return round_up(map->max_entries * size, PAGE_SIZE);
531 }
532 
533 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
534 {
535 	const struct bpf_map *map = filp->private_data;
536 	const struct bpf_array *array;
537 	u32 type = 0, jited = 0;
538 
539 	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
540 		array = container_of(map, struct bpf_array, map);
541 		type  = array->aux->type;
542 		jited = array->aux->jited;
543 	}
544 
545 	seq_printf(m,
546 		   "map_type:\t%u\n"
547 		   "key_size:\t%u\n"
548 		   "value_size:\t%u\n"
549 		   "max_entries:\t%u\n"
550 		   "map_flags:\t%#x\n"
551 		   "memlock:\t%lu\n"
552 		   "map_id:\t%u\n"
553 		   "frozen:\t%u\n",
554 		   map->map_type,
555 		   map->key_size,
556 		   map->value_size,
557 		   map->max_entries,
558 		   map->map_flags,
559 		   bpf_map_memory_footprint(map),
560 		   map->id,
561 		   READ_ONCE(map->frozen));
562 	if (type) {
563 		seq_printf(m, "owner_prog_type:\t%u\n", type);
564 		seq_printf(m, "owner_jited:\t%u\n", jited);
565 	}
566 }
567 #endif
568 
569 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
570 			      loff_t *ppos)
571 {
572 	/* We need this handler such that alloc_file() enables
573 	 * f_mode with FMODE_CAN_READ.
574 	 */
575 	return -EINVAL;
576 }
577 
578 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
579 			       size_t siz, loff_t *ppos)
580 {
581 	/* We need this handler such that alloc_file() enables
582 	 * f_mode with FMODE_CAN_WRITE.
583 	 */
584 	return -EINVAL;
585 }
586 
587 /* called for any extra memory-mapped regions (except the initial one) */
588 static void bpf_map_mmap_open(struct vm_area_struct *vma)
589 {
590 	struct bpf_map *map = vma->vm_file->private_data;
591 
592 	if (vma->vm_flags & VM_MAYWRITE) {
593 		mutex_lock(&map->freeze_mutex);
594 		map->writecnt++;
595 		mutex_unlock(&map->freeze_mutex);
596 	}
597 }
598 
599 /* called for all unmapped memory regions (including the initial one) */
600 static void bpf_map_mmap_close(struct vm_area_struct *vma)
601 {
602 	struct bpf_map *map = vma->vm_file->private_data;
603 
604 	if (vma->vm_flags & VM_MAYWRITE) {
605 		mutex_lock(&map->freeze_mutex);
606 		map->writecnt--;
607 		mutex_unlock(&map->freeze_mutex);
608 	}
609 }
610 
611 static const struct vm_operations_struct bpf_map_default_vmops = {
612 	.open		= bpf_map_mmap_open,
613 	.close		= bpf_map_mmap_close,
614 };
615 
616 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
617 {
618 	struct bpf_map *map = filp->private_data;
619 	int err;
620 
621 	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
622 		return -ENOTSUPP;
623 
624 	if (!(vma->vm_flags & VM_SHARED))
625 		return -EINVAL;
626 
627 	mutex_lock(&map->freeze_mutex);
628 
629 	if (vma->vm_flags & VM_WRITE) {
630 		if (map->frozen) {
631 			err = -EPERM;
632 			goto out;
633 		}
634 		/* map is meant to be read-only, so do not allow mapping as
635 		 * writable, because it's possible to leak a writable page
636 		 * reference, which would allow user-space to modify it even after
637 		 * freezing, while the verifier assumes contents do not change
638 		 */
639 		if (map->map_flags & BPF_F_RDONLY_PROG) {
640 			err = -EACCES;
641 			goto out;
642 		}
643 	}
644 
645 	/* set default open/close callbacks */
646 	vma->vm_ops = &bpf_map_default_vmops;
647 	vma->vm_private_data = map;
648 	vma->vm_flags &= ~VM_MAYEXEC;
649 	if (!(vma->vm_flags & VM_WRITE))
650 		/* disallow re-mapping with PROT_WRITE */
651 		vma->vm_flags &= ~VM_MAYWRITE;
652 
653 	err = map->ops->map_mmap(map, vma);
654 	if (err)
655 		goto out;
656 
657 	if (vma->vm_flags & VM_MAYWRITE)
658 		map->writecnt++;
659 out:
660 	mutex_unlock(&map->freeze_mutex);
661 	return err;
662 }
663 
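/* User-space sketch (assumes a map created with BPF_F_MMAPABLE, not frozen
 * and not BPF_F_RDONLY_PROG; "map_fd" and "map_size" are placeholders):
 *
 *	void *mem = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, map_fd, 0);
 *
 * MAP_PRIVATE mappings are rejected above (VM_SHARED is required); mapping
 * a frozen map writable fails with -EPERM and a BPF_F_RDONLY_PROG map
 * writable with -EACCES.
 */
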
664 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
665 {
666 	struct bpf_map *map = filp->private_data;
667 
668 	if (map->ops->map_poll)
669 		return map->ops->map_poll(map, filp, pts);
670 
671 	return EPOLLERR;
672 }
673 
674 const struct file_operations bpf_map_fops = {
675 #ifdef CONFIG_PROC_FS
676 	.show_fdinfo	= bpf_map_show_fdinfo,
677 #endif
678 	.release	= bpf_map_release,
679 	.read		= bpf_dummy_read,
680 	.write		= bpf_dummy_write,
681 	.mmap		= bpf_map_mmap,
682 	.poll		= bpf_map_poll,
683 };
684 
685 int bpf_map_new_fd(struct bpf_map *map, int flags)
686 {
687 	int ret;
688 
689 	ret = security_bpf_map(map, OPEN_FMODE(flags));
690 	if (ret < 0)
691 		return ret;
692 
693 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
694 				flags | O_CLOEXEC);
695 }
696 
697 int bpf_get_file_flag(int flags)
698 {
699 	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
700 		return -EINVAL;
701 	if (flags & BPF_F_RDONLY)
702 		return O_RDONLY;
703 	if (flags & BPF_F_WRONLY)
704 		return O_WRONLY;
705 	return O_RDWR;
706 }
707 
708 /* helper macro to check that the unused fields of 'union bpf_attr' are zero */
709 #define CHECK_ATTR(CMD) \
710 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
711 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
712 		   sizeof(*attr) - \
713 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
714 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
715 
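/* Expansion sketch: for CMD == BPF_MAP_CREATE, whose
 * BPF_MAP_CREATE_LAST_FIELD is btf_vmlinux_value_type_id, CHECK_ATTR()
 * memchr_inv()s every byte of *attr past that field and evaluates to true
 * (i.e. "attribute error") iff any of those trailing bytes is non-zero.
 */
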
716 /* dst and src must each have at least "size" bytes.
717  * Returns the string length on success and < 0 on error.
718  */
719 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
720 {
721 	const char *end = src + size;
722 	const char *orig_src = src;
723 
724 	memset(dst, 0, size);
725 	/* Copy all isalnum(), '_' and '.' chars. */
726 	while (src < end && *src) {
727 		if (!isalnum(*src) &&
728 		    *src != '_' && *src != '.')
729 			return -EINVAL;
730 		*dst++ = *src++;
731 	}
732 
733 	/* No '\0' found within "size" bytes */
734 	if (src == end)
735 		return -EINVAL;
736 
737 	return src - orig_src;
738 }
739 
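/* Examples, assuming size == BPF_OBJ_NAME_LEN (16):
 *
 *	"my_map.v2"  -> accepted, returns 9
 *	"bad-name"   -> -EINVAL ('-' is neither alphanumeric, '_' nor '.')
 *	a 16-byte src with no terminating '\0' -> -EINVAL
 */
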
740 int map_check_no_btf(const struct bpf_map *map,
741 		     const struct btf *btf,
742 		     const struct btf_type *key_type,
743 		     const struct btf_type *value_type)
744 {
745 	return -ENOTSUPP;
746 }
747 
748 static int map_check_btf(struct bpf_map *map, const struct btf *btf,
749 			 u32 btf_key_id, u32 btf_value_id)
750 {
751 	const struct btf_type *key_type, *value_type;
752 	u32 key_size, value_size;
753 	int ret = 0;
754 
755 	/* Some maps allow key to be unspecified. */
756 	if (btf_key_id) {
757 		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
758 		if (!key_type || key_size != map->key_size)
759 			return -EINVAL;
760 	} else {
761 		key_type = btf_type_by_id(btf, 0);
762 		if (!map->ops->map_check_btf)
763 			return -EINVAL;
764 	}
765 
766 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
767 	if (!value_type || value_size != map->value_size)
768 		return -EINVAL;
769 
770 	map->spin_lock_off = btf_find_spin_lock(btf, value_type);
771 
772 	if (map_value_has_spin_lock(map)) {
773 		if (map->map_flags & BPF_F_RDONLY_PROG)
774 			return -EACCES;
775 		if (map->map_type != BPF_MAP_TYPE_HASH &&
776 		    map->map_type != BPF_MAP_TYPE_ARRAY &&
777 		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
778 		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
779 		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
780 		    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
781 			return -ENOTSUPP;
782 		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
783 		    map->value_size) {
784 			WARN_ONCE(1,
785 				  "verifier bug spin_lock_off %d value_size %d\n",
786 				  map->spin_lock_off, map->value_size);
787 			return -EFAULT;
788 		}
789 	}
790 
791 	if (map->ops->map_check_btf)
792 		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
793 
794 	return ret;
795 }
796 
797 #define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
798 /* called via syscall */
799 static int map_create(union bpf_attr *attr)
800 {
801 	int numa_node = bpf_map_attr_numa_node(attr);
802 	struct bpf_map *map;
803 	int f_flags;
804 	int err;
805 
806 	err = CHECK_ATTR(BPF_MAP_CREATE);
807 	if (err)
808 		return -EINVAL;
809 
810 	if (attr->btf_vmlinux_value_type_id) {
811 		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
812 		    attr->btf_key_type_id || attr->btf_value_type_id)
813 			return -EINVAL;
814 	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
815 		return -EINVAL;
816 	}
817 
818 	f_flags = bpf_get_file_flag(attr->map_flags);
819 	if (f_flags < 0)
820 		return f_flags;
821 
822 	if (numa_node != NUMA_NO_NODE &&
823 	    ((unsigned int)numa_node >= nr_node_ids ||
824 	     !node_online(numa_node)))
825 		return -EINVAL;
826 
827 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
828 	map = find_and_alloc_map(attr);
829 	if (IS_ERR(map))
830 		return PTR_ERR(map);
831 
832 	err = bpf_obj_name_cpy(map->name, attr->map_name,
833 			       sizeof(attr->map_name));
834 	if (err < 0)
835 		goto free_map;
836 
837 	atomic64_set(&map->refcnt, 1);
838 	atomic64_set(&map->usercnt, 1);
839 	mutex_init(&map->freeze_mutex);
840 
841 	map->spin_lock_off = -EINVAL;
842 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
843 	    /* Even if the map's value is a kernel struct,
844 	     * the bpf_prog.o must have BTF to begin with
845 	     * to figure out the corresponding kernel
846 	     * counterpart.  Thus, attr->btf_fd has
847 	     * to be valid as well.
848 	     */
849 	    attr->btf_vmlinux_value_type_id) {
850 		struct btf *btf;
851 
852 		btf = btf_get_by_fd(attr->btf_fd);
853 		if (IS_ERR(btf)) {
854 			err = PTR_ERR(btf);
855 			goto free_map;
856 		}
857 		map->btf = btf;
858 
859 		if (attr->btf_value_type_id) {
860 			err = map_check_btf(map, btf, attr->btf_key_type_id,
861 					    attr->btf_value_type_id);
862 			if (err)
863 				goto free_map;
864 		}
865 
866 		map->btf_key_type_id = attr->btf_key_type_id;
867 		map->btf_value_type_id = attr->btf_value_type_id;
868 		map->btf_vmlinux_value_type_id =
869 			attr->btf_vmlinux_value_type_id;
870 	}
871 
872 	err = security_bpf_map_alloc(map);
873 	if (err)
874 		goto free_map;
875 
876 	err = bpf_map_alloc_id(map);
877 	if (err)
878 		goto free_map_sec;
879 
880 	bpf_map_save_memcg(map);
881 
882 	err = bpf_map_new_fd(map, f_flags);
883 	if (err < 0) {
884 		/* failed to allocate fd.
885 		 * bpf_map_put_with_uref() is needed because the above
886 		 * bpf_map_alloc_id() has published the map
887 		 * to userspace, which may have taken a reference
888 		 * to it through BPF_MAP_GET_FD_BY_ID.
889 		 */
890 		bpf_map_put_with_uref(map);
891 		return err;
892 	}
893 
894 	return err;
895 
896 free_map_sec:
897 	security_bpf_map_free(map);
898 free_map:
899 	btf_put(map->btf);
900 	map->ops->map_free(map);
901 	return err;
902 }
903 
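/* Minimal user-space counterpart of this command (illustrative sketch):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 16,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the returned fd owns the refcnt and usercnt references taken
 * above; a negative return carries one of the errors set in map_create().
 */
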
904 /* If an error is returned, the fd is released.
905  * On success the caller should complete fd access with a matching fdput().
906  */
907 struct bpf_map *__bpf_map_get(struct fd f)
908 {
909 	if (!f.file)
910 		return ERR_PTR(-EBADF);
911 	if (f.file->f_op != &bpf_map_fops) {
912 		fdput(f);
913 		return ERR_PTR(-EINVAL);
914 	}
915 
916 	return f.file->private_data;
917 }
918 
919 void bpf_map_inc(struct bpf_map *map)
920 {
921 	atomic64_inc(&map->refcnt);
922 }
923 EXPORT_SYMBOL_GPL(bpf_map_inc);
924 
925 void bpf_map_inc_with_uref(struct bpf_map *map)
926 {
927 	atomic64_inc(&map->refcnt);
928 	atomic64_inc(&map->usercnt);
929 }
930 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
931 
932 struct bpf_map *bpf_map_get(u32 ufd)
933 {
934 	struct fd f = fdget(ufd);
935 	struct bpf_map *map;
936 
937 	map = __bpf_map_get(f);
938 	if (IS_ERR(map))
939 		return map;
940 
941 	bpf_map_inc(map);
942 	fdput(f);
943 
944 	return map;
945 }
946 
947 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
948 {
949 	struct fd f = fdget(ufd);
950 	struct bpf_map *map;
951 
952 	map = __bpf_map_get(f);
953 	if (IS_ERR(map))
954 		return map;
955 
956 	bpf_map_inc_with_uref(map);
957 	fdput(f);
958 
959 	return map;
960 }
961 
962 /* map_idr_lock should have been held */
963 static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
964 {
965 	int refold;
966 
967 	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
968 	if (!refold)
969 		return ERR_PTR(-ENOENT);
970 	if (uref)
971 		atomic64_inc(&map->usercnt);
972 
973 	return map;
974 }
975 
976 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
977 {
978 	spin_lock_bh(&map_idr_lock);
979 	map = __bpf_map_inc_not_zero(map, false);
980 	spin_unlock_bh(&map_idr_lock);
981 
982 	return map;
983 }
984 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
985 
986 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
987 {
988 	return -ENOTSUPP;
989 }
990 
991 static void *__bpf_copy_key(void __user *ukey, u64 key_size)
992 {
993 	if (key_size)
994 		return memdup_user(ukey, key_size);
995 
996 	if (ukey)
997 		return ERR_PTR(-EINVAL);
998 
999 	return NULL;
1000 }
1001 
1002 /* last field in 'union bpf_attr' used by this command */
1003 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1004 
1005 static int map_lookup_elem(union bpf_attr *attr)
1006 {
1007 	void __user *ukey = u64_to_user_ptr(attr->key);
1008 	void __user *uvalue = u64_to_user_ptr(attr->value);
1009 	int ufd = attr->map_fd;
1010 	struct bpf_map *map;
1011 	void *key, *value;
1012 	u32 value_size;
1013 	struct fd f;
1014 	int err;
1015 
1016 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1017 		return -EINVAL;
1018 
1019 	if (attr->flags & ~BPF_F_LOCK)
1020 		return -EINVAL;
1021 
1022 	f = fdget(ufd);
1023 	map = __bpf_map_get(f);
1024 	if (IS_ERR(map))
1025 		return PTR_ERR(map);
1026 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1027 		err = -EPERM;
1028 		goto err_put;
1029 	}
1030 
1031 	if ((attr->flags & BPF_F_LOCK) &&
1032 	    !map_value_has_spin_lock(map)) {
1033 		err = -EINVAL;
1034 		goto err_put;
1035 	}
1036 
1037 	key = __bpf_copy_key(ukey, map->key_size);
1038 	if (IS_ERR(key)) {
1039 		err = PTR_ERR(key);
1040 		goto err_put;
1041 	}
1042 
1043 	value_size = bpf_map_value_size(map);
1044 
1045 	err = -ENOMEM;
1046 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1047 	if (!value)
1048 		goto free_key;
1049 
1050 	err = bpf_map_copy_value(map, key, value, attr->flags);
1051 	if (err)
1052 		goto free_value;
1053 
1054 	err = -EFAULT;
1055 	if (copy_to_user(uvalue, value, value_size) != 0)
1056 		goto free_value;
1057 
1058 	err = 0;
1059 
1060 free_value:
1061 	kfree(value);
1062 free_key:
1063 	kfree(key);
1064 err_put:
1065 	fdput(f);
1066 	return err;
1067 }
1068 
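/* User-space counterpart of map_lookup_elem() (illustrative sketch;
 * "map_fd" is a placeholder):
 *
 *	__u32 key = 0;
 *	__u64 value;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *
 * For per-cpu maps the value buffer must hold
 * round_up(value_size, 8) * num_possible_cpus() bytes, as computed by
 * bpf_map_value_size() above.
 */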
1069 
1070 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1071 
1072 static int map_update_elem(union bpf_attr *attr)
1073 {
1074 	void __user *ukey = u64_to_user_ptr(attr->key);
1075 	void __user *uvalue = u64_to_user_ptr(attr->value);
1076 	int ufd = attr->map_fd;
1077 	struct bpf_map *map;
1078 	void *key, *value;
1079 	u32 value_size;
1080 	struct fd f;
1081 	int err;
1082 
1083 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1084 		return -EINVAL;
1085 
1086 	f = fdget(ufd);
1087 	map = __bpf_map_get(f);
1088 	if (IS_ERR(map))
1089 		return PTR_ERR(map);
1090 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1091 		err = -EPERM;
1092 		goto err_put;
1093 	}
1094 
1095 	if ((attr->flags & BPF_F_LOCK) &&
1096 	    !map_value_has_spin_lock(map)) {
1097 		err = -EINVAL;
1098 		goto err_put;
1099 	}
1100 
1101 	key = __bpf_copy_key(ukey, map->key_size);
1102 	if (IS_ERR(key)) {
1103 		err = PTR_ERR(key);
1104 		goto err_put;
1105 	}
1106 
1107 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1108 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
1109 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
1110 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
1111 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
1112 	else
1113 		value_size = map->value_size;
1114 
1115 	err = -ENOMEM;
1116 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1117 	if (!value)
1118 		goto free_key;
1119 
1120 	err = -EFAULT;
1121 	if (copy_from_user(value, uvalue, value_size) != 0)
1122 		goto free_value;
1123 
1124 	err = bpf_map_update_value(map, f, key, value, attr->flags);
1125 
1126 free_value:
1127 	kfree(value);
1128 free_key:
1129 	kfree(key);
1130 err_put:
1131 	fdput(f);
1132 	return err;
1133 }
1134 
1135 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1136 
1137 static int map_delete_elem(union bpf_attr *attr)
1138 {
1139 	void __user *ukey = u64_to_user_ptr(attr->key);
1140 	int ufd = attr->map_fd;
1141 	struct bpf_map *map;
1142 	struct fd f;
1143 	void *key;
1144 	int err;
1145 
1146 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1147 		return -EINVAL;
1148 
1149 	f = fdget(ufd);
1150 	map = __bpf_map_get(f);
1151 	if (IS_ERR(map))
1152 		return PTR_ERR(map);
1153 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1154 		err = -EPERM;
1155 		goto err_put;
1156 	}
1157 
1158 	key = __bpf_copy_key(ukey, map->key_size);
1159 	if (IS_ERR(key)) {
1160 		err = PTR_ERR(key);
1161 		goto err_put;
1162 	}
1163 
1164 	if (bpf_map_is_dev_bound(map)) {
1165 		err = bpf_map_offload_delete_elem(map, key);
1166 		goto out;
1167 	} else if (IS_FD_PROG_ARRAY(map) ||
1168 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1169 		/* These maps require a sleepable context */
1170 		err = map->ops->map_delete_elem(map, key);
1171 		goto out;
1172 	}
1173 
1174 	bpf_disable_instrumentation();
1175 	rcu_read_lock();
1176 	err = map->ops->map_delete_elem(map, key);
1177 	rcu_read_unlock();
1178 	bpf_enable_instrumentation();
1179 	maybe_wait_bpf_programs(map);
1180 out:
1181 	kfree(key);
1182 err_put:
1183 	fdput(f);
1184 	return err;
1185 }
1186 
1187 /* last field in 'union bpf_attr' used by this command */
1188 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1189 
1190 static int map_get_next_key(union bpf_attr *attr)
1191 {
1192 	void __user *ukey = u64_to_user_ptr(attr->key);
1193 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1194 	int ufd = attr->map_fd;
1195 	struct bpf_map *map;
1196 	void *key, *next_key;
1197 	struct fd f;
1198 	int err;
1199 
1200 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1201 		return -EINVAL;
1202 
1203 	f = fdget(ufd);
1204 	map = __bpf_map_get(f);
1205 	if (IS_ERR(map))
1206 		return PTR_ERR(map);
1207 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1208 		err = -EPERM;
1209 		goto err_put;
1210 	}
1211 
1212 	if (ukey) {
1213 		key = __bpf_copy_key(ukey, map->key_size);
1214 		if (IS_ERR(key)) {
1215 			err = PTR_ERR(key);
1216 			goto err_put;
1217 		}
1218 	} else {
1219 		key = NULL;
1220 	}
1221 
1222 	err = -ENOMEM;
1223 	next_key = kmalloc(map->key_size, GFP_USER);
1224 	if (!next_key)
1225 		goto free_key;
1226 
1227 	if (bpf_map_is_dev_bound(map)) {
1228 		err = bpf_map_offload_get_next_key(map, key, next_key);
1229 		goto out;
1230 	}
1231 
1232 	rcu_read_lock();
1233 	err = map->ops->map_get_next_key(map, key, next_key);
1234 	rcu_read_unlock();
1235 out:
1236 	if (err)
1237 		goto free_next_key;
1238 
1239 	err = -EFAULT;
1240 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1241 		goto free_next_key;
1242 
1243 	err = 0;
1244 
1245 free_next_key:
1246 	kfree(next_key);
1247 free_key:
1248 	kfree(key);
1249 err_put:
1250 	fdput(f);
1251 	return err;
1252 }
1253 
1254 int generic_map_delete_batch(struct bpf_map *map,
1255 			     const union bpf_attr *attr,
1256 			     union bpf_attr __user *uattr)
1257 {
1258 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1259 	u32 cp, max_count;
1260 	int err = 0;
1261 	void *key;
1262 
1263 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1264 		return -EINVAL;
1265 
1266 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1267 	    !map_value_has_spin_lock(map)) {
1268 		return -EINVAL;
1269 	}
1270 
1271 	max_count = attr->batch.count;
1272 	if (!max_count)
1273 		return 0;
1274 
1275 	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1276 	if (!key)
1277 		return -ENOMEM;
1278 
1279 	for (cp = 0; cp < max_count; cp++) {
1280 		err = -EFAULT;
1281 		if (copy_from_user(key, keys + cp * map->key_size,
1282 				   map->key_size))
1283 			break;
1284 
1285 		if (bpf_map_is_dev_bound(map)) {
1286 			err = bpf_map_offload_delete_elem(map, key);
1287 			break;
1288 		}
1289 
1290 		bpf_disable_instrumentation();
1291 		rcu_read_lock();
1292 		err = map->ops->map_delete_elem(map, key);
1293 		rcu_read_unlock();
1294 		bpf_enable_instrumentation();
1295 		maybe_wait_bpf_programs(map);
1296 		if (err)
1297 			break;
1298 	}
1299 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1300 		err = -EFAULT;
1301 
1302 	kfree(key);
1303 	return err;
1304 }
1305 
1306 int generic_map_update_batch(struct bpf_map *map,
1307 			     const union bpf_attr *attr,
1308 			     union bpf_attr __user *uattr)
1309 {
1310 	void __user *values = u64_to_user_ptr(attr->batch.values);
1311 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1312 	u32 value_size, cp, max_count;
1313 	int ufd = attr->map_fd;
1314 	void *key, *value;
1315 	struct fd f;
1316 	int err = 0;
1317 
1319 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1320 		return -EINVAL;
1321 
1322 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1323 	    !map_value_has_spin_lock(map)) {
1324 		return -EINVAL;
1325 	}
1326 
1327 	value_size = bpf_map_value_size(map);
1328 
1329 	max_count = attr->batch.count;
1330 	if (!max_count)
1331 		return 0;
1332 
1333 	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1334 	if (!key)
1335 		return -ENOMEM;
1336 
1337 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1338 	if (!value) {
1339 		kfree(key);
1340 		return -ENOMEM;
1341 	}
1342 
	f = fdget(ufd);	/* f.file may be needed for fd-array map updates */

1343 	for (cp = 0; cp < max_count; cp++) {
1344 		err = -EFAULT;
1345 		if (copy_from_user(key, keys + cp * map->key_size,
1346 		    map->key_size) ||
1347 		    copy_from_user(value, values + cp * value_size, value_size))
1348 			break;
1349 
1350 		err = bpf_map_update_value(map, f, key, value,
1351 					   attr->batch.elem_flags);
1352 
1353 		if (err)
1354 			break;
1355 	}
1356 
1357 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1358 		err = -EFAULT;
1359 
	fdput(f);
1360 	kfree(value);
1361 	kfree(key);
1362 	return err;
1363 }
1364 
1365 #define MAP_LOOKUP_RETRIES 3
1366 
1367 int generic_map_lookup_batch(struct bpf_map *map,
1368 			     const union bpf_attr *attr,
1369 			     union bpf_attr __user *uattr)
1370 {
1371 	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1372 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1373 	void __user *values = u64_to_user_ptr(attr->batch.values);
1374 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1375 	void *buf, *buf_prevkey, *prev_key, *key, *value;
1376 	int err, retry = MAP_LOOKUP_RETRIES;
1377 	u32 value_size, cp, max_count;
1378 
1379 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1380 		return -EINVAL;
1381 
1382 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1383 	    !map_value_has_spin_lock(map))
1384 		return -EINVAL;
1385 
1386 	value_size = bpf_map_value_size(map);
1387 
1388 	max_count = attr->batch.count;
1389 	if (!max_count)
1390 		return 0;
1391 
1392 	if (put_user(0, &uattr->batch.count))
1393 		return -EFAULT;
1394 
1395 	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1396 	if (!buf_prevkey)
1397 		return -ENOMEM;
1398 
1399 	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1400 	if (!buf) {
1401 		kfree(buf_prevkey);
1402 		return -ENOMEM;
1403 	}
1404 
1405 	err = -EFAULT;
1406 	prev_key = NULL;
1407 	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1408 		goto free_buf;
1409 	key = buf;
1410 	value = key + map->key_size;
1411 	if (ubatch)
1412 		prev_key = buf_prevkey;
1413 
1414 	for (cp = 0; cp < max_count;) {
1415 		rcu_read_lock();
1416 		err = map->ops->map_get_next_key(map, prev_key, key);
1417 		rcu_read_unlock();
1418 		if (err)
1419 			break;
1420 		err = bpf_map_copy_value(map, key, value,
1421 					 attr->batch.elem_flags);
1422 
1423 		if (err == -ENOENT) {
1424 			if (retry) {
1425 				retry--;
1426 				continue;
1427 			}
1428 			err = -EINTR;
1429 			break;
1430 		}
1431 
1432 		if (err)
1433 			goto free_buf;
1434 
1435 		if (copy_to_user(keys + cp * map->key_size, key,
1436 				 map->key_size)) {
1437 			err = -EFAULT;
1438 			goto free_buf;
1439 		}
1440 		if (copy_to_user(values + cp * value_size, value, value_size)) {
1441 			err = -EFAULT;
1442 			goto free_buf;
1443 		}
1444 
1445 		if (!prev_key)
1446 			prev_key = buf_prevkey;
1447 
1448 		swap(prev_key, key);
1449 		retry = MAP_LOOKUP_RETRIES;
1450 		cp++;
1451 	}
1452 
1453 	if (err == -EFAULT)
1454 		goto free_buf;
1455 
1456 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1457 	    (cp && copy_to_user(uobatch, prev_key, map->key_size)))
1458 		err = -EFAULT;
1459 
1460 free_buf:
1461 	kfree(buf_prevkey);
1462 	kfree(buf);
1463 	return err;
1464 }
1465 
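/* Iteration sketch for the batch interface from user space: the first call
 * passes batch.in_batch == 0 (NULL) to start from the beginning; on return,
 * batch.out_batch holds an opaque resume token (the last processed key) and
 * batch.count the number of elements actually copied. Feeding out_batch
 * back as in_batch continues the walk until the command fails with -ENOENT,
 * which signals that the map has been exhausted.
 */
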
1466 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
1467 
1468 static int map_lookup_and_delete_elem(union bpf_attr *attr)
1469 {
1470 	void __user *ukey = u64_to_user_ptr(attr->key);
1471 	void __user *uvalue = u64_to_user_ptr(attr->value);
1472 	int ufd = attr->map_fd;
1473 	struct bpf_map *map;
1474 	void *key, *value;
1475 	u32 value_size;
1476 	struct fd f;
1477 	int err;
1478 
1479 	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1480 		return -EINVAL;
1481 
1482 	f = fdget(ufd);
1483 	map = __bpf_map_get(f);
1484 	if (IS_ERR(map))
1485 		return PTR_ERR(map);
1486 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1487 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1488 		err = -EPERM;
1489 		goto err_put;
1490 	}
1491 
1492 	key = __bpf_copy_key(ukey, map->key_size);
1493 	if (IS_ERR(key)) {
1494 		err = PTR_ERR(key);
1495 		goto err_put;
1496 	}
1497 
1498 	value_size = map->value_size;
1499 
1500 	err = -ENOMEM;
1501 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1502 	if (!value)
1503 		goto free_key;
1504 
1505 	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1506 	    map->map_type == BPF_MAP_TYPE_STACK) {
1507 		err = map->ops->map_pop_elem(map, value);
1508 	} else {
1509 		err = -ENOTSUPP;
1510 	}
1511 
1512 	if (err)
1513 		goto free_value;
1514 
1515 	if (copy_to_user(uvalue, value, value_size) != 0) {
1516 		err = -EFAULT;
1517 		goto free_value;
1518 	}
1519 
1520 	err = 0;
1521 
1522 free_value:
1523 	kfree(value);
1524 free_key:
1525 	kfree(key);
1526 err_put:
1527 	fdput(f);
1528 	return err;
1529 }
1530 
1531 #define BPF_MAP_FREEZE_LAST_FIELD map_fd
1532 
1533 static int map_freeze(const union bpf_attr *attr)
1534 {
1535 	int err = 0, ufd = attr->map_fd;
1536 	struct bpf_map *map;
1537 	struct fd f;
1538 
1539 	if (CHECK_ATTR(BPF_MAP_FREEZE))
1540 		return -EINVAL;
1541 
1542 	f = fdget(ufd);
1543 	map = __bpf_map_get(f);
1544 	if (IS_ERR(map))
1545 		return PTR_ERR(map);
1546 
1547 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1548 		fdput(f);
1549 		return -ENOTSUPP;
1550 	}
1551 
1552 	mutex_lock(&map->freeze_mutex);
1553 
1554 	if (map->writecnt) {
1555 		err = -EBUSY;
1556 		goto err_put;
1557 	}
1558 	if (READ_ONCE(map->frozen)) {
1559 		err = -EBUSY;
1560 		goto err_put;
1561 	}
1562 	if (!bpf_capable()) {
1563 		err = -EPERM;
1564 		goto err_put;
1565 	}
1566 
1567 	WRITE_ONCE(map->frozen, true);
1568 err_put:
1569 	mutex_unlock(&map->freeze_mutex);
1570 	fdput(f);
1571 	return err;
1572 }
1573 
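/* Typical user-space sequence (illustrative sketch): populate a map created
 * with BPF_F_RDONLY_PROG, then seal it from the syscall side too:
 *
 *	union bpf_attr attr = { .map_fd = map_fd };
 *	err = syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
 *
 * After a successful freeze, fd-based writes fail (map_get_sys_perms()
 * masks FMODE_CAN_WRITE) and new writable mmap()s are rejected, while an
 * existing writable mapping makes the freeze itself fail with -EBUSY.
 */
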
1574 static const struct bpf_prog_ops * const bpf_prog_types[] = {
1575 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1576 	[_id] = & _name ## _prog_ops,
1577 #define BPF_MAP_TYPE(_id, _ops)
1578 #define BPF_LINK_TYPE(_id, _name)
1579 #include <linux/bpf_types.h>
1580 #undef BPF_PROG_TYPE
1581 #undef BPF_MAP_TYPE
1582 #undef BPF_LINK_TYPE
1583 };
1584 
1585 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1586 {
1587 	const struct bpf_prog_ops *ops;
1588 
1589 	if (type >= ARRAY_SIZE(bpf_prog_types))
1590 		return -EINVAL;
1591 	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1592 	ops = bpf_prog_types[type];
1593 	if (!ops)
1594 		return -EINVAL;
1595 
1596 	if (!bpf_prog_is_dev_bound(prog->aux))
1597 		prog->aux->ops = ops;
1598 	else
1599 		prog->aux->ops = &bpf_offload_prog_ops;
1600 	prog->type = type;
1601 	return 0;
1602 }
1603 
1604 enum bpf_audit {
1605 	BPF_AUDIT_LOAD,
1606 	BPF_AUDIT_UNLOAD,
1607 	BPF_AUDIT_MAX,
1608 };
1609 
1610 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1611 	[BPF_AUDIT_LOAD]   = "LOAD",
1612 	[BPF_AUDIT_UNLOAD] = "UNLOAD",
1613 };
1614 
1615 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1616 {
1617 	struct audit_context *ctx = NULL;
1618 	struct audit_buffer *ab;
1619 
1620 	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1621 		return;
1622 	if (audit_enabled == AUDIT_OFF)
1623 		return;
1624 	if (op == BPF_AUDIT_LOAD)
1625 		ctx = audit_context();
1626 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1627 	if (unlikely(!ab))
1628 		return;
1629 	audit_log_format(ab, "prog-id=%u op=%s",
1630 			 prog->aux->id, bpf_audit_str[op]);
1631 	audit_log_end(ab);
1632 }
1633 
1634 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1635 {
1636 	int id;
1637 
1638 	idr_preload(GFP_KERNEL);
1639 	spin_lock_bh(&prog_idr_lock);
1640 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1641 	if (id > 0)
1642 		prog->aux->id = id;
1643 	spin_unlock_bh(&prog_idr_lock);
1644 	idr_preload_end();
1645 
1646 	/* id is in [1, INT_MAX) */
1647 	if (WARN_ON_ONCE(!id))
1648 		return -ENOSPC;
1649 
1650 	return id > 0 ? 0 : id;
1651 }
1652 
1653 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1654 {
1655 	/* cBPF to eBPF migrations are currently not in the idr store.
1656 	 * Offloaded programs are removed from the store when their device
1657 	 * disappears - even if someone grabs an fd to them they are unusable,
1658 	 * simply waiting for refcnt to drop to be freed.
1659 	 */
1660 	if (!prog->aux->id)
1661 		return;
1662 
1663 	if (do_idr_lock)
1664 		spin_lock_bh(&prog_idr_lock);
1665 	else
1666 		__acquire(&prog_idr_lock);
1667 
1668 	idr_remove(&prog_idr, prog->aux->id);
1669 	prog->aux->id = 0;
1670 
1671 	if (do_idr_lock)
1672 		spin_unlock_bh(&prog_idr_lock);
1673 	else
1674 		__release(&prog_idr_lock);
1675 }
1676 
1677 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1678 {
1679 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1680 
1681 	kvfree(aux->func_info);
1682 	kfree(aux->func_info_aux);
1683 	free_uid(aux->user);
1684 	security_bpf_prog_free(aux);
1685 	bpf_prog_free(aux->prog);
1686 }
1687 
1688 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1689 {
1690 	bpf_prog_kallsyms_del_all(prog);
1691 	btf_put(prog->aux->btf);
1692 	bpf_prog_free_linfo(prog);
1693 	if (prog->aux->attach_btf)
1694 		btf_put(prog->aux->attach_btf);
1695 
1696 	if (deferred) {
1697 		if (prog->aux->sleepable)
1698 			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
1699 		else
1700 			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1701 	} else {
1702 		__bpf_prog_put_rcu(&prog->aux->rcu);
1703 	}
1704 }
1705 
1706 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1707 {
1708 	if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1709 		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1710 		bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1711 		/* bpf_prog_free_id() must be called first */
1712 		bpf_prog_free_id(prog, do_idr_lock);
1713 		__bpf_prog_put_noref(prog, true);
1714 	}
1715 }
1716 
1717 void bpf_prog_put(struct bpf_prog *prog)
1718 {
1719 	__bpf_prog_put(prog, true);
1720 }
1721 EXPORT_SYMBOL_GPL(bpf_prog_put);
1722 
1723 static int bpf_prog_release(struct inode *inode, struct file *filp)
1724 {
1725 	struct bpf_prog *prog = filp->private_data;
1726 
1727 	bpf_prog_put(prog);
1728 	return 0;
1729 }
1730 
1731 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1732 			       struct bpf_prog_stats *stats)
1733 {
1734 	u64 nsecs = 0, cnt = 0, misses = 0;
1735 	int cpu;
1736 
1737 	for_each_possible_cpu(cpu) {
1738 		const struct bpf_prog_stats *st;
1739 		unsigned int start;
1740 		u64 tnsecs, tcnt, tmisses;
1741 
1742 		st = per_cpu_ptr(prog->stats, cpu);
1743 		do {
1744 			start = u64_stats_fetch_begin_irq(&st->syncp);
1745 			tnsecs = st->nsecs;
1746 			tcnt = st->cnt;
1747 			tmisses = st->misses;
1748 		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
1749 		nsecs += tnsecs;
1750 		cnt += tcnt;
1751 		misses += tmisses;
1752 	}
1753 	stats->nsecs = nsecs;
1754 	stats->cnt = cnt;
1755 	stats->misses = misses;
1756 }
1757 
1758 #ifdef CONFIG_PROC_FS
1759 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1760 {
1761 	const struct bpf_prog *prog = filp->private_data;
1762 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1763 	struct bpf_prog_stats stats;
1764 
1765 	bpf_prog_get_stats(prog, &stats);
1766 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1767 	seq_printf(m,
1768 		   "prog_type:\t%u\n"
1769 		   "prog_jited:\t%u\n"
1770 		   "prog_tag:\t%s\n"
1771 		   "memlock:\t%llu\n"
1772 		   "prog_id:\t%u\n"
1773 		   "run_time_ns:\t%llu\n"
1774 		   "run_cnt:\t%llu\n"
1775 		   "recursion_misses:\t%llu\n",
1776 		   prog->type,
1777 		   prog->jited,
1778 		   prog_tag,
1779 		   prog->pages * 1ULL << PAGE_SHIFT,
1780 		   prog->aux->id,
1781 		   stats.nsecs,
1782 		   stats.cnt,
1783 		   stats.misses);
1784 }
1785 #endif
1786 
1787 const struct file_operations bpf_prog_fops = {
1788 #ifdef CONFIG_PROC_FS
1789 	.show_fdinfo	= bpf_prog_show_fdinfo,
1790 #endif
1791 	.release	= bpf_prog_release,
1792 	.read		= bpf_dummy_read,
1793 	.write		= bpf_dummy_write,
1794 };
1795 
1796 int bpf_prog_new_fd(struct bpf_prog *prog)
1797 {
1798 	int ret;
1799 
1800 	ret = security_bpf_prog(prog);
1801 	if (ret < 0)
1802 		return ret;
1803 
1804 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1805 				O_RDWR | O_CLOEXEC);
1806 }
1807 
1808 static struct bpf_prog *____bpf_prog_get(struct fd f)
1809 {
1810 	if (!f.file)
1811 		return ERR_PTR(-EBADF);
1812 	if (f.file->f_op != &bpf_prog_fops) {
1813 		fdput(f);
1814 		return ERR_PTR(-EINVAL);
1815 	}
1816 
1817 	return f.file->private_data;
1818 }
1819 
1820 void bpf_prog_add(struct bpf_prog *prog, int i)
1821 {
1822 	atomic64_add(i, &prog->aux->refcnt);
1823 }
1824 EXPORT_SYMBOL_GPL(bpf_prog_add);
1825 
1826 void bpf_prog_sub(struct bpf_prog *prog, int i)
1827 {
1828 	/* Only to be used for undoing previous bpf_prog_add() in some
1829 	 * error path. We still know that another entity in our call
1830 	 * path holds a reference to the program, thus atomic_sub() can
1831 	 * be safely used in such cases!
1832 	 */
1833 	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1834 }
1835 EXPORT_SYMBOL_GPL(bpf_prog_sub);
1836 
1837 void bpf_prog_inc(struct bpf_prog *prog)
1838 {
1839 	atomic64_inc(&prog->aux->refcnt);
1840 }
1841 EXPORT_SYMBOL_GPL(bpf_prog_inc);
1842 
1843 /* prog_idr_lock should have been held */
1844 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1845 {
1846 	int refold;
1847 
1848 	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1849 
1850 	if (!refold)
1851 		return ERR_PTR(-ENOENT);
1852 
1853 	return prog;
1854 }
1855 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1856 
1857 bool bpf_prog_get_ok(struct bpf_prog *prog,
1858 		     enum bpf_prog_type *attach_type, bool attach_drv)
1859 {
1860 	/* not an attachment, just a refcount inc, always allow */
1861 	if (!attach_type)
1862 		return true;
1863 
1864 	if (prog->type != *attach_type)
1865 		return false;
1866 	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1867 		return false;
1868 
1869 	return true;
1870 }
1871 
1872 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1873 				       bool attach_drv)
1874 {
1875 	struct fd f = fdget(ufd);
1876 	struct bpf_prog *prog;
1877 
1878 	prog = ____bpf_prog_get(f);
1879 	if (IS_ERR(prog))
1880 		return prog;
1881 	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1882 		prog = ERR_PTR(-EINVAL);
1883 		goto out;
1884 	}
1885 
1886 	bpf_prog_inc(prog);
1887 out:
1888 	fdput(f);
1889 	return prog;
1890 }
1891 
1892 struct bpf_prog *bpf_prog_get(u32 ufd)
1893 {
1894 	return __bpf_prog_get(ufd, NULL, false);
1895 }
1896 
1897 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1898 				       bool attach_drv)
1899 {
1900 	return __bpf_prog_get(ufd, &type, attach_drv);
1901 }
1902 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1903 
1904 /* Initially all BPF programs could be loaded w/o specifying
1905  * expected_attach_type. Later for some of them specifying expected_attach_type
1906  * at load time became required so that program could be validated properly.
1907  * Programs of types that are allowed to be loaded both w/ and w/o (for
1908  * backward compatibility) expected_attach_type should have the default attach
1909  * type assigned to expected_attach_type for the latter case, so that it can be
1910  * validated later at attach time.
1911  *
1912  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1913  * prog type requires it but has some attach types that have to be backward
1914  * compatible.
1915  */
1916 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1917 {
1918 	switch (attr->prog_type) {
1919 	case BPF_PROG_TYPE_CGROUP_SOCK:
1920 		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1921 		 * exist so checking for non-zero is the way to go here.
1922 		 */
1923 		if (!attr->expected_attach_type)
1924 			attr->expected_attach_type =
1925 				BPF_CGROUP_INET_SOCK_CREATE;
1926 		break;
1927 	}
1928 }
1929 
1930 static int
1931 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1932 			   enum bpf_attach_type expected_attach_type,
1933 			   struct btf *attach_btf, u32 btf_id,
1934 			   struct bpf_prog *dst_prog)
1935 {
1936 	if (btf_id) {
1937 		if (btf_id > BTF_MAX_TYPE)
1938 			return -EINVAL;
1939 
1940 		if (!attach_btf && !dst_prog)
1941 			return -EINVAL;
1942 
1943 		switch (prog_type) {
1944 		case BPF_PROG_TYPE_TRACING:
1945 		case BPF_PROG_TYPE_LSM:
1946 		case BPF_PROG_TYPE_STRUCT_OPS:
1947 		case BPF_PROG_TYPE_EXT:
1948 			break;
1949 		default:
1950 			return -EINVAL;
1951 		}
1952 	}
1953 
1954 	if (attach_btf && (!btf_id || dst_prog))
1955 		return -EINVAL;
1956 
1957 	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
1958 	    prog_type != BPF_PROG_TYPE_EXT)
1959 		return -EINVAL;
1960 
1961 	switch (prog_type) {
1962 	case BPF_PROG_TYPE_CGROUP_SOCK:
1963 		switch (expected_attach_type) {
1964 		case BPF_CGROUP_INET_SOCK_CREATE:
1965 		case BPF_CGROUP_INET_SOCK_RELEASE:
1966 		case BPF_CGROUP_INET4_POST_BIND:
1967 		case BPF_CGROUP_INET6_POST_BIND:
1968 			return 0;
1969 		default:
1970 			return -EINVAL;
1971 		}
1972 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1973 		switch (expected_attach_type) {
1974 		case BPF_CGROUP_INET4_BIND:
1975 		case BPF_CGROUP_INET6_BIND:
1976 		case BPF_CGROUP_INET4_CONNECT:
1977 		case BPF_CGROUP_INET6_CONNECT:
1978 		case BPF_CGROUP_INET4_GETPEERNAME:
1979 		case BPF_CGROUP_INET6_GETPEERNAME:
1980 		case BPF_CGROUP_INET4_GETSOCKNAME:
1981 		case BPF_CGROUP_INET6_GETSOCKNAME:
1982 		case BPF_CGROUP_UDP4_SENDMSG:
1983 		case BPF_CGROUP_UDP6_SENDMSG:
1984 		case BPF_CGROUP_UDP4_RECVMSG:
1985 		case BPF_CGROUP_UDP6_RECVMSG:
1986 			return 0;
1987 		default:
1988 			return -EINVAL;
1989 		}
1990 	case BPF_PROG_TYPE_CGROUP_SKB:
1991 		switch (expected_attach_type) {
1992 		case BPF_CGROUP_INET_INGRESS:
1993 		case BPF_CGROUP_INET_EGRESS:
1994 			return 0;
1995 		default:
1996 			return -EINVAL;
1997 		}
1998 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
1999 		switch (expected_attach_type) {
2000 		case BPF_CGROUP_SETSOCKOPT:
2001 		case BPF_CGROUP_GETSOCKOPT:
2002 			return 0;
2003 		default:
2004 			return -EINVAL;
2005 		}
2006 	case BPF_PROG_TYPE_SK_LOOKUP:
2007 		if (expected_attach_type == BPF_SK_LOOKUP)
2008 			return 0;
2009 		return -EINVAL;
2010 	case BPF_PROG_TYPE_EXT:
2011 		if (expected_attach_type)
2012 			return -EINVAL;
2013 		fallthrough;
2014 	default:
2015 		return 0;
2016 	}
2017 }
2018 
2019 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2020 {
2021 	switch (prog_type) {
2022 	case BPF_PROG_TYPE_SCHED_CLS:
2023 	case BPF_PROG_TYPE_SCHED_ACT:
2024 	case BPF_PROG_TYPE_XDP:
2025 	case BPF_PROG_TYPE_LWT_IN:
2026 	case BPF_PROG_TYPE_LWT_OUT:
2027 	case BPF_PROG_TYPE_LWT_XMIT:
2028 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2029 	case BPF_PROG_TYPE_SK_SKB:
2030 	case BPF_PROG_TYPE_SK_MSG:
2031 	case BPF_PROG_TYPE_LIRC_MODE2:
2032 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2033 	case BPF_PROG_TYPE_CGROUP_DEVICE:
2034 	case BPF_PROG_TYPE_CGROUP_SOCK:
2035 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2036 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2037 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2038 	case BPF_PROG_TYPE_SOCK_OPS:
2039 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2040 		return true;
2041 	case BPF_PROG_TYPE_CGROUP_SKB:
2042 		/* always unpriv */
2043 	case BPF_PROG_TYPE_SK_REUSEPORT:
2044 		/* equivalent to SOCKET_FILTER; needs CAP_BPF only */
2045 	default:
2046 		return false;
2047 	}
2048 }
2049 
2050 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2051 {
2052 	switch (prog_type) {
2053 	case BPF_PROG_TYPE_KPROBE:
2054 	case BPF_PROG_TYPE_TRACEPOINT:
2055 	case BPF_PROG_TYPE_PERF_EVENT:
2056 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2057 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2058 	case BPF_PROG_TYPE_TRACING:
2059 	case BPF_PROG_TYPE_LSM:
2060 	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2061 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2062 		return true;
2063 	default:
2064 		return false;
2065 	}
2066 }
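
/* How the two classifiers above combine with the capability checks in
 * bpf_prog_load() below (CAP_SYS_ADMIN satisfies every row, since it is
 * implied by both bpf_capable() and perfmon_capable()):
 *
 *	SOCKET_FILTER, CGROUP_SKB	no CAP_BPF needed to load
 *	any other prog type		CAP_BPF
 *	is_net_admin_prog_type()	additionally CAP_NET_ADMIN
 *	is_perfmon_prog_type()		additionally CAP_PERFMON
 */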
2067 
2068 /* last field in 'union bpf_attr' used by this command */
2069 #define	BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
2070 
2071 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
2072 {
2073 	enum bpf_prog_type type = attr->prog_type;
2074 	struct bpf_prog *prog, *dst_prog = NULL;
2075 	struct btf *attach_btf = NULL;
2076 	int err;
2077 	char license[128];
2078 	bool is_gpl;
2079 
2080 	if (CHECK_ATTR(BPF_PROG_LOAD))
2081 		return -EINVAL;
2082 
2083 	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2084 				 BPF_F_ANY_ALIGNMENT |
2085 				 BPF_F_TEST_STATE_FREQ |
2086 				 BPF_F_SLEEPABLE |
2087 				 BPF_F_TEST_RND_HI32))
2088 		return -EINVAL;
2089 
2090 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2091 	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2092 	    !bpf_capable())
2093 		return -EPERM;
2094 
2095 	/* copy eBPF program license from user space */
2096 	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
2097 			      sizeof(license) - 1) < 0)
2098 		return -EFAULT;
2099 	license[sizeof(license) - 1] = 0;
2100 
2101 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2102 	is_gpl = license_is_gpl_compatible(license);
2103 
2104 	if (attr->insn_cnt == 0 ||
2105 	    attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2106 		return -E2BIG;
2107 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2108 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2109 	    !bpf_capable())
2110 		return -EPERM;
2111 
2112 	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2113 		return -EPERM;
2114 	if (is_perfmon_prog_type(type) && !perfmon_capable())
2115 		return -EPERM;
2116 
2117 	/* attach_prog_fd/attach_btf_obj_fd can specify the fd of either a bpf_prog
2118 	 * or a btf object, so we need to check which one it is
2119 	 */
2120 	if (attr->attach_prog_fd) {
2121 		dst_prog = bpf_prog_get(attr->attach_prog_fd);
2122 		if (IS_ERR(dst_prog)) {
2123 			dst_prog = NULL;
2124 			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2125 			if (IS_ERR(attach_btf))
2126 				return -EINVAL;
2127 			if (!btf_is_kernel(attach_btf)) {
2128 				/* attaching by specifying a bpf_prog's BTF
2129 				 * object directly might be supported eventually
2130 				 */
2131 				btf_put(attach_btf);
2132 				return -ENOTSUPP;
2133 			}
2134 		}
2135 	} else if (attr->attach_btf_id) {
2136 		/* fall back to vmlinux BTF, if BTF type ID is specified */
2137 		attach_btf = bpf_get_btf_vmlinux();
2138 		if (IS_ERR(attach_btf))
2139 			return PTR_ERR(attach_btf);
2140 		if (!attach_btf)
2141 			return -EINVAL;
2142 		btf_get(attach_btf);
2143 	}
2144 
2145 	bpf_prog_load_fixup_attach_type(attr);
2146 	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2147 				       attach_btf, attr->attach_btf_id,
2148 				       dst_prog)) {
2149 		if (dst_prog)
2150 			bpf_prog_put(dst_prog);
2151 		if (attach_btf)
2152 			btf_put(attach_btf);
2153 		return -EINVAL;
2154 	}
2155 
2156 	/* plain bpf_prog allocation */
2157 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2158 	if (!prog) {
2159 		if (dst_prog)
2160 			bpf_prog_put(dst_prog);
2161 		if (attach_btf)
2162 			btf_put(attach_btf);
2163 		return -ENOMEM;
2164 	}
2165 
2166 	prog->expected_attach_type = attr->expected_attach_type;
2167 	prog->aux->attach_btf = attach_btf;
2168 	prog->aux->attach_btf_id = attr->attach_btf_id;
2169 	prog->aux->dst_prog = dst_prog;
2170 	prog->aux->offload_requested = !!attr->prog_ifindex;
2171 	prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2172 
2173 	err = security_bpf_prog_alloc(prog->aux);
2174 	if (err)
2175 		goto free_prog;
2176 
2177 	prog->aux->user = get_current_user();
2178 	prog->len = attr->insn_cnt;
2179 
2180 	err = -EFAULT;
2181 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
2182 			   bpf_prog_insn_size(prog)) != 0)
2183 		goto free_prog_sec;
2184 
2185 	prog->orig_prog = NULL;
2186 	prog->jited = 0;
2187 
2188 	atomic64_set(&prog->aux->refcnt, 1);
2189 	prog->gpl_compatible = is_gpl ? 1 : 0;
2190 
2191 	if (bpf_prog_is_dev_bound(prog->aux)) {
2192 		err = bpf_prog_offload_init(prog, attr);
2193 		if (err)
2194 			goto free_prog_sec;
2195 	}
2196 
2197 	/* find program type: socket_filter vs tracing_filter */
2198 	err = find_prog_type(type, prog);
2199 	if (err < 0)
2200 		goto free_prog_sec;
2201 
2202 	prog->aux->load_time = ktime_get_boottime_ns();
2203 	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2204 			       sizeof(attr->prog_name));
2205 	if (err < 0)
2206 		goto free_prog_sec;
2207 
2208 	/* run eBPF verifier */
2209 	err = bpf_check(&prog, attr, uattr);
2210 	if (err < 0)
2211 		goto free_used_maps;
2212 
2213 	prog = bpf_prog_select_runtime(prog, &err);
2214 	if (err < 0)
2215 		goto free_used_maps;
2216 
2217 	err = bpf_prog_alloc_id(prog);
2218 	if (err)
2219 		goto free_used_maps;
2220 
2221 	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2222 	 * effectively publicly exposed. However, retrieving via
2223 	 * bpf_prog_get_fd_by_id() will take another reference,
2224 	 * therefore it cannot be gone underneath us.
2225 	 *
2226 	 * Only for the time /after/ successful bpf_prog_new_fd()
2227 	 * and before returning to userspace, we might just hold
2228 	 * one reference and any parallel close on that fd could
2229 	 * rip everything out. Hence, the notifications below must
2230 	 * happen before bpf_prog_new_fd().
2231 	 *
2232 	 * Also, any failure handling from this point onwards must
2233 	 * be using bpf_prog_put() given the program is exposed.
2234 	 */
2235 	bpf_prog_kallsyms_add(prog);
2236 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2237 	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2238 
2239 	err = bpf_prog_new_fd(prog);
2240 	if (err < 0)
2241 		bpf_prog_put(prog);
2242 	return err;
2243 
2244 free_used_maps:
2245 	/* In case we have subprogs, we need to wait for a grace
2246 	 * period before we can tear down JIT memory since symbols
2247 	 * are already exposed under kallsyms.
2248 	 */
2249 	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
2250 	return err;
2251 free_prog_sec:
2252 	free_uid(prog->aux->user);
2253 	security_bpf_prog_free(prog->aux);
2254 free_prog:
2255 	if (prog->aux->attach_btf)
2256 		btf_put(prog->aux->attach_btf);
2257 	bpf_prog_free(prog);
2258 	return err;
2259 }
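
/* Minimal user-space sketch of the load path above (illustrative, not part
 * of this file); the two-insn program just does "r0 = 0; exit":
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns = (__u64)(unsigned long)insns;
 *	attr.insn_cnt = 2;			// ARRAY_SIZE(insns)
 *	attr.license = (__u64)(unsigned long)"GPL";
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */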
2260 
2261 #define BPF_OBJ_LAST_FIELD file_flags
2262 
2263 static int bpf_obj_pin(const union bpf_attr *attr)
2264 {
2265 	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2266 		return -EINVAL;
2267 
2268 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2269 }
2270 
2271 static int bpf_obj_get(const union bpf_attr *attr)
2272 {
2273 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2274 	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2275 		return -EINVAL;
2276 
2277 	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2278 				attr->file_flags);
2279 }
2280 
2281 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2282 		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
2283 {
2284 	atomic64_set(&link->refcnt, 1);
2285 	link->type = type;
2286 	link->id = 0;
2287 	link->ops = ops;
2288 	link->prog = prog;
2289 }
2290 
2291 static void bpf_link_free_id(int id)
2292 {
2293 	if (!id)
2294 		return;
2295 
2296 	spin_lock_bh(&link_idr_lock);
2297 	idr_remove(&link_idr, id);
2298 	spin_unlock_bh(&link_idr_lock);
2299 }
2300 
2301 /* Clean up bpf_link and corresponding anon_inode file and FD. After
2302  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2303  * anon_inode's release() call. This helper marks bpf_link as
2304  * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
2305  * refcnt is not decremented; it's the responsibility of the calling code that failed
2306  * to complete bpf_link initialization.
2307  */
2308 void bpf_link_cleanup(struct bpf_link_primer *primer)
2309 {
2310 	primer->link->prog = NULL;
2311 	bpf_link_free_id(primer->id);
2312 	fput(primer->file);
2313 	put_unused_fd(primer->fd);
2314 }
2315 
2316 void bpf_link_inc(struct bpf_link *link)
2317 {
2318 	atomic64_inc(&link->refcnt);
2319 }
2320 
2321 /* bpf_link_free is guaranteed to be called from process context */
2322 static void bpf_link_free(struct bpf_link *link)
2323 {
2324 	bpf_link_free_id(link->id);
2325 	if (link->prog) {
2326 		/* detach BPF program, clean up used resources */
2327 		link->ops->release(link);
2328 		bpf_prog_put(link->prog);
2329 	}
2330 	/* free bpf_link and its containing memory */
2331 	link->ops->dealloc(link);
2332 }
2333 
2334 static void bpf_link_put_deferred(struct work_struct *work)
2335 {
2336 	struct bpf_link *link = container_of(work, struct bpf_link, work);
2337 
2338 	bpf_link_free(link);
2339 }
2340 
2341 /* bpf_link_put can be called from atomic context, but ensures that resources
2342  * are freed from process context
2343  */
2344 void bpf_link_put(struct bpf_link *link)
2345 {
2346 	if (!atomic64_dec_and_test(&link->refcnt))
2347 		return;
2348 
2349 	if (in_atomic()) {
2350 		INIT_WORK(&link->work, bpf_link_put_deferred);
2351 		schedule_work(&link->work);
2352 	} else {
2353 		bpf_link_free(link);
2354 	}
2355 }
2356 
2357 static int bpf_link_release(struct inode *inode, struct file *filp)
2358 {
2359 	struct bpf_link *link = filp->private_data;
2360 
2361 	bpf_link_put(link);
2362 	return 0;
2363 }
2364 
2365 #ifdef CONFIG_PROC_FS
2366 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2367 #define BPF_MAP_TYPE(_id, _ops)
2368 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2369 static const char *bpf_link_type_strs[] = {
2370 	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2371 #include <linux/bpf_types.h>
2372 };
2373 #undef BPF_PROG_TYPE
2374 #undef BPF_MAP_TYPE
2375 #undef BPF_LINK_TYPE
2376 
2377 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2378 {
2379 	const struct bpf_link *link = filp->private_data;
2380 	const struct bpf_prog *prog = link->prog;
2381 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2382 
2383 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2384 	seq_printf(m,
2385 		   "link_type:\t%s\n"
2386 		   "link_id:\t%u\n"
2387 		   "prog_tag:\t%s\n"
2388 		   "prog_id:\t%u\n",
2389 		   bpf_link_type_strs[link->type],
2390 		   link->id,
2391 		   prog_tag,
2392 		   prog->aux->id);
2393 	if (link->ops->show_fdinfo)
2394 		link->ops->show_fdinfo(link, m);
2395 }
2396 #endif
2397 
2398 static const struct file_operations bpf_link_fops = {
2399 #ifdef CONFIG_PROC_FS
2400 	.show_fdinfo	= bpf_link_show_fdinfo,
2401 #endif
2402 	.release	= bpf_link_release,
2403 	.read		= bpf_dummy_read,
2404 	.write		= bpf_dummy_write,
2405 };
2406 
2407 static int bpf_link_alloc_id(struct bpf_link *link)
2408 {
2409 	int id;
2410 
2411 	idr_preload(GFP_KERNEL);
2412 	spin_lock_bh(&link_idr_lock);
2413 	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2414 	spin_unlock_bh(&link_idr_lock);
2415 	idr_preload_end();
2416 
2417 	return id;
2418 }
2419 
2420 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2421  * reserving unused FD and allocating ID from link_idr. This is to be paired
2422  * with bpf_link_settle() to install FD and ID and expose bpf_link to
2423  * user-space, if bpf_link is successfully attached. If not, bpf_link and
2424  * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
2425  * the transient state is passed around in struct bpf_link_primer.
2426  * This is the preferred way to create and initialize bpf_link, especially when
2427  * there are complicated and expensive operations in between creating bpf_link
2428  * itself and attaching it to a BPF hook. By using bpf_link_prime() and
2429  * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2430  * expensive (and potentially failing) roll back operations in the rare case
2431  * that the file, FD, or ID can't be allocated.
2432  */
2433 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2434 {
2435 	struct file *file;
2436 	int fd, id;
2437 
2438 	fd = get_unused_fd_flags(O_CLOEXEC);
2439 	if (fd < 0)
2440 		return fd;
2441 
2443 	id = bpf_link_alloc_id(link);
2444 	if (id < 0) {
2445 		put_unused_fd(fd);
2446 		return id;
2447 	}
2448 
2449 	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2450 	if (IS_ERR(file)) {
2451 		bpf_link_free_id(id);
2452 		put_unused_fd(fd);
2453 		return PTR_ERR(file);
2454 	}
2455 
2456 	primer->link = link;
2457 	primer->file = file;
2458 	primer->fd = fd;
2459 	primer->id = id;
2460 	return 0;
2461 }
2462 
2463 int bpf_link_settle(struct bpf_link_primer *primer)
2464 {
2465 	/* make bpf_link fetchable by ID */
2466 	spin_lock_bh(&link_idr_lock);
2467 	primer->link->id = primer->id;
2468 	spin_unlock_bh(&link_idr_lock);
2469 	/* make bpf_link fetchable by FD */
2470 	fd_install(primer->fd, primer->file);
2471 	/* pass through installed FD */
2472 	return primer->fd;
2473 }
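
/* Canonical prime/attach/settle sequence served by the helpers above, as a
 * sketch for a hypothetical link type ('my_link', 'my_link_lops' and
 * 'my_hook_attach' are illustrative names only):
 *
 *	struct bpf_link_primer primer;
 *	struct my_link *link;
 *	int err;
 *
 *	link = kzalloc(sizeof(*link), GFP_USER);
 *	if (!link)
 *		return -ENOMEM;
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC, &my_link_lops, prog);
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);			// not primed yet, plain kfree
 *		return err;
 *	}
 *	err = my_hook_attach(link);		// expensive, may fail
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// never kfree() once primed
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// publish FD and ID
 */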
2474 
2475 int bpf_link_new_fd(struct bpf_link *link)
2476 {
2477 	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2478 }
2479 
2480 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2481 {
2482 	struct fd f = fdget(ufd);
2483 	struct bpf_link *link;
2484 
2485 	if (!f.file)
2486 		return ERR_PTR(-EBADF);
2487 	if (f.file->f_op != &bpf_link_fops) {
2488 		fdput(f);
2489 		return ERR_PTR(-EINVAL);
2490 	}
2491 
2492 	link = f.file->private_data;
2493 	bpf_link_inc(link);
2494 	fdput(f);
2495 
2496 	return link;
2497 }
2498 
2499 struct bpf_tracing_link {
2500 	struct bpf_link link;
2501 	enum bpf_attach_type attach_type;
2502 	struct bpf_trampoline *trampoline;
2503 	struct bpf_prog *tgt_prog;
2504 };
2505 
2506 static void bpf_tracing_link_release(struct bpf_link *link)
2507 {
2508 	struct bpf_tracing_link *tr_link =
2509 		container_of(link, struct bpf_tracing_link, link);
2510 
2511 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
2512 						tr_link->trampoline));
2513 
2514 	bpf_trampoline_put(tr_link->trampoline);
2515 
2516 	/* tgt_prog is NULL if target is a kernel function */
2517 	if (tr_link->tgt_prog)
2518 		bpf_prog_put(tr_link->tgt_prog);
2519 }
2520 
2521 static void bpf_tracing_link_dealloc(struct bpf_link *link)
2522 {
2523 	struct bpf_tracing_link *tr_link =
2524 		container_of(link, struct bpf_tracing_link, link);
2525 
2526 	kfree(tr_link);
2527 }
2528 
2529 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2530 					 struct seq_file *seq)
2531 {
2532 	struct bpf_tracing_link *tr_link =
2533 		container_of(link, struct bpf_tracing_link, link);
2534 
2535 	seq_printf(seq,
2536 		   "attach_type:\t%d\n",
2537 		   tr_link->attach_type);
2538 }
2539 
2540 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2541 					   struct bpf_link_info *info)
2542 {
2543 	struct bpf_tracing_link *tr_link =
2544 		container_of(link, struct bpf_tracing_link, link);
2545 
2546 	info->tracing.attach_type = tr_link->attach_type;
2547 
2548 	return 0;
2549 }
2550 
2551 static const struct bpf_link_ops bpf_tracing_link_lops = {
2552 	.release = bpf_tracing_link_release,
2553 	.dealloc = bpf_tracing_link_dealloc,
2554 	.show_fdinfo = bpf_tracing_link_show_fdinfo,
2555 	.fill_link_info = bpf_tracing_link_fill_link_info,
2556 };
2557 
2558 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
2559 				   int tgt_prog_fd,
2560 				   u32 btf_id)
2561 {
2562 	struct bpf_link_primer link_primer;
2563 	struct bpf_prog *tgt_prog = NULL;
2564 	struct bpf_trampoline *tr = NULL;
2565 	struct bpf_tracing_link *link;
2566 	u64 key = 0;
2567 	int err;
2568 
2569 	switch (prog->type) {
2570 	case BPF_PROG_TYPE_TRACING:
2571 		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2572 		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
2573 		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
2574 			err = -EINVAL;
2575 			goto out_put_prog;
2576 		}
2577 		break;
2578 	case BPF_PROG_TYPE_EXT:
2579 		if (prog->expected_attach_type != 0) {
2580 			err = -EINVAL;
2581 			goto out_put_prog;
2582 		}
2583 		break;
2584 	case BPF_PROG_TYPE_LSM:
2585 		if (prog->expected_attach_type != BPF_LSM_MAC) {
2586 			err = -EINVAL;
2587 			goto out_put_prog;
2588 		}
2589 		break;
2590 	default:
2591 		err = -EINVAL;
2592 		goto out_put_prog;
2593 	}
2594 
2595 	if (!!tgt_prog_fd != !!btf_id) {
2596 		err = -EINVAL;
2597 		goto out_put_prog;
2598 	}
2599 
2600 	if (tgt_prog_fd) {
2601 		/* For now we only allow new targets for BPF_PROG_TYPE_EXT */
2602 		if (prog->type != BPF_PROG_TYPE_EXT) {
2603 			err = -EINVAL;
2604 			goto out_put_prog;
2605 		}
2606 
2607 		tgt_prog = bpf_prog_get(tgt_prog_fd);
2608 		if (IS_ERR(tgt_prog)) {
2609 			err = PTR_ERR(tgt_prog);
2610 			tgt_prog = NULL;
2611 			goto out_put_prog;
2612 		}
2613 
2614 		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
2615 	}
2616 
2617 	link = kzalloc(sizeof(*link), GFP_USER);
2618 	if (!link) {
2619 		err = -ENOMEM;
2620 		goto out_put_prog;
2621 	}
2622 	bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2623 		      &bpf_tracing_link_lops, prog);
2624 	link->attach_type = prog->expected_attach_type;
2625 
2626 	mutex_lock(&prog->aux->dst_mutex);
2627 
2628 	/* There are a few possible cases here:
2629 	 *
2630 	 * - if prog->aux->dst_trampoline is set, the program was just loaded
2631 	 *   and not yet attached to anything, so we can use the values stored
2632 	 *   in prog->aux
2633 	 *
2634 	 * - if prog->aux->dst_trampoline is NULL, the program has already been
2635 	 *   attached to a target and its initial target was cleared (below)
2636 	 *
2637 	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
2638 	 *   target_btf_id using the link_create API.
2639 	 *
2640 	 * - if tgt_prog == NULL, this function was called using the old
2641 	 *   raw_tracepoint_open API, and we need a target from prog->aux
2642 	 *
2643 	 * The combination of no saved target in prog->aux and no target
2644 	 * specified on load is illegal, and we reject that here.
2645 	 */
2646 	if (!prog->aux->dst_trampoline && !tgt_prog) {
2647 		err = -ENOENT;
2648 		goto out_unlock;
2649 	}
2650 
2651 	if (!prog->aux->dst_trampoline ||
2652 	    (key && key != prog->aux->dst_trampoline->key)) {
2653 		/* If there is no saved target, or the specified target is
2654 		 * different from the destination specified at load time, we
2655 		 * need a new trampoline and a check for compatibility
2656 		 */
2657 		struct bpf_attach_target_info tgt_info = {};
2658 
2659 		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
2660 					      &tgt_info);
2661 		if (err)
2662 			goto out_unlock;
2663 
2664 		tr = bpf_trampoline_get(key, &tgt_info);
2665 		if (!tr) {
2666 			err = -ENOMEM;
2667 			goto out_unlock;
2668 		}
2669 	} else {
2670 		/* The caller didn't specify a target, or the target was the
2671 		 * same as the destination supplied during program load. This
2672 		 * means we can reuse the trampoline and reference from program
2673 		 * load time, and there is no need to allocate a new one. This
2674 		 * can only happen once for any program, as the saved values in
2675 		 * prog->aux are cleared below.
2676 		 */
2677 		tr = prog->aux->dst_trampoline;
2678 		tgt_prog = prog->aux->dst_prog;
2679 	}
2680 
2681 	err = bpf_link_prime(&link->link, &link_primer);
2682 	if (err)
2683 		goto out_unlock;
2684 
2685 	err = bpf_trampoline_link_prog(prog, tr);
2686 	if (err) {
2687 		bpf_link_cleanup(&link_primer);
2688 		link = NULL;
2689 		goto out_unlock;
2690 	}
2691 
2692 	link->tgt_prog = tgt_prog;
2693 	link->trampoline = tr;
2694 
2695 	/* Always clear the trampoline and target prog from prog->aux to make
2696 	 * sure the original attach destination is not kept alive after a
2697 	 * program is (re-)attached to another target.
2698 	 */
2699 	if (prog->aux->dst_prog &&
2700 	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
2701 		/* got extra prog ref from syscall, or attaching to different prog */
2702 		bpf_prog_put(prog->aux->dst_prog);
2703 	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
2704 		/* we allocated a new trampoline, so free the old one */
2705 		bpf_trampoline_put(prog->aux->dst_trampoline);
2706 
2707 	prog->aux->dst_prog = NULL;
2708 	prog->aux->dst_trampoline = NULL;
2709 	mutex_unlock(&prog->aux->dst_mutex);
2710 
2711 	return bpf_link_settle(&link_primer);
2712 out_unlock:
2713 	if (tr && tr != prog->aux->dst_trampoline)
2714 		bpf_trampoline_put(tr);
2715 	mutex_unlock(&prog->aux->dst_mutex);
2716 	kfree(link);
2717 out_put_prog:
2718 	if (tgt_prog_fd && tgt_prog)
2719 		bpf_prog_put(tgt_prog);
2720 	return err;
2721 }
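
/* User-space sketch of the LINK_CREATE path into the function above:
 * re-attaching a BPF_PROG_TYPE_EXT program to a new target ('ext_fd',
 * 'tgt_fd' and 'btf_id' are illustrative values obtained at load time):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = ext_fd;
 *	attr.link_create.target_fd = tgt_fd;		// fd of the target prog
 *	attr.link_create.target_btf_id = btf_id;	// function to replace
 *	int link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */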
2722 
2723 struct bpf_raw_tp_link {
2724 	struct bpf_link link;
2725 	struct bpf_raw_event_map *btp;
2726 };
2727 
2728 static void bpf_raw_tp_link_release(struct bpf_link *link)
2729 {
2730 	struct bpf_raw_tp_link *raw_tp =
2731 		container_of(link, struct bpf_raw_tp_link, link);
2732 
2733 	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2734 	bpf_put_raw_tracepoint(raw_tp->btp);
2735 }
2736 
2737 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2738 {
2739 	struct bpf_raw_tp_link *raw_tp =
2740 		container_of(link, struct bpf_raw_tp_link, link);
2741 
2742 	kfree(raw_tp);
2743 }
2744 
2745 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2746 					struct seq_file *seq)
2747 {
2748 	struct bpf_raw_tp_link *raw_tp_link =
2749 		container_of(link, struct bpf_raw_tp_link, link);
2750 
2751 	seq_printf(seq,
2752 		   "tp_name:\t%s\n",
2753 		   raw_tp_link->btp->tp->name);
2754 }
2755 
2756 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2757 					  struct bpf_link_info *info)
2758 {
2759 	struct bpf_raw_tp_link *raw_tp_link =
2760 		container_of(link, struct bpf_raw_tp_link, link);
2761 	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2762 	const char *tp_name = raw_tp_link->btp->tp->name;
2763 	u32 ulen = info->raw_tracepoint.tp_name_len;
2764 	size_t tp_len = strlen(tp_name);
2765 
2766 	if (!ulen ^ !ubuf)
2767 		return -EINVAL;
2768 
2769 	info->raw_tracepoint.tp_name_len = tp_len + 1;
2770 
2771 	if (!ubuf)
2772 		return 0;
2773 
2774 	if (ulen >= tp_len + 1) {
2775 		if (copy_to_user(ubuf, tp_name, tp_len + 1))
2776 			return -EFAULT;
2777 	} else {
2778 		char zero = '\0';
2779 
2780 		if (copy_to_user(ubuf, tp_name, ulen - 1))
2781 			return -EFAULT;
2782 		if (put_user(zero, ubuf + ulen - 1))
2783 			return -EFAULT;
2784 		return -ENOSPC;
2785 	}
2786 
2787 	return 0;
2788 }
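
/* Caller-side sketch of the copy-out protocol above: a too-small buffer
 * still receives a NUL-terminated truncation plus -ENOSPC, and tp_name_len
 * is always updated to the required size ('link_fd' is illustrative):
 *
 *	char buf[64];
 *	struct bpf_link_info info = {};
 *	union bpf_attr attr = {};
 *
 *	info.raw_tracepoint.tp_name = (__u64)(unsigned long)buf;
 *	info.raw_tracepoint.tp_name_len = sizeof(buf);
 *	attr.info.bpf_fd = link_fd;
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	attr.info.info_len = sizeof(info);
 *	int err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 */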
2789 
2790 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2791 	.release = bpf_raw_tp_link_release,
2792 	.dealloc = bpf_raw_tp_link_dealloc,
2793 	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2794 	.fill_link_info = bpf_raw_tp_link_fill_link_info,
2795 };
2796 
2797 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2798 
2799 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2800 {
2801 	struct bpf_link_primer link_primer;
2802 	struct bpf_raw_tp_link *link;
2803 	struct bpf_raw_event_map *btp;
2804 	struct bpf_prog *prog;
2805 	const char *tp_name;
2806 	char buf[128];
2807 	int err;
2808 
2809 	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2810 		return -EINVAL;
2811 
2812 	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2813 	if (IS_ERR(prog))
2814 		return PTR_ERR(prog);
2815 
2816 	switch (prog->type) {
2817 	case BPF_PROG_TYPE_TRACING:
2818 	case BPF_PROG_TYPE_EXT:
2819 	case BPF_PROG_TYPE_LSM:
2820 		if (attr->raw_tracepoint.name) {
2821 			/* The attach point for this category of programs
2822 			 * should be specified via btf_id during program load.
2823 			 */
2824 			err = -EINVAL;
2825 			goto out_put_prog;
2826 		}
2827 		if (prog->type == BPF_PROG_TYPE_TRACING &&
2828 		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2829 			tp_name = prog->aux->attach_func_name;
2830 			break;
2831 		}
2832 		err = bpf_tracing_prog_attach(prog, 0, 0);
2833 		if (err >= 0)
2834 			return err;
2835 		goto out_put_prog;
2836 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2837 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2838 		if (strncpy_from_user(buf,
2839 				      u64_to_user_ptr(attr->raw_tracepoint.name),
2840 				      sizeof(buf) - 1) < 0) {
2841 			err = -EFAULT;
2842 			goto out_put_prog;
2843 		}
2844 		buf[sizeof(buf) - 1] = 0;
2845 		tp_name = buf;
2846 		break;
2847 	default:
2848 		err = -EINVAL;
2849 		goto out_put_prog;
2850 	}
2851 
2852 	btp = bpf_get_raw_tracepoint(tp_name);
2853 	if (!btp) {
2854 		err = -ENOENT;
2855 		goto out_put_prog;
2856 	}
2857 
2858 	link = kzalloc(sizeof(*link), GFP_USER);
2859 	if (!link) {
2860 		err = -ENOMEM;
2861 		goto out_put_btp;
2862 	}
2863 	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
2864 		      &bpf_raw_tp_link_lops, prog);
2865 	link->btp = btp;
2866 
2867 	err = bpf_link_prime(&link->link, &link_primer);
2868 	if (err) {
2869 		kfree(link);
2870 		goto out_put_btp;
2871 	}
2872 
2873 	err = bpf_probe_register(link->btp, prog);
2874 	if (err) {
2875 		bpf_link_cleanup(&link_primer);
2876 		goto out_put_btp;
2877 	}
2878 
2879 	return bpf_link_settle(&link_primer);
2880 
2881 out_put_btp:
2882 	bpf_put_raw_tracepoint(btp);
2883 out_put_prog:
2884 	bpf_prog_put(prog);
2885 	return err;
2886 }
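
/* User-space sketch of the command above for a plain raw tracepoint
 * program ('prog_fd' is illustrative; the name must match an existing
 * raw tracepoint, e.g. "sched_switch"):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	int link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
 *			      &attr, sizeof(attr));
 */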
2887 
2888 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2889 					     enum bpf_attach_type attach_type)
2890 {
2891 	switch (prog->type) {
2892 	case BPF_PROG_TYPE_CGROUP_SOCK:
2893 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2894 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2895 	case BPF_PROG_TYPE_SK_LOOKUP:
2896 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2897 	case BPF_PROG_TYPE_CGROUP_SKB:
2898 		if (!capable(CAP_NET_ADMIN))
2899 			/* cg-skb progs can be loaded by an unprivileged user,
2900 			 * so check permissions at attach time.
2901 			 */
2902 			return -EPERM;
2903 		return prog->enforce_expected_attach_type &&
2904 			prog->expected_attach_type != attach_type ?
2905 			-EINVAL : 0;
2906 	default:
2907 		return 0;
2908 	}
2909 }
2910 
2911 static enum bpf_prog_type
2912 attach_type_to_prog_type(enum bpf_attach_type attach_type)
2913 {
2914 	switch (attach_type) {
2915 	case BPF_CGROUP_INET_INGRESS:
2916 	case BPF_CGROUP_INET_EGRESS:
2917 		return BPF_PROG_TYPE_CGROUP_SKB;
2918 	case BPF_CGROUP_INET_SOCK_CREATE:
2919 	case BPF_CGROUP_INET_SOCK_RELEASE:
2920 	case BPF_CGROUP_INET4_POST_BIND:
2921 	case BPF_CGROUP_INET6_POST_BIND:
2922 		return BPF_PROG_TYPE_CGROUP_SOCK;
2923 	case BPF_CGROUP_INET4_BIND:
2924 	case BPF_CGROUP_INET6_BIND:
2925 	case BPF_CGROUP_INET4_CONNECT:
2926 	case BPF_CGROUP_INET6_CONNECT:
2927 	case BPF_CGROUP_INET4_GETPEERNAME:
2928 	case BPF_CGROUP_INET6_GETPEERNAME:
2929 	case BPF_CGROUP_INET4_GETSOCKNAME:
2930 	case BPF_CGROUP_INET6_GETSOCKNAME:
2931 	case BPF_CGROUP_UDP4_SENDMSG:
2932 	case BPF_CGROUP_UDP6_SENDMSG:
2933 	case BPF_CGROUP_UDP4_RECVMSG:
2934 	case BPF_CGROUP_UDP6_RECVMSG:
2935 		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2936 	case BPF_CGROUP_SOCK_OPS:
2937 		return BPF_PROG_TYPE_SOCK_OPS;
2938 	case BPF_CGROUP_DEVICE:
2939 		return BPF_PROG_TYPE_CGROUP_DEVICE;
2940 	case BPF_SK_MSG_VERDICT:
2941 		return BPF_PROG_TYPE_SK_MSG;
2942 	case BPF_SK_SKB_STREAM_PARSER:
2943 	case BPF_SK_SKB_STREAM_VERDICT:
2944 		return BPF_PROG_TYPE_SK_SKB;
2945 	case BPF_LIRC_MODE2:
2946 		return BPF_PROG_TYPE_LIRC_MODE2;
2947 	case BPF_FLOW_DISSECTOR:
2948 		return BPF_PROG_TYPE_FLOW_DISSECTOR;
2949 	case BPF_CGROUP_SYSCTL:
2950 		return BPF_PROG_TYPE_CGROUP_SYSCTL;
2951 	case BPF_CGROUP_GETSOCKOPT:
2952 	case BPF_CGROUP_SETSOCKOPT:
2953 		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
2954 	case BPF_TRACE_ITER:
2955 		return BPF_PROG_TYPE_TRACING;
2956 	case BPF_SK_LOOKUP:
2957 		return BPF_PROG_TYPE_SK_LOOKUP;
2958 	case BPF_XDP:
2959 		return BPF_PROG_TYPE_XDP;
2960 	default:
2961 		return BPF_PROG_TYPE_UNSPEC;
2962 	}
2963 }
2964 
2965 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
2966 
2967 #define BPF_F_ATTACH_MASK \
2968 	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
2969 
2970 static int bpf_prog_attach(const union bpf_attr *attr)
2971 {
2972 	enum bpf_prog_type ptype;
2973 	struct bpf_prog *prog;
2974 	int ret;
2975 
2976 	if (CHECK_ATTR(BPF_PROG_ATTACH))
2977 		return -EINVAL;
2978 
2979 	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
2980 		return -EINVAL;
2981 
2982 	ptype = attach_type_to_prog_type(attr->attach_type);
2983 	if (ptype == BPF_PROG_TYPE_UNSPEC)
2984 		return -EINVAL;
2985 
2986 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
2987 	if (IS_ERR(prog))
2988 		return PTR_ERR(prog);
2989 
2990 	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
2991 		bpf_prog_put(prog);
2992 		return -EINVAL;
2993 	}
2994 
2995 	switch (ptype) {
2996 	case BPF_PROG_TYPE_SK_SKB:
2997 	case BPF_PROG_TYPE_SK_MSG:
2998 		ret = sock_map_get_from_fd(attr, prog);
2999 		break;
3000 	case BPF_PROG_TYPE_LIRC_MODE2:
3001 		ret = lirc_prog_attach(attr, prog);
3002 		break;
3003 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3004 		ret = netns_bpf_prog_attach(attr, prog);
3005 		break;
3006 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3007 	case BPF_PROG_TYPE_CGROUP_SKB:
3008 	case BPF_PROG_TYPE_CGROUP_SOCK:
3009 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3010 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3011 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3012 	case BPF_PROG_TYPE_SOCK_OPS:
3013 		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3014 		break;
3015 	default:
3016 		ret = -EINVAL;
3017 	}
3018 
3019 	if (ret)
3020 		bpf_prog_put(prog);
3021 	return ret;
3022 }
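
/* User-space sketch of a cgroup attach through the function above
 * ('cg_fd' is an open fd on a cgroup2 directory, 'prog_fd' a loaded
 * BPF_PROG_TYPE_CGROUP_SKB program):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cg_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	int err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */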
3023 
3024 #define BPF_PROG_DETACH_LAST_FIELD attach_type
3025 
3026 static int bpf_prog_detach(const union bpf_attr *attr)
3027 {
3028 	enum bpf_prog_type ptype;
3029 
3030 	if (CHECK_ATTR(BPF_PROG_DETACH))
3031 		return -EINVAL;
3032 
3033 	ptype = attach_type_to_prog_type(attr->attach_type);
3034 
3035 	switch (ptype) {
3036 	case BPF_PROG_TYPE_SK_MSG:
3037 	case BPF_PROG_TYPE_SK_SKB:
3038 		return sock_map_prog_detach(attr, ptype);
3039 	case BPF_PROG_TYPE_LIRC_MODE2:
3040 		return lirc_prog_detach(attr);
3041 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3042 		return netns_bpf_prog_detach(attr, ptype);
3043 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3044 	case BPF_PROG_TYPE_CGROUP_SKB:
3045 	case BPF_PROG_TYPE_CGROUP_SOCK:
3046 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3047 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3048 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3049 	case BPF_PROG_TYPE_SOCK_OPS:
3050 		return cgroup_bpf_prog_detach(attr, ptype);
3051 	default:
3052 		return -EINVAL;
3053 	}
3054 }
3055 
3056 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
3057 
3058 static int bpf_prog_query(const union bpf_attr *attr,
3059 			  union bpf_attr __user *uattr)
3060 {
3061 	if (!capable(CAP_NET_ADMIN))
3062 		return -EPERM;
3063 	if (CHECK_ATTR(BPF_PROG_QUERY))
3064 		return -EINVAL;
3065 	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
3066 		return -EINVAL;
3067 
3068 	switch (attr->query.attach_type) {
3069 	case BPF_CGROUP_INET_INGRESS:
3070 	case BPF_CGROUP_INET_EGRESS:
3071 	case BPF_CGROUP_INET_SOCK_CREATE:
3072 	case BPF_CGROUP_INET_SOCK_RELEASE:
3073 	case BPF_CGROUP_INET4_BIND:
3074 	case BPF_CGROUP_INET6_BIND:
3075 	case BPF_CGROUP_INET4_POST_BIND:
3076 	case BPF_CGROUP_INET6_POST_BIND:
3077 	case BPF_CGROUP_INET4_CONNECT:
3078 	case BPF_CGROUP_INET6_CONNECT:
3079 	case BPF_CGROUP_INET4_GETPEERNAME:
3080 	case BPF_CGROUP_INET6_GETPEERNAME:
3081 	case BPF_CGROUP_INET4_GETSOCKNAME:
3082 	case BPF_CGROUP_INET6_GETSOCKNAME:
3083 	case BPF_CGROUP_UDP4_SENDMSG:
3084 	case BPF_CGROUP_UDP6_SENDMSG:
3085 	case BPF_CGROUP_UDP4_RECVMSG:
3086 	case BPF_CGROUP_UDP6_RECVMSG:
3087 	case BPF_CGROUP_SOCK_OPS:
3088 	case BPF_CGROUP_DEVICE:
3089 	case BPF_CGROUP_SYSCTL:
3090 	case BPF_CGROUP_GETSOCKOPT:
3091 	case BPF_CGROUP_SETSOCKOPT:
3092 		return cgroup_bpf_prog_query(attr, uattr);
3093 	case BPF_LIRC_MODE2:
3094 		return lirc_prog_query(attr, uattr);
3095 	case BPF_FLOW_DISSECTOR:
3096 	case BPF_SK_LOOKUP:
3097 		return netns_bpf_prog_query(attr, uattr);
3098 	default:
3099 		return -EINVAL;
3100 	}
3101 }
3102 
3103 #define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
3104 
3105 static int bpf_prog_test_run(const union bpf_attr *attr,
3106 			     union bpf_attr __user *uattr)
3107 {
3108 	struct bpf_prog *prog;
3109 	int ret = -ENOTSUPP;
3110 
3111 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
3112 		return -EINVAL;
3113 
3114 	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
3115 	    (!attr->test.ctx_size_in && attr->test.ctx_in))
3116 		return -EINVAL;
3117 
3118 	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
3119 	    (!attr->test.ctx_size_out && attr->test.ctx_out))
3120 		return -EINVAL;
3121 
3122 	prog = bpf_prog_get(attr->test.prog_fd);
3123 	if (IS_ERR(prog))
3124 		return PTR_ERR(prog);
3125 
3126 	if (prog->aux->ops->test_run)
3127 		ret = prog->aux->ops->test_run(prog, attr, uattr);
3128 
3129 	bpf_prog_put(prog);
3130 	return ret;
3131 }
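
/* User-space sketch of a test run ('prog_fd' is a loaded socket filter;
 * data_in/data_size_in feed the program a synthetic packet):
 *
 *	char pkt[64] = {};
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = sizeof(pkt);
 *	attr.test.repeat = 1;
 *	int err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *	// on success, attr.test.retval holds the program's return value
 */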
3132 
3133 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3134 
3135 static int bpf_obj_get_next_id(const union bpf_attr *attr,
3136 			       union bpf_attr __user *uattr,
3137 			       struct idr *idr,
3138 			       spinlock_t *lock)
3139 {
3140 	u32 next_id = attr->start_id;
3141 	int err = 0;
3142 
3143 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3144 		return -EINVAL;
3145 
3146 	if (!capable(CAP_SYS_ADMIN))
3147 		return -EPERM;
3148 
3149 	next_id++;
3150 	spin_lock_bh(lock);
3151 	if (!idr_get_next(idr, &next_id))
3152 		err = -ENOENT;
3153 	spin_unlock_bh(lock);
3154 
3155 	if (!err)
3156 		err = put_user(next_id, &uattr->next_id);
3157 
3158 	return err;
3159 }
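
/* User-space sketch of the ID iteration this helper implements: walk all
 * program IDs until -ENOENT, tolerating concurrent unloads:
 *
 *	union bpf_attr attr = {};
 *	__u32 id = 0;
 *
 *	for (;;) {
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *			    sizeof(attr)) < 0)
 *			break;		// -ENOENT once the IDR is exhausted
 *		id = attr.next_id;
 *		// ... BPF_PROG_GET_FD_BY_ID on 'id', inspect, close ...
 *	}
 */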
3160 
3161 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3162 {
3163 	struct bpf_map *map;
3164 
3165 	spin_lock_bh(&map_idr_lock);
3166 again:
3167 	map = idr_get_next(&map_idr, id);
3168 	if (map) {
3169 		map = __bpf_map_inc_not_zero(map, false);
3170 		if (IS_ERR(map)) {
3171 			(*id)++;
3172 			goto again;
3173 		}
3174 	}
3175 	spin_unlock_bh(&map_idr_lock);
3176 
3177 	return map;
3178 }
3179 
3180 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3181 {
3182 	struct bpf_prog *prog;
3183 
3184 	spin_lock_bh(&prog_idr_lock);
3185 again:
3186 	prog = idr_get_next(&prog_idr, id);
3187 	if (prog) {
3188 		prog = bpf_prog_inc_not_zero(prog);
3189 		if (IS_ERR(prog)) {
3190 			(*id)++;
3191 			goto again;
3192 		}
3193 	}
3194 	spin_unlock_bh(&prog_idr_lock);
3195 
3196 	return prog;
3197 }
3198 
3199 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3200 
3201 struct bpf_prog *bpf_prog_by_id(u32 id)
3202 {
3203 	struct bpf_prog *prog;
3204 
3205 	if (!id)
3206 		return ERR_PTR(-ENOENT);
3207 
3208 	spin_lock_bh(&prog_idr_lock);
3209 	prog = idr_find(&prog_idr, id);
3210 	if (prog)
3211 		prog = bpf_prog_inc_not_zero(prog);
3212 	else
3213 		prog = ERR_PTR(-ENOENT);
3214 	spin_unlock_bh(&prog_idr_lock);
3215 	return prog;
3216 }
3217 
3218 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3219 {
3220 	struct bpf_prog *prog;
3221 	u32 id = attr->prog_id;
3222 	int fd;
3223 
3224 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3225 		return -EINVAL;
3226 
3227 	if (!capable(CAP_SYS_ADMIN))
3228 		return -EPERM;
3229 
3230 	prog = bpf_prog_by_id(id);
3231 	if (IS_ERR(prog))
3232 		return PTR_ERR(prog);
3233 
3234 	fd = bpf_prog_new_fd(prog);
3235 	if (fd < 0)
3236 		bpf_prog_put(prog);
3237 
3238 	return fd;
3239 }
3240 
3241 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3242 
3243 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3244 {
3245 	struct bpf_map *map;
3246 	u32 id = attr->map_id;
3247 	int f_flags;
3248 	int fd;
3249 
3250 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3251 	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3252 		return -EINVAL;
3253 
3254 	if (!capable(CAP_SYS_ADMIN))
3255 		return -EPERM;
3256 
3257 	f_flags = bpf_get_file_flag(attr->open_flags);
3258 	if (f_flags < 0)
3259 		return f_flags;
3260 
3261 	spin_lock_bh(&map_idr_lock);
3262 	map = idr_find(&map_idr, id);
3263 	if (map)
3264 		map = __bpf_map_inc_not_zero(map, true);
3265 	else
3266 		map = ERR_PTR(-ENOENT);
3267 	spin_unlock_bh(&map_idr_lock);
3268 
3269 	if (IS_ERR(map))
3270 		return PTR_ERR(map);
3271 
3272 	fd = bpf_map_new_fd(map, f_flags);
3273 	if (fd < 0)
3274 		bpf_map_put_with_uref(map);
3275 
3276 	return fd;
3277 }
3278 
3279 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3280 					      unsigned long addr, u32 *off,
3281 					      u32 *type)
3282 {
3283 	const struct bpf_map *map;
3284 	int i;
3285 
3286 	mutex_lock(&prog->aux->used_maps_mutex);
3287 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3288 		map = prog->aux->used_maps[i];
3289 		if (map == (void *)addr) {
3290 			*type = BPF_PSEUDO_MAP_FD;
3291 			goto out;
3292 		}
3293 		if (!map->ops->map_direct_value_meta)
3294 			continue;
3295 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
3296 			*type = BPF_PSEUDO_MAP_VALUE;
3297 			goto out;
3298 		}
3299 	}
3300 	map = NULL;
3301 
3302 out:
3303 	mutex_unlock(&prog->aux->used_maps_mutex);
3304 	return map;
3305 }
3306 
3307 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3308 					      const struct cred *f_cred)
3309 {
3310 	const struct bpf_map *map;
3311 	struct bpf_insn *insns;
3312 	u32 off, type;
3313 	u64 imm;
3314 	u8 code;
3315 	int i;
3316 
3317 	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3318 			GFP_USER);
3319 	if (!insns)
3320 		return insns;
3321 
3322 	for (i = 0; i < prog->len; i++) {
3323 		code = insns[i].code;
3324 
3325 		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3326 			insns[i].code = BPF_JMP | BPF_CALL;
3327 			insns[i].imm = BPF_FUNC_tail_call;
3328 			/* fall-through */
3329 		}
3330 		if (code == (BPF_JMP | BPF_CALL) ||
3331 		    code == (BPF_JMP | BPF_CALL_ARGS)) {
3332 			if (code == (BPF_JMP | BPF_CALL_ARGS))
3333 				insns[i].code = BPF_JMP | BPF_CALL;
3334 			if (!bpf_dump_raw_ok(f_cred))
3335 				insns[i].imm = 0;
3336 			continue;
3337 		}
3338 		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3339 			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3340 			continue;
3341 		}
3342 
3343 		if (code != (BPF_LD | BPF_IMM | BPF_DW))
3344 			continue;
3345 
3346 		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3347 		map = bpf_map_from_imm(prog, imm, &off, &type);
3348 		if (map) {
3349 			insns[i].src_reg = type;
3350 			insns[i].imm = map->id;
3351 			insns[i + 1].imm = off;
3352 			continue;
3353 		}
3354 	}
3355 
3356 	return insns;
3357 }
3358 
3359 static int set_info_rec_size(struct bpf_prog_info *info)
3360 {
3361 	/*
3362 	 * Ensure info.*_rec_size is the same as the kernel's expected size
3363 	 *
3364 	 * or
3365 	 *
3366 	 * Only allow zero *_rec_size if both _rec_size and _cnt are
3367 	 * zero.  In this case, the kernel will set the expected
3368 	 * _rec_size back to the info.
3369 	 */
3370 
3371 	if ((info->nr_func_info || info->func_info_rec_size) &&
3372 	    info->func_info_rec_size != sizeof(struct bpf_func_info))
3373 		return -EINVAL;
3374 
3375 	if ((info->nr_line_info || info->line_info_rec_size) &&
3376 	    info->line_info_rec_size != sizeof(struct bpf_line_info))
3377 		return -EINVAL;
3378 
3379 	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3380 	    info->jited_line_info_rec_size != sizeof(__u64))
3381 		return -EINVAL;
3382 
3383 	info->func_info_rec_size = sizeof(struct bpf_func_info);
3384 	info->line_info_rec_size = sizeof(struct bpf_line_info);
3385 	info->jited_line_info_rec_size = sizeof(__u64);
3386 
3387 	return 0;
3388 }
3389 
3390 static int bpf_prog_get_info_by_fd(struct file *file,
3391 				   struct bpf_prog *prog,
3392 				   const union bpf_attr *attr,
3393 				   union bpf_attr __user *uattr)
3394 {
3395 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3396 	struct bpf_prog_info info;
3397 	u32 info_len = attr->info.info_len;
3398 	struct bpf_prog_stats stats;
3399 	char __user *uinsns;
3400 	u32 ulen;
3401 	int err;
3402 
3403 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3404 	if (err)
3405 		return err;
3406 	info_len = min_t(u32, sizeof(info), info_len);
3407 
3408 	memset(&info, 0, sizeof(info));
3409 	if (copy_from_user(&info, uinfo, info_len))
3410 		return -EFAULT;
3411 
3412 	info.type = prog->type;
3413 	info.id = prog->aux->id;
3414 	info.load_time = prog->aux->load_time;
3415 	info.created_by_uid = from_kuid_munged(current_user_ns(),
3416 					       prog->aux->user->uid);
3417 	info.gpl_compatible = prog->gpl_compatible;
3418 
3419 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
3420 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3421 
3422 	mutex_lock(&prog->aux->used_maps_mutex);
3423 	ulen = info.nr_map_ids;
3424 	info.nr_map_ids = prog->aux->used_map_cnt;
3425 	ulen = min_t(u32, info.nr_map_ids, ulen);
3426 	if (ulen) {
3427 		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3428 		u32 i;
3429 
3430 		for (i = 0; i < ulen; i++)
3431 			if (put_user(prog->aux->used_maps[i]->id,
3432 				     &user_map_ids[i])) {
3433 				mutex_unlock(&prog->aux->used_maps_mutex);
3434 				return -EFAULT;
3435 			}
3436 	}
3437 	mutex_unlock(&prog->aux->used_maps_mutex);
3438 
3439 	err = set_info_rec_size(&info);
3440 	if (err)
3441 		return err;
3442 
3443 	bpf_prog_get_stats(prog, &stats);
3444 	info.run_time_ns = stats.nsecs;
3445 	info.run_cnt = stats.cnt;
3446 	info.recursion_misses = stats.misses;
3447 
3448 	if (!bpf_capable()) {
3449 		info.jited_prog_len = 0;
3450 		info.xlated_prog_len = 0;
3451 		info.nr_jited_ksyms = 0;
3452 		info.nr_jited_func_lens = 0;
3453 		info.nr_func_info = 0;
3454 		info.nr_line_info = 0;
3455 		info.nr_jited_line_info = 0;
3456 		goto done;
3457 	}
3458 
3459 	ulen = info.xlated_prog_len;
3460 	info.xlated_prog_len = bpf_prog_insn_size(prog);
3461 	if (info.xlated_prog_len && ulen) {
3462 		struct bpf_insn *insns_sanitized;
3463 		bool fault;
3464 
3465 		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3466 			info.xlated_prog_insns = 0;
3467 			goto done;
3468 		}
3469 		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3470 		if (!insns_sanitized)
3471 			return -ENOMEM;
3472 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3473 		ulen = min_t(u32, info.xlated_prog_len, ulen);
3474 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
3475 		kfree(insns_sanitized);
3476 		if (fault)
3477 			return -EFAULT;
3478 	}
3479 
3480 	if (bpf_prog_is_dev_bound(prog->aux)) {
3481 		err = bpf_prog_offload_info_fill(&info, prog);
3482 		if (err)
3483 			return err;
3484 		goto done;
3485 	}
3486 
3487 	/* NOTE: the following code is supposed to be skipped for offload.
3488 	 * bpf_prog_offload_info_fill() is the place to fill similar fields
3489 	 * for offload.
3490 	 */
3491 	ulen = info.jited_prog_len;
3492 	if (prog->aux->func_cnt) {
3493 		u32 i;
3494 
3495 		info.jited_prog_len = 0;
3496 		for (i = 0; i < prog->aux->func_cnt; i++)
3497 			info.jited_prog_len += prog->aux->func[i]->jited_len;
3498 	} else {
3499 		info.jited_prog_len = prog->jited_len;
3500 	}
3501 
3502 	if (info.jited_prog_len && ulen) {
3503 		if (bpf_dump_raw_ok(file->f_cred)) {
3504 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
3505 			ulen = min_t(u32, info.jited_prog_len, ulen);
3506 
3507 			/* for multi-function programs, copy the JITed
3508 			 * instructions for all the functions
3509 			 */
3510 			if (prog->aux->func_cnt) {
3511 				u32 len, free, i;
3512 				u8 *img;
3513 
3514 				free = ulen;
3515 				for (i = 0; i < prog->aux->func_cnt; i++) {
3516 					len = prog->aux->func[i]->jited_len;
3517 					len = min_t(u32, len, free);
3518 					img = (u8 *) prog->aux->func[i]->bpf_func;
3519 					if (copy_to_user(uinsns, img, len))
3520 						return -EFAULT;
3521 					uinsns += len;
3522 					free -= len;
3523 					if (!free)
3524 						break;
3525 				}
3526 			} else {
3527 				if (copy_to_user(uinsns, prog->bpf_func, ulen))
3528 					return -EFAULT;
3529 			}
3530 		} else {
3531 			info.jited_prog_insns = 0;
3532 		}
3533 	}
3534 
3535 	ulen = info.nr_jited_ksyms;
3536 	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3537 	if (ulen) {
3538 		if (bpf_dump_raw_ok(file->f_cred)) {
3539 			unsigned long ksym_addr;
3540 			u64 __user *user_ksyms;
3541 			u32 i;
3542 
3543 			/* copy the address of the kernel symbol
3544 			 * corresponding to each function
3545 			 */
3546 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3547 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3548 			if (prog->aux->func_cnt) {
3549 				for (i = 0; i < ulen; i++) {
3550 					ksym_addr = (unsigned long)
3551 						prog->aux->func[i]->bpf_func;
3552 					if (put_user((u64) ksym_addr,
3553 						     &user_ksyms[i]))
3554 						return -EFAULT;
3555 				}
3556 			} else {
3557 				ksym_addr = (unsigned long) prog->bpf_func;
3558 				if (put_user((u64) ksym_addr, &user_ksyms[0]))
3559 					return -EFAULT;
3560 			}
3561 		} else {
3562 			info.jited_ksyms = 0;
3563 		}
3564 	}
3565 
3566 	ulen = info.nr_jited_func_lens;
3567 	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3568 	if (ulen) {
3569 		if (bpf_dump_raw_ok(file->f_cred)) {
3570 			u32 __user *user_lens;
3571 			u32 func_len, i;
3572 
3573 			/* copy the JITed image lengths for each function */
3574 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3575 			user_lens = u64_to_user_ptr(info.jited_func_lens);
3576 			if (prog->aux->func_cnt) {
3577 				for (i = 0; i < ulen; i++) {
3578 					func_len =
3579 						prog->aux->func[i]->jited_len;
3580 					if (put_user(func_len, &user_lens[i]))
3581 						return -EFAULT;
3582 				}
3583 			} else {
3584 				func_len = prog->jited_len;
3585 				if (put_user(func_len, &user_lens[0]))
3586 					return -EFAULT;
3587 			}
3588 		} else {
3589 			info.jited_func_lens = 0;
3590 		}
3591 	}
3592 
3593 	if (prog->aux->btf)
3594 		info.btf_id = btf_obj_id(prog->aux->btf);
3595 
3596 	ulen = info.nr_func_info;
3597 	info.nr_func_info = prog->aux->func_info_cnt;
3598 	if (info.nr_func_info && ulen) {
3599 		char __user *user_finfo;
3600 
3601 		user_finfo = u64_to_user_ptr(info.func_info);
3602 		ulen = min_t(u32, info.nr_func_info, ulen);
3603 		if (copy_to_user(user_finfo, prog->aux->func_info,
3604 				 info.func_info_rec_size * ulen))
3605 			return -EFAULT;
3606 	}
3607 
3608 	ulen = info.nr_line_info;
3609 	info.nr_line_info = prog->aux->nr_linfo;
3610 	if (info.nr_line_info && ulen) {
3611 		__u8 __user *user_linfo;
3612 
3613 		user_linfo = u64_to_user_ptr(info.line_info);
3614 		ulen = min_t(u32, info.nr_line_info, ulen);
3615 		if (copy_to_user(user_linfo, prog->aux->linfo,
3616 				 info.line_info_rec_size * ulen))
3617 			return -EFAULT;
3618 	}
3619 
3620 	ulen = info.nr_jited_line_info;
3621 	if (prog->aux->jited_linfo)
3622 		info.nr_jited_line_info = prog->aux->nr_linfo;
3623 	else
3624 		info.nr_jited_line_info = 0;
3625 	if (info.nr_jited_line_info && ulen) {
3626 		if (bpf_dump_raw_ok(file->f_cred)) {
3627 			__u64 __user *user_linfo;
3628 			u32 i;
3629 
3630 			user_linfo = u64_to_user_ptr(info.jited_line_info);
3631 			ulen = min_t(u32, info.nr_jited_line_info, ulen);
3632 			for (i = 0; i < ulen; i++) {
3633 				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3634 					     &user_linfo[i]))
3635 					return -EFAULT;
3636 			}
3637 		} else {
3638 			info.jited_line_info = 0;
3639 		}
3640 	}
3641 
3642 	ulen = info.nr_prog_tags;
3643 	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3644 	if (ulen) {
3645 		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3646 		u32 i;
3647 
3648 		user_prog_tags = u64_to_user_ptr(info.prog_tags);
3649 		ulen = min_t(u32, info.nr_prog_tags, ulen);
3650 		if (prog->aux->func_cnt) {
3651 			for (i = 0; i < ulen; i++) {
3652 				if (copy_to_user(user_prog_tags[i],
3653 						 prog->aux->func[i]->tag,
3654 						 BPF_TAG_SIZE))
3655 					return -EFAULT;
3656 			}
3657 		} else {
3658 			if (copy_to_user(user_prog_tags[0],
3659 					 prog->tag, BPF_TAG_SIZE))
3660 				return -EFAULT;
3661 		}
3662 	}
3663 
3664 done:
3665 	if (copy_to_user(uinfo, &info, info_len) ||
3666 	    put_user(info_len, &uattr->info.info_len))
3667 		return -EFAULT;
3668 
3669 	return 0;
3670 }
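
/* The nr_* arrays filled in above follow a two-call contract; caller-side
 * sketch for map IDs ('prog_fd' is illustrative):
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *	__u32 nr, *ids;
 *
 *	attr.info.bpf_fd = prog_fd;
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	attr.info.info_len = sizeof(info);
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *	nr = info.nr_map_ids;		// first call reports the count
 *
 *	ids = calloc(nr, sizeof(*ids));
 *	memset(&info, 0, sizeof(info));
 *	info.nr_map_ids = nr;
 *	info.map_ids = (__u64)(unsigned long)ids;
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *	// second call copies min(user, kernel) IDs into 'ids'
 */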
3671 
3672 static int bpf_map_get_info_by_fd(struct file *file,
3673 				  struct bpf_map *map,
3674 				  const union bpf_attr *attr,
3675 				  union bpf_attr __user *uattr)
3676 {
3677 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3678 	struct bpf_map_info info;
3679 	u32 info_len = attr->info.info_len;
3680 	int err;
3681 
3682 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3683 	if (err)
3684 		return err;
3685 	info_len = min_t(u32, sizeof(info), info_len);
3686 
3687 	memset(&info, 0, sizeof(info));
3688 	info.type = map->map_type;
3689 	info.id = map->id;
3690 	info.key_size = map->key_size;
3691 	info.value_size = map->value_size;
3692 	info.max_entries = map->max_entries;
3693 	info.map_flags = map->map_flags;
3694 	memcpy(info.name, map->name, sizeof(map->name));
3695 
3696 	if (map->btf) {
3697 		info.btf_id = btf_obj_id(map->btf);
3698 		info.btf_key_type_id = map->btf_key_type_id;
3699 		info.btf_value_type_id = map->btf_value_type_id;
3700 	}
3701 	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3702 
3703 	if (bpf_map_is_dev_bound(map)) {
3704 		err = bpf_map_offload_info_fill(&info, map);
3705 		if (err)
3706 			return err;
3707 	}
3708 
3709 	if (copy_to_user(uinfo, &info, info_len) ||
3710 	    put_user(info_len, &uattr->info.info_len))
3711 		return -EFAULT;
3712 
3713 	return 0;
3714 }
3715 
3716 static int bpf_btf_get_info_by_fd(struct file *file,
3717 				  struct btf *btf,
3718 				  const union bpf_attr *attr,
3719 				  union bpf_attr __user *uattr)
3720 {
3721 	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3722 	u32 info_len = attr->info.info_len;
3723 	int err;
3724 
3725 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
3726 	if (err)
3727 		return err;
3728 
3729 	return btf_get_info_by_fd(btf, attr, uattr);
3730 }
3731 
3732 static int bpf_link_get_info_by_fd(struct file *file,
3733 				  struct bpf_link *link,
3734 				  const union bpf_attr *attr,
3735 				  union bpf_attr __user *uattr)
3736 {
3737 	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3738 	struct bpf_link_info info;
3739 	u32 info_len = attr->info.info_len;
3740 	int err;
3741 
3742 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3743 	if (err)
3744 		return err;
3745 	info_len = min_t(u32, sizeof(info), info_len);
3746 
3747 	memset(&info, 0, sizeof(info));
3748 	if (copy_from_user(&info, uinfo, info_len))
3749 		return -EFAULT;
3750 
3751 	info.type = link->type;
3752 	info.id = link->id;
3753 	info.prog_id = link->prog->aux->id;
3754 
3755 	if (link->ops->fill_link_info) {
3756 		err = link->ops->fill_link_info(link, &info);
3757 		if (err)
3758 			return err;
3759 	}
3760 
3761 	if (copy_to_user(uinfo, &info, info_len) ||
3762 	    put_user(info_len, &uattr->info.info_len))
3763 		return -EFAULT;
3764 
3765 	return 0;
3766 }
3767 
3769 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3770 
3771 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3772 				  union bpf_attr __user *uattr)
3773 {
3774 	int ufd = attr->info.bpf_fd;
3775 	struct fd f;
3776 	int err;
3777 
3778 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3779 		return -EINVAL;
3780 
3781 	f = fdget(ufd);
3782 	if (!f.file)
3783 		return -EBADFD;
3784 
3785 	if (f.file->f_op == &bpf_prog_fops)
3786 		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
3787 					      uattr);
3788 	else if (f.file->f_op == &bpf_map_fops)
3789 		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
3790 					     uattr);
3791 	else if (f.file->f_op == &btf_fops)
3792 		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
3793 	else if (f.file->f_op == &bpf_link_fops)
3794 		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
3795 					      attr, uattr);
3796 	else
3797 		err = -EINVAL;
3798 
3799 	fdput(f);
3800 	return err;
3801 }
3802 
3803 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3804 
3805 static int bpf_btf_load(const union bpf_attr *attr)
3806 {
3807 	if (CHECK_ATTR(BPF_BTF_LOAD))
3808 		return -EINVAL;
3809 
3810 	if (!bpf_capable())
3811 		return -EPERM;
3812 
3813 	return btf_new_fd(attr);
3814 }
3815 
3816 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3817 
3818 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3819 {
3820 	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
3821 		return -EINVAL;
3822 
3823 	if (!capable(CAP_SYS_ADMIN))
3824 		return -EPERM;
3825 
3826 	return btf_get_fd_by_id(attr->btf_id);
3827 }
3828 
3829 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
3830 				    union bpf_attr __user *uattr,
3831 				    u32 prog_id, u32 fd_type,
3832 				    const char *buf, u64 probe_offset,
3833 				    u64 probe_addr)
3834 {
3835 	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
3836 	u32 len = buf ? strlen(buf) : 0, input_len;
3837 	int err = 0;
3838 
3839 	if (put_user(len, &uattr->task_fd_query.buf_len))
3840 		return -EFAULT;
3841 	input_len = attr->task_fd_query.buf_len;
3842 	if (input_len && ubuf) {
3843 		if (!len) {
3844 			/* nothing to copy, just make ubuf NULL terminated */
3845 			char zero = '\0';
3846 
3847 			if (put_user(zero, ubuf))
3848 				return -EFAULT;
3849 		} else if (input_len >= len + 1) {
3850 			/* ubuf can hold the string with NULL terminator */
3851 			if (copy_to_user(ubuf, buf, len + 1))
3852 				return -EFAULT;
3853 		} else {
3854 			/* ubuf cannot hold the string with NULL terminator,
3855 			 * do a partial copy with NULL terminator.
3856 			 */
3857 			char zero = '\0';
3858 
3859 			err = -ENOSPC;
3860 			if (copy_to_user(ubuf, buf, input_len - 1))
3861 				return -EFAULT;
3862 			if (put_user(zero, ubuf + input_len - 1))
3863 				return -EFAULT;
3864 		}
3865 	}
3866 
3867 	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
3868 	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
3869 	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
3870 	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
3871 		return -EFAULT;
3872 
3873 	return err;
3874 }
3875 
3876 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
3877 
3878 static int bpf_task_fd_query(const union bpf_attr *attr,
3879 			     union bpf_attr __user *uattr)
3880 {
3881 	pid_t pid = attr->task_fd_query.pid;
3882 	u32 fd = attr->task_fd_query.fd;
3883 	const struct perf_event *event;
3884 	struct task_struct *task;
3885 	struct file *file;
3886 	int err;
3887 
3888 	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
3889 		return -EINVAL;
3890 
3891 	if (!capable(CAP_SYS_ADMIN))
3892 		return -EPERM;
3893 
3894 	if (attr->task_fd_query.flags != 0)
3895 		return -EINVAL;
3896 
3897 	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3898 	if (!task)
3899 		return -ENOENT;
3900 
3901 	err = 0;
3902 	file = fget_task(task, fd);
3903 	put_task_struct(task);
3904 	if (!file)
3905 		return -EBADF;
3906 
3907 	if (file->f_op == &bpf_link_fops) {
3908 		struct bpf_link *link = file->private_data;
3909 
3910 		if (link->ops == &bpf_raw_tp_link_lops) {
3911 			struct bpf_raw_tp_link *raw_tp =
3912 				container_of(link, struct bpf_raw_tp_link, link);
3913 			struct bpf_raw_event_map *btp = raw_tp->btp;
3914 
3915 			err = bpf_task_fd_query_copy(attr, uattr,
3916 						     raw_tp->link.prog->aux->id,
3917 						     BPF_FD_TYPE_RAW_TRACEPOINT,
3918 						     btp->tp->name, 0, 0);
3919 			goto put_file;
3920 		}
3921 		goto out_not_supp;
3922 	}
3923 
3924 	event = perf_get_event(file);
3925 	if (!IS_ERR(event)) {
3926 		u64 probe_offset, probe_addr;
3927 		u32 prog_id, fd_type;
3928 		const char *buf;
3929 
3930 		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
3931 					      &buf, &probe_offset,
3932 					      &probe_addr);
3933 		if (!err)
3934 			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
3935 						     fd_type, buf,
3936 						     probe_offset,
3937 						     probe_addr);
3938 		goto put_file;
3939 	}
3940 
3941 out_not_supp:
3942 	err = -ENOTSUPP;
3943 put_file:
3944 	fput(file);
3945 	return err;
3946 }
3947 
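/* Common entry point for the four BPF_MAP_*_BATCH commands. The fd-based
 * permission checks mirror the single-element ops: lookups need
 * FMODE_CAN_READ, anything that mutates the map needs FMODE_CAN_WRITE, and
 * lookup-and-delete therefore needs both. A map type that does not
 * implement a given batch op leaves the callback NULL, which BPF_DO_BATCH()
 * turns into -ENOTSUPP.
 *
 * A hedged userspace sketch of one lookup batch (illustrative; assumes a
 * map with 4-byte keys, and keys[]/vals[] sized for "count" entries):
 *
 *	union bpf_attr attr = {};
 *	__u32 cursor = 0;
 *	attr.batch.map_fd    = map_fd;
 *	attr.batch.in_batch  = 0;	// start from the beginning
 *	attr.batch.out_batch = (__u64)(unsigned long)&cursor;
 *	attr.batch.keys      = (__u64)(unsigned long)keys;
 *	attr.batch.values    = (__u64)(unsigned long)vals;
 *	attr.batch.count     = count;	// in: room; out: entries filled
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 */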
3948 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
3949 
3950 #define BPF_DO_BATCH(fn)			\
3951 	do {					\
3952 		if (!fn) {			\
3953 			err = -ENOTSUPP;	\
3954 			goto err_put;		\
3955 		}				\
3956 		err = fn(map, attr, uattr);	\
3957 	} while (0)
3958 
3959 static int bpf_map_do_batch(const union bpf_attr *attr,
3960 			    union bpf_attr __user *uattr,
3961 			    int cmd)
3962 {
3963 	struct bpf_map *map;
3964 	int err, ufd;
3965 	struct fd f;
3966 
3967 	if (CHECK_ATTR(BPF_MAP_BATCH))
3968 		return -EINVAL;
3969 
3970 	ufd = attr->batch.map_fd;
3971 	f = fdget(ufd);
3972 	map = __bpf_map_get(f);
3973 	if (IS_ERR(map))
3974 		return PTR_ERR(map);
3975 
3976 	if ((cmd == BPF_MAP_LOOKUP_BATCH ||
3977 	     cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
3978 	    !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
3979 		err = -EPERM;
3980 		goto err_put;
3981 	}
3982 
3983 	if (cmd != BPF_MAP_LOOKUP_BATCH &&
3984 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
3985 		err = -EPERM;
3986 		goto err_put;
3987 	}
3988 
3989 	if (cmd == BPF_MAP_LOOKUP_BATCH)
3990 		BPF_DO_BATCH(map->ops->map_lookup_batch);
3991 	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
3992 		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
3993 	else if (cmd == BPF_MAP_UPDATE_BATCH)
3994 		BPF_DO_BATCH(map->ops->map_update_batch);
3995 	else
3996 		BPF_DO_BATCH(map->ops->map_delete_batch);
3997 
3998 err_put:
3999 	fdput(f);
4000 	return err;
4001 }
4002 
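/* Resolve BPF_LINK_CREATE for tracing-flavoured programs. Iterator programs
 * (BPF_TRACE_ITER) become bpf_iter links; freplace programs
 * (BPF_PROG_TYPE_EXT) become tracing links bound to their target. At this
 * revision other tracing attachments still go through
 * BPF_RAW_TRACEPOINT_OPEN, so everything else is -EINVAL here.
 */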
4003 static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
4004 {
4005 	if (attr->link_create.attach_type != prog->expected_attach_type)
4006 		return -EINVAL;
4007 
4008 	if (prog->expected_attach_type == BPF_TRACE_ITER)
4009 		return bpf_iter_link_attach(attr, prog);
4010 	else if (prog->type == BPF_PROG_TYPE_EXT)
4011 		return bpf_tracing_prog_attach(prog,
4012 					       attr->link_create.target_fd,
4013 					       attr->link_create.target_btf_id);
4014 	return -EINVAL;
4015 }
4016 
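/* BPF_LINK_CREATE: attach an already-loaded program and materialize the
 * attachment as a bpf_link with its own fd. EXT programs are dispatched
 * before the attach_type/prog_type consistency check because what they
 * attach to is defined by target_fd/target_btf_id, not by a program type
 * derived from attach_type.
 */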
4017 #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
4018 static int link_create(union bpf_attr *attr)
4019 {
4020 	enum bpf_prog_type ptype;
4021 	struct bpf_prog *prog;
4022 	int ret;
4023 
4024 	if (CHECK_ATTR(BPF_LINK_CREATE))
4025 		return -EINVAL;
4026 
4027 	prog = bpf_prog_get(attr->link_create.prog_fd);
4028 	if (IS_ERR(prog))
4029 		return PTR_ERR(prog);
4030 
4031 	ret = bpf_prog_attach_check_attach_type(prog,
4032 						attr->link_create.attach_type);
4033 	if (ret)
4034 		goto out;
4035 
4036 	if (prog->type == BPF_PROG_TYPE_EXT) {
4037 		ret = tracing_bpf_link_attach(attr, prog);
4038 		goto out;
4039 	}
4040 
4041 	ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4042 	if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4043 		ret = -EINVAL;
4044 		goto out;
4045 	}
4046 
4047 	switch (ptype) {
4048 	case BPF_PROG_TYPE_CGROUP_SKB:
4049 	case BPF_PROG_TYPE_CGROUP_SOCK:
4050 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4051 	case BPF_PROG_TYPE_SOCK_OPS:
4052 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4053 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4054 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4055 		ret = cgroup_bpf_link_attach(attr, prog);
4056 		break;
4057 	case BPF_PROG_TYPE_TRACING:
4058 		ret = tracing_bpf_link_attach(attr, prog);
4059 		break;
4060 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4061 	case BPF_PROG_TYPE_SK_LOOKUP:
4062 		ret = netns_bpf_link_create(attr, prog);
4063 		break;
4064 #ifdef CONFIG_NET
4065 	case BPF_PROG_TYPE_XDP:
4066 		ret = bpf_xdp_link_attach(attr, prog);
4067 		break;
4068 #endif
4069 	default:
4070 		ret = -EINVAL;
4071 	}
4072 
4073 out:
4074 	if (ret < 0)
4075 		bpf_prog_put(prog);
4076 	return ret;
4077 }
4078 
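/* BPF_LINK_UPDATE: atomically swap the program behind an existing link.
 * With BPF_F_REPLACE the caller also names the program it expects to be
 * replacing, giving compare-and-swap semantics; without it, old_prog_fd
 * must be zero. Links whose ops lack ->update_prog cannot be updated.
 *
 * Sketch of the compare-and-swap form from userspace (illustrative only):
 *
 *	union bpf_attr attr = {};
 *	attr.link_update.link_fd     = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.old_prog_fd = old_prog_fd;
 *	attr.link_update.flags       = BPF_F_REPLACE;
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 */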
4079 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
4080 
4081 static int link_update(union bpf_attr *attr)
4082 {
4083 	struct bpf_prog *old_prog = NULL, *new_prog;
4084 	struct bpf_link *link;
4085 	u32 flags;
4086 	int ret;
4087 
4088 	if (CHECK_ATTR(BPF_LINK_UPDATE))
4089 		return -EINVAL;
4090 
4091 	flags = attr->link_update.flags;
4092 	if (flags & ~BPF_F_REPLACE)
4093 		return -EINVAL;
4094 
4095 	link = bpf_link_get_from_fd(attr->link_update.link_fd);
4096 	if (IS_ERR(link))
4097 		return PTR_ERR(link);
4098 
4099 	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
4100 	if (IS_ERR(new_prog)) {
4101 		ret = PTR_ERR(new_prog);
4102 		goto out_put_link;
4103 	}
4104 
4105 	if (flags & BPF_F_REPLACE) {
4106 		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
4107 		if (IS_ERR(old_prog)) {
4108 			ret = PTR_ERR(old_prog);
4109 			old_prog = NULL;
4110 			goto out_put_progs;
4111 		}
4112 	} else if (attr->link_update.old_prog_fd) {
4113 		ret = -EINVAL;
4114 		goto out_put_progs;
4115 	}
4116 
4117 	if (link->ops->update_prog)
4118 		ret = link->ops->update_prog(link, new_prog, old_prog);
4119 	else
4120 		ret = -EINVAL;
4121 
4122 out_put_progs:
4123 	if (old_prog)
4124 		bpf_prog_put(old_prog);
4125 	if (ret)
4126 		bpf_prog_put(new_prog);
4127 out_put_link:
4128 	bpf_link_put(link);
4129 	return ret;
4130 }
4131 
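/* BPF_LINK_DETACH: force-detach a link from its hook while the fd (and thus
 * the link object) stays alive; the link is observed as defunct afterwards.
 * Only link types that implement ->detach support this.
 */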
4132 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
4133 
4134 static int link_detach(union bpf_attr *attr)
4135 {
4136 	struct bpf_link *link;
4137 	int ret;
4138 
4139 	if (CHECK_ATTR(BPF_LINK_DETACH))
4140 		return -EINVAL;
4141 
4142 	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4143 	if (IS_ERR(link))
4144 		return PTR_ERR(link);
4145 
4146 	if (link->ops->detach)
4147 		ret = link->ops->detach(link);
4148 	else
4149 		ret = -EOPNOTSUPP;
4150 
4151 	bpf_link_put(link);
4152 	return ret;
4153 }
4154 
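/* Take a new reference on a link, unless its refcount has already dropped
 * to zero because the link is concurrently being freed. Needed by the
 * ID -> link lookup below, which races with link destruction.
 */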
4155 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
4156 {
4157 	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
4158 }
4159 
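/* Look up a link by its global ID under link_idr_lock and acquire a
 * reference. A link that is still being created sits in the IDR with
 * link->id == 0 and is reported as -EAGAIN; -ENOENT means no live link has
 * this ID.
 */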
4160 struct bpf_link *bpf_link_by_id(u32 id)
4161 {
4162 	struct bpf_link *link;
4163 
4164 	if (!id)
4165 		return ERR_PTR(-ENOENT);
4166 
4167 	spin_lock_bh(&link_idr_lock);
4168 	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
4169 	link = idr_find(&link_idr, id);
4170 	if (link) {
4171 		if (link->id)
4172 			link = bpf_link_inc_not_zero(link);
4173 		else
4174 			link = ERR_PTR(-EAGAIN);
4175 	} else {
4176 		link = ERR_PTR(-ENOENT);
4177 	}
4178 	spin_unlock_bh(&link_idr_lock);
4179 	return link;
4180 }
4181 
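/* BPF_LINK_GET_FD_BY_ID: mint a new fd for the link with the given global
 * ID. CAP_SYS_ADMIN only, for the same global-namespace reason as the
 * prog/map/BTF variants.
 */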
4182 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4183 
4184 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4185 {
4186 	struct bpf_link *link;
4187 	u32 id = attr->link_id;
4188 	int fd;
4189 
4190 	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4191 		return -EINVAL;
4192 
4193 	if (!capable(CAP_SYS_ADMIN))
4194 		return -EPERM;
4195 
4196 	link = bpf_link_by_id(id);
4197 	if (IS_ERR(link))
4198 		return PTR_ERR(link);
4199 
4200 	fd = bpf_link_new_fd(link);
4201 	if (fd < 0)
4202 		bpf_link_put(link);
4203 
4204 	return fd;
4205 }
4206 
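/* BPF_ENABLE_STATS plumbing. Run-time statistics collection is keyed off a
 * static branch and handed out as an anonymous "bpf-stats" fd: the key is
 * incremented once per fd created and decremented from ->release(), so
 * stats stay enabled exactly as long as at least one such fd is open.
 * bpf_stats_enabled_mutex serializes the inc/dec against the count check.
 */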
4207 DEFINE_MUTEX(bpf_stats_enabled_mutex);
4208 
4209 static int bpf_stats_release(struct inode *inode, struct file *file)
4210 {
4211 	mutex_lock(&bpf_stats_enabled_mutex);
4212 	static_key_slow_dec(&bpf_stats_enabled_key.key);
4213 	mutex_unlock(&bpf_stats_enabled_mutex);
4214 	return 0;
4215 }
4216 
4217 static const struct file_operations bpf_stats_fops = {
4218 	.release = bpf_stats_release,
4219 };
4220 
4221 static int bpf_enable_runtime_stats(void)
4222 {
4223 	int fd;
4224 
4225 	mutex_lock(&bpf_stats_enabled_mutex);
4226 
4227 	/* Refuse new users well before the static key count can overflow */
4228 	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4229 		mutex_unlock(&bpf_stats_enabled_mutex);
4230 		return -EBUSY;
4231 	}
4232 
4233 	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4234 	if (fd >= 0)
4235 		static_key_slow_inc(&bpf_stats_enabled_key.key);
4236 
4237 	mutex_unlock(&bpf_stats_enabled_mutex);
4238 	return fd;
4239 }
4240 
4241 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4242 
4243 static int bpf_enable_stats(union bpf_attr *attr)
4244 {
4245 
4246 	if (CHECK_ATTR(BPF_ENABLE_STATS))
4247 		return -EINVAL;
4248 
4249 	if (!capable(CAP_SYS_ADMIN))
4250 		return -EPERM;
4251 
4252 	switch (attr->enable_stats.type) {
4253 	case BPF_STATS_RUN_TIME:
4254 		return bpf_enable_runtime_stats();
4255 	default:
4256 		break;
4257 	}
4258 	return -EINVAL;
4259 }
4260 
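/* BPF_ITER_CREATE: given the fd of an attached bpf_iter link, create the
 * seq_file-backed iterator fd that userspace actually read(2)s. The link
 * reference taken here is dropped once bpf_iter_new_fd() has taken its own.
 */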
4261 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4262 
4263 static int bpf_iter_create(union bpf_attr *attr)
4264 {
4265 	struct bpf_link *link;
4266 	int err;
4267 
4268 	if (CHECK_ATTR(BPF_ITER_CREATE))
4269 		return -EINVAL;
4270 
4271 	if (attr->iter_create.flags)
4272 		return -EINVAL;
4273 
4274 	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4275 	if (IS_ERR(link))
4276 		return PTR_ERR(link);
4277 
4278 	err = bpf_iter_new_fd(link);
4279 	bpf_link_put(link);
4280 
4281 	return err;
4282 }
4283 
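/* BPF_PROG_BIND_MAP: record a map in a program's used_maps[] after load, so
 * the map lives at least as long as the program. This exists for maps the
 * program never references from its instructions (e.g. metadata maps
 * created by tooling). Binding a map that is already bound is a successful
 * no-op; the extra reference is dropped.
 */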
4284 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
4285 
4286 static int bpf_prog_bind_map(union bpf_attr *attr)
4287 {
4288 	struct bpf_prog *prog;
4289 	struct bpf_map *map;
4290 	struct bpf_map **used_maps_old, **used_maps_new;
4291 	int i, ret = 0;
4292 
4293 	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
4294 		return -EINVAL;
4295 
4296 	if (attr->prog_bind_map.flags)
4297 		return -EINVAL;
4298 
4299 	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4300 	if (IS_ERR(prog))
4301 		return PTR_ERR(prog);
4302 
4303 	map = bpf_map_get(attr->prog_bind_map.map_fd);
4304 	if (IS_ERR(map)) {
4305 		ret = PTR_ERR(map);
4306 		goto out_prog_put;
4307 	}
4308 
4309 	mutex_lock(&prog->aux->used_maps_mutex);
4310 
4311 	used_maps_old = prog->aux->used_maps;
4312 
4313 	for (i = 0; i < prog->aux->used_map_cnt; i++)
4314 		if (used_maps_old[i] == map) {
4315 			bpf_map_put(map);
4316 			goto out_unlock;
4317 		}
4318 
4319 	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
4320 				      sizeof(used_maps_new[0]),
4321 				      GFP_KERNEL);
4322 	if (!used_maps_new) {
4323 		ret = -ENOMEM;
4324 		goto out_unlock;
4325 	}
4326 
4327 	memcpy(used_maps_new, used_maps_old,
4328 	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4329 	used_maps_new[prog->aux->used_map_cnt] = map;
4330 
4331 	prog->aux->used_map_cnt++;
4332 	prog->aux->used_maps = used_maps_new;
4333 
4334 	kfree(used_maps_old);
4335 
4336 out_unlock:
4337 	mutex_unlock(&prog->aux->used_maps_mutex);
4338 
4339 	if (ret)
4340 		bpf_map_put(map);
4341 out_prog_put:
4342 	bpf_prog_put(prog);
4343 	return ret;
4344 }
4345 
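/* The bpf(2) entry point. The caller passes attr by size: trailing bytes
 * beyond what this kernel understands must be zero
 * (bpf_check_uarg_tail_zero()), so newer userspace fails cleanly instead of
 * having unknown fields silently ignored, while shorter attrs from older
 * userspace are zero-extended by the memset() below.
 *
 * A minimal userspace sketch (illustrative, not part of this file) -- libc
 * ships no bpf() wrapper, so raw syscall(2) is the norm:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	union bpf_attr attr = {};
 *	attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 1;
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */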
4346 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
4347 {
4348 	union bpf_attr attr;
4349 	int err;
4350 
4351 	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4352 		return -EPERM;
4353 
4354 	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4355 	if (err)
4356 		return err;
4357 	size = min_t(u32, size, sizeof(attr));
4358 
4359 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
4360 	memset(&attr, 0, sizeof(attr));
4361 	if (copy_from_user(&attr, uattr, size) != 0)
4362 		return -EFAULT;
4363 
4364 	err = security_bpf(cmd, &attr, size);
4365 	if (err < 0)
4366 		return err;
4367 
4368 	switch (cmd) {
4369 	case BPF_MAP_CREATE:
4370 		err = map_create(&attr);
4371 		break;
4372 	case BPF_MAP_LOOKUP_ELEM:
4373 		err = map_lookup_elem(&attr);
4374 		break;
4375 	case BPF_MAP_UPDATE_ELEM:
4376 		err = map_update_elem(&attr);
4377 		break;
4378 	case BPF_MAP_DELETE_ELEM:
4379 		err = map_delete_elem(&attr);
4380 		break;
4381 	case BPF_MAP_GET_NEXT_KEY:
4382 		err = map_get_next_key(&attr);
4383 		break;
4384 	case BPF_MAP_FREEZE:
4385 		err = map_freeze(&attr);
4386 		break;
4387 	case BPF_PROG_LOAD:
4388 		err = bpf_prog_load(&attr, uattr);
4389 		break;
4390 	case BPF_OBJ_PIN:
4391 		err = bpf_obj_pin(&attr);
4392 		break;
4393 	case BPF_OBJ_GET:
4394 		err = bpf_obj_get(&attr);
4395 		break;
4396 	case BPF_PROG_ATTACH:
4397 		err = bpf_prog_attach(&attr);
4398 		break;
4399 	case BPF_PROG_DETACH:
4400 		err = bpf_prog_detach(&attr);
4401 		break;
4402 	case BPF_PROG_QUERY:
4403 		err = bpf_prog_query(&attr, uattr);
4404 		break;
4405 	case BPF_PROG_TEST_RUN:
4406 		err = bpf_prog_test_run(&attr, uattr);
4407 		break;
4408 	case BPF_PROG_GET_NEXT_ID:
4409 		err = bpf_obj_get_next_id(&attr, uattr,
4410 					  &prog_idr, &prog_idr_lock);
4411 		break;
4412 	case BPF_MAP_GET_NEXT_ID:
4413 		err = bpf_obj_get_next_id(&attr, uattr,
4414 					  &map_idr, &map_idr_lock);
4415 		break;
4416 	case BPF_BTF_GET_NEXT_ID:
4417 		err = bpf_obj_get_next_id(&attr, uattr,
4418 					  &btf_idr, &btf_idr_lock);
4419 		break;
4420 	case BPF_PROG_GET_FD_BY_ID:
4421 		err = bpf_prog_get_fd_by_id(&attr);
4422 		break;
4423 	case BPF_MAP_GET_FD_BY_ID:
4424 		err = bpf_map_get_fd_by_id(&attr);
4425 		break;
4426 	case BPF_OBJ_GET_INFO_BY_FD:
4427 		err = bpf_obj_get_info_by_fd(&attr, uattr);
4428 		break;
4429 	case BPF_RAW_TRACEPOINT_OPEN:
4430 		err = bpf_raw_tracepoint_open(&attr);
4431 		break;
4432 	case BPF_BTF_LOAD:
4433 		err = bpf_btf_load(&attr);
4434 		break;
4435 	case BPF_BTF_GET_FD_BY_ID:
4436 		err = bpf_btf_get_fd_by_id(&attr);
4437 		break;
4438 	case BPF_TASK_FD_QUERY:
4439 		err = bpf_task_fd_query(&attr, uattr);
4440 		break;
4441 	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
4442 		err = map_lookup_and_delete_elem(&attr);
4443 		break;
4444 	case BPF_MAP_LOOKUP_BATCH:
4445 		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
4446 		break;
4447 	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
4448 		err = bpf_map_do_batch(&attr, uattr,
4449 				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
4450 		break;
4451 	case BPF_MAP_UPDATE_BATCH:
4452 		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
4453 		break;
4454 	case BPF_MAP_DELETE_BATCH:
4455 		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
4456 		break;
4457 	case BPF_LINK_CREATE:
4458 		err = link_create(&attr);
4459 		break;
4460 	case BPF_LINK_UPDATE:
4461 		err = link_update(&attr);
4462 		break;
4463 	case BPF_LINK_GET_FD_BY_ID:
4464 		err = bpf_link_get_fd_by_id(&attr);
4465 		break;
4466 	case BPF_LINK_GET_NEXT_ID:
4467 		err = bpf_obj_get_next_id(&attr, uattr,
4468 					  &link_idr, &link_idr_lock);
4469 		break;
4470 	case BPF_ENABLE_STATS:
4471 		err = bpf_enable_stats(&attr);
4472 		break;
4473 	case BPF_ITER_CREATE:
4474 		err = bpf_iter_create(&attr);
4475 		break;
4476 	case BPF_LINK_DETACH:
4477 		err = link_detach(&attr);
4478 		break;
4479 	case BPF_PROG_BIND_MAP:
4480 		err = bpf_prog_bind_map(&attr);
4481 		break;
4482 	default:
4483 		err = -EINVAL;
4484 		break;
4485 	}
4486 
4487 	return err;
4488 }
4489