xref: /linux/kernel/bpf/syscall.c (revision 4fc012daf9c074772421c904357abf586336b1ca)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/bpf-cgroup.h>
6 #include <linux/bpf_trace.h>
7 #include <linux/bpf_lirc.h>
8 #include <linux/bpf_verifier.h>
9 #include <linux/bsearch.h>
10 #include <linux/btf.h>
11 #include <linux/syscalls.h>
12 #include <linux/slab.h>
13 #include <linux/sched/signal.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mmzone.h>
16 #include <linux/anon_inodes.h>
17 #include <linux/fdtable.h>
18 #include <linux/file.h>
19 #include <linux/fs.h>
20 #include <linux/license.h>
21 #include <linux/filter.h>
22 #include <linux/kernel.h>
23 #include <linux/idr.h>
24 #include <linux/cred.h>
25 #include <linux/timekeeping.h>
26 #include <linux/ctype.h>
27 #include <linux/nospec.h>
28 #include <linux/audit.h>
29 #include <uapi/linux/btf.h>
30 #include <linux/pgtable.h>
31 #include <linux/bpf_lsm.h>
32 #include <linux/poll.h>
33 #include <linux/sort.h>
34 #include <linux/bpf-netns.h>
35 #include <linux/rcupdate_trace.h>
36 #include <linux/memcontrol.h>
37 #include <linux/trace_events.h>
38 #include <linux/tracepoint.h>
39 #include <linux/overflow.h>
40 
41 #include <net/netfilter/nf_bpf_link.h>
42 #include <net/netkit.h>
43 #include <net/tcx.h>
44 
45 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
46 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
47 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
48 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
49 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
50 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
51 			IS_FD_HASH(map))
52 
53 #define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
54 
55 DEFINE_PER_CPU(int, bpf_prog_active);
56 static DEFINE_IDR(prog_idr);
57 static DEFINE_SPINLOCK(prog_idr_lock);
58 static DEFINE_IDR(map_idr);
59 static DEFINE_SPINLOCK(map_idr_lock);
60 static DEFINE_IDR(link_idr);
61 static DEFINE_SPINLOCK(link_idr_lock);
62 
63 int sysctl_unprivileged_bpf_disabled __read_mostly =
64 	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
65 
66 static const struct bpf_map_ops * const bpf_map_types[] = {
67 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
68 #define BPF_MAP_TYPE(_id, _ops) \
69 	[_id] = &_ops,
70 #define BPF_LINK_TYPE(_id, _name)
71 #include <linux/bpf_types.h>
72 #undef BPF_PROG_TYPE
73 #undef BPF_MAP_TYPE
74 #undef BPF_LINK_TYPE
75 };
76 
77 /*
78  * If we're handed a bigger struct than we know of, ensure all the unknown bits
79  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
80  * we don't know about yet.
81  *
82  * There is a ToCToU window between this function call and the following
83  * copy_from_user() call. However, this is not a concern since this function
84  * is only meant to future-proof against unknown trailing bits.
85  */
86 int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
87 			     size_t expected_size,
88 			     size_t actual_size)
89 {
90 	int res;
91 
92 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
93 		return -E2BIG;
94 
95 	if (actual_size <= expected_size)
96 		return 0;
97 
98 	if (uaddr.is_kernel)
99 		res = memchr_inv(uaddr.kernel + expected_size, 0,
100 				 actual_size - expected_size) == NULL;
101 	else
102 		res = check_zeroed_user(uaddr.user + expected_size,
103 					actual_size - expected_size);
104 	if (res < 0)
105 		return res;
106 	return res ? 0 : -E2BIG;
107 }
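
/*
 * A minimal sketch of the expected caller pattern (the actual call site,
 * __sys_bpf(), lies outside this excerpt, so the snippet below is only an
 * illustration of how the helper is meant to be used):
 *
 *	union bpf_attr attr;
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	if (copy_from_bpfptr(&attr, uattr, size) != 0)
 *		return -EFAULT;
 *
 * i.e. a newer userspace passing a larger struct is accepted as long as all
 * trailing bytes the kernel does not understand are zero.
 */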
108 
109 const struct bpf_map_ops bpf_map_offload_ops = {
110 	.map_meta_equal = bpf_map_meta_equal,
111 	.map_alloc = bpf_map_offload_map_alloc,
112 	.map_free = bpf_map_offload_map_free,
113 	.map_check_btf = map_check_no_btf,
114 	.map_mem_usage = bpf_map_offload_map_mem_usage,
115 };
116 
117 static void bpf_map_write_active_inc(struct bpf_map *map)
118 {
119 	atomic64_inc(&map->writecnt);
120 }
121 
122 static void bpf_map_write_active_dec(struct bpf_map *map)
123 {
124 	atomic64_dec(&map->writecnt);
125 }
126 
127 bool bpf_map_write_active(const struct bpf_map *map)
128 {
129 	return atomic64_read(&map->writecnt) != 0;
130 }
131 
132 static u32 bpf_map_value_size(const struct bpf_map *map)
133 {
134 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
135 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
136 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
137 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
138 		return round_up(map->value_size, 8) * num_possible_cpus();
139 	else if (IS_FD_MAP(map))
140 		return sizeof(u32);
141 	else
142 		return  map->value_size;
143 }
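
/*
 * Worked example (hypothetical numbers): for a BPF_MAP_TYPE_PERCPU_ARRAY
 * with value_size == 12 on a machine where num_possible_cpus() == 4, the
 * syscall-visible value size is round_up(12, 8) * 4 == 64, i.e. userspace
 * must pass a 64-byte buffer to lookup/update even though each per-CPU slot
 * only holds 12 bytes of data.
 */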
144 
145 static void maybe_wait_bpf_programs(struct bpf_map *map)
146 {
147 	/* Wait for any running non-sleepable BPF programs to complete so that
148 	 * userspace, when we return to it, knows that all non-sleepable
149 	 * programs that could be running use the new map value. For sleepable
150 	 * BPF programs, synchronize_rcu_tasks_trace() would have to be used to
151 	 * wait for their completion, but that wait can be very long and
152 	 * userspace may think the syscall is hanging forever, so sleepable
153 	 * BPF programs are not handled for now.
154 	 */
155 	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
156 	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
157 		synchronize_rcu();
158 }
159 
160 static void unpin_uptr_kaddr(void *kaddr)
161 {
162 	if (kaddr)
163 		unpin_user_page(virt_to_page(kaddr));
164 }
165 
166 static void __bpf_obj_unpin_uptrs(struct btf_record *rec, u32 cnt, void *obj)
167 {
168 	const struct btf_field *field;
169 	void **uptr_addr;
170 	int i;
171 
172 	for (i = 0, field = rec->fields; i < cnt; i++, field++) {
173 		if (field->type != BPF_UPTR)
174 			continue;
175 
176 		uptr_addr = obj + field->offset;
177 		unpin_uptr_kaddr(*uptr_addr);
178 	}
179 }
180 
181 static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *obj)
182 {
183 	if (!btf_record_has_field(rec, BPF_UPTR))
184 		return;
185 
186 	__bpf_obj_unpin_uptrs(rec, rec->cnt, obj);
187 }
188 
189 static int bpf_obj_pin_uptrs(struct btf_record *rec, void *obj)
190 {
191 	const struct btf_field *field;
192 	const struct btf_type *t;
193 	unsigned long start, end;
194 	struct page *page;
195 	void **uptr_addr;
196 	int i, err;
197 
198 	if (!btf_record_has_field(rec, BPF_UPTR))
199 		return 0;
200 
201 	for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
202 		if (field->type != BPF_UPTR)
203 			continue;
204 
205 		uptr_addr = obj + field->offset;
206 		start = *(unsigned long *)uptr_addr;
207 		if (!start)
208 			continue;
209 
210 		t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id);
211 		/* t->size was checked for zero before */
212 		if (check_add_overflow(start, t->size - 1, &end)) {
213 			err = -EFAULT;
214 			goto unpin_all;
215 		}
216 
217 		/* The uptr's struct cannot span across two pages */
218 		if ((start & PAGE_MASK) != (end & PAGE_MASK)) {
219 			err = -EOPNOTSUPP;
220 			goto unpin_all;
221 		}
222 
223 		err = pin_user_pages_fast(start, 1, FOLL_LONGTERM | FOLL_WRITE, &page);
224 		if (err != 1)
225 			goto unpin_all;
226 
227 		if (PageHighMem(page)) {
228 			err = -EOPNOTSUPP;
229 			unpin_user_page(page);
230 			goto unpin_all;
231 		}
232 
233 		*uptr_addr = page_address(page) + offset_in_page(start);
234 	}
235 
236 	return 0;
237 
238 unpin_all:
239 	__bpf_obj_unpin_uptrs(rec, i, obj);
240 	return err;
241 }
242 
243 static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
244 				void *key, void *value, __u64 flags)
245 {
246 	int err;
247 
248 	/* Need to create a kthread, thus must support schedule */
249 	if (bpf_map_is_offloaded(map)) {
250 		return bpf_map_offload_update_elem(map, key, value, flags);
251 	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
252 		   map->map_type == BPF_MAP_TYPE_ARENA ||
253 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
254 		return map->ops->map_update_elem(map, key, value, flags);
255 	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
256 		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
257 		return sock_map_update_elem_sys(map, key, value, flags);
258 	} else if (IS_FD_PROG_ARRAY(map)) {
259 		return bpf_fd_array_map_update_elem(map, map_file, key, value,
260 						    flags);
261 	}
262 
263 	bpf_disable_instrumentation();
264 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
265 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
266 		err = bpf_percpu_hash_update(map, key, value, flags);
267 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
268 		err = bpf_percpu_array_update(map, key, value, flags);
269 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
270 		err = bpf_percpu_cgroup_storage_update(map, key, value,
271 						       flags);
272 	} else if (IS_FD_ARRAY(map)) {
273 		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
274 						   flags);
275 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
276 		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
277 						  flags);
278 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
279 		/* rcu_read_lock() is not needed */
280 		err = bpf_fd_reuseport_array_update_elem(map, key, value,
281 							 flags);
282 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
283 		   map->map_type == BPF_MAP_TYPE_STACK ||
284 		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
285 		err = map->ops->map_push_elem(map, value, flags);
286 	} else {
287 		err = bpf_obj_pin_uptrs(map->record, value);
288 		if (!err) {
289 			rcu_read_lock();
290 			err = map->ops->map_update_elem(map, key, value, flags);
291 			rcu_read_unlock();
292 			if (err)
293 				bpf_obj_unpin_uptrs(map->record, value);
294 		}
295 	}
296 	bpf_enable_instrumentation();
297 
298 	return err;
299 }
300 
301 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
302 			      __u64 flags)
303 {
304 	void *ptr;
305 	int err;
306 
307 	if (bpf_map_is_offloaded(map))
308 		return bpf_map_offload_lookup_elem(map, key, value);
309 
310 	bpf_disable_instrumentation();
311 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
312 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
313 		err = bpf_percpu_hash_copy(map, key, value);
314 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
315 		err = bpf_percpu_array_copy(map, key, value);
316 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
317 		err = bpf_percpu_cgroup_storage_copy(map, key, value);
318 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
319 		err = bpf_stackmap_copy(map, key, value);
320 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
321 		err = bpf_fd_array_map_lookup_elem(map, key, value);
322 	} else if (IS_FD_HASH(map)) {
323 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
324 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
325 		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
326 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
327 		   map->map_type == BPF_MAP_TYPE_STACK ||
328 		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
329 		err = map->ops->map_peek_elem(map, value);
330 	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
331 		/* struct_ops map requires directly updating "value" */
332 		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
333 	} else {
334 		rcu_read_lock();
335 		if (map->ops->map_lookup_elem_sys_only)
336 			ptr = map->ops->map_lookup_elem_sys_only(map, key);
337 		else
338 			ptr = map->ops->map_lookup_elem(map, key);
339 		if (IS_ERR(ptr)) {
340 			err = PTR_ERR(ptr);
341 		} else if (!ptr) {
342 			err = -ENOENT;
343 		} else {
344 			err = 0;
345 			if (flags & BPF_F_LOCK)
346 				/* lock 'ptr' and copy everything but lock */
347 				copy_map_value_locked(map, value, ptr, true);
348 			else
349 				copy_map_value(map, value, ptr);
350 			/* mask lock and timer, since value wasn't zero-initialized */
351 			check_and_init_map_value(map, value);
352 		}
353 		rcu_read_unlock();
354 	}
355 
356 	bpf_enable_instrumentation();
357 
358 	return err;
359 }
360 
361 /* Please do not use this function outside of the map creation path
362  * (e.g. in the map update path) without taking care to set the active
363  * memory cgroup (see bpf_map_kmalloc_node() for an example).
364  */
365 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
366 {
367 	/* We really just want to fail instead of triggering the OOM killer
368 	 * under memory pressure, therefore we pass __GFP_NORETRY to kmalloc,
369 	 * which is used for lower-order allocation requests.
370 	 *
371 	 * It has been observed that higher-order allocation requests done by
372 	 * vmalloc with __GFP_NORETRY set might fail because they do not try
373 	 * to reclaim memory from the page cache, thus we pass
374 	 * __GFP_RETRY_MAYFAIL to avoid such situations.
375 	 */
376 
377 	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
378 	unsigned int flags = 0;
379 	unsigned long align = 1;
380 	void *area;
381 
382 	if (size >= SIZE_MAX)
383 		return NULL;
384 
385 	/* kmalloc()'ed memory can't be mmap()'ed */
386 	if (mmapable) {
387 		BUG_ON(!PAGE_ALIGNED(size));
388 		align = SHMLBA;
389 		flags = VM_USERMAP;
390 	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
391 		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
392 				    numa_node);
393 		if (area != NULL)
394 			return area;
395 	}
396 
397 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
398 			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
399 			flags, numa_node, __builtin_return_address(0));
400 }
401 
402 void *bpf_map_area_alloc(u64 size, int numa_node)
403 {
404 	return __bpf_map_area_alloc(size, numa_node, false);
405 }
406 
407 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
408 {
409 	return __bpf_map_area_alloc(size, numa_node, true);
410 }
411 
412 void bpf_map_area_free(void *area)
413 {
414 	kvfree(area);
415 }
416 
417 static u32 bpf_map_flags_retain_permanent(u32 flags)
418 {
419 	/* Some map creation flags are not tied to the map object but
420 	 * rather to the map fd instead, so they have no meaning upon
421 	 * map object inspection since multiple file descriptors with
422 	 * different (access) properties can exist here. Thus, given
423 	 * they have zero meaning for the map itself, let's clear them
424 	 * here.
425 	 */
426 	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
427 }
428 
429 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
430 {
431 	map->map_type = attr->map_type;
432 	map->key_size = attr->key_size;
433 	map->value_size = attr->value_size;
434 	map->max_entries = attr->max_entries;
435 	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
436 	map->numa_node = bpf_map_attr_numa_node(attr);
437 	map->map_extra = attr->map_extra;
438 }
439 
440 static int bpf_map_alloc_id(struct bpf_map *map)
441 {
442 	int id;
443 
444 	idr_preload(GFP_KERNEL);
445 	spin_lock_bh(&map_idr_lock);
446 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
447 	if (id > 0)
448 		map->id = id;
449 	spin_unlock_bh(&map_idr_lock);
450 	idr_preload_end();
451 
452 	if (WARN_ON_ONCE(!id))
453 		return -ENOSPC;
454 
455 	return id > 0 ? 0 : id;
456 }
457 
458 void bpf_map_free_id(struct bpf_map *map)
459 {
460 	unsigned long flags;
461 
462 	/* Offloaded maps are removed from the IDR store when their device
463 	 * disappears - even if someone holds an fd to them they are unusable,
464 	 * the memory is gone, all ops will fail; they are simply waiting for
465 	 * refcnt to drop to be freed.
466 	 */
467 	if (!map->id)
468 		return;
469 
470 	spin_lock_irqsave(&map_idr_lock, flags);
471 
472 	idr_remove(&map_idr, map->id);
473 	map->id = 0;
474 
475 	spin_unlock_irqrestore(&map_idr_lock, flags);
476 }
477 
478 #ifdef CONFIG_MEMCG
479 static void bpf_map_save_memcg(struct bpf_map *map)
480 {
481 	/* Currently if a map is created by a process belonging to the root
482 	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
483 	 * So we have to check map->objcg for being NULL each time it's
484 	 * being used.
485 	 */
486 	if (memcg_bpf_enabled())
487 		map->objcg = get_obj_cgroup_from_current();
488 }
489 
490 static void bpf_map_release_memcg(struct bpf_map *map)
491 {
492 	if (map->objcg)
493 		obj_cgroup_put(map->objcg);
494 }
495 
496 static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
497 {
498 	if (map->objcg)
499 		return get_mem_cgroup_from_objcg(map->objcg);
500 
501 	return root_mem_cgroup;
502 }
503 
504 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
505 			   int node)
506 {
507 	struct mem_cgroup *memcg, *old_memcg;
508 	void *ptr;
509 
510 	memcg = bpf_map_get_memcg(map);
511 	old_memcg = set_active_memcg(memcg);
512 	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
513 	set_active_memcg(old_memcg);
514 	mem_cgroup_put(memcg);
515 
516 	return ptr;
517 }
518 
519 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
520 {
521 	struct mem_cgroup *memcg, *old_memcg;
522 	void *ptr;
523 
524 	memcg = bpf_map_get_memcg(map);
525 	old_memcg = set_active_memcg(memcg);
526 	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
527 	set_active_memcg(old_memcg);
528 	mem_cgroup_put(memcg);
529 
530 	return ptr;
531 }
532 
533 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
534 		       gfp_t flags)
535 {
536 	struct mem_cgroup *memcg, *old_memcg;
537 	void *ptr;
538 
539 	memcg = bpf_map_get_memcg(map);
540 	old_memcg = set_active_memcg(memcg);
541 	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
542 	set_active_memcg(old_memcg);
543 	mem_cgroup_put(memcg);
544 
545 	return ptr;
546 }
547 
548 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
549 				    size_t align, gfp_t flags)
550 {
551 	struct mem_cgroup *memcg, *old_memcg;
552 	void __percpu *ptr;
553 
554 	memcg = bpf_map_get_memcg(map);
555 	old_memcg = set_active_memcg(memcg);
556 	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
557 	set_active_memcg(old_memcg);
558 	mem_cgroup_put(memcg);
559 
560 	return ptr;
561 }
562 
563 #else
564 static void bpf_map_save_memcg(struct bpf_map *map)
565 {
566 }
567 
568 static void bpf_map_release_memcg(struct bpf_map *map)
569 {
570 }
571 #endif
572 
573 static bool can_alloc_pages(void)
574 {
575 	return preempt_count() == 0 && !irqs_disabled() &&
576 		!IS_ENABLED(CONFIG_PREEMPT_RT);
577 }
578 
579 static struct page *__bpf_alloc_page(int nid)
580 {
581 	if (!can_alloc_pages())
582 		return alloc_pages_nolock(nid, 0);
583 
584 	return alloc_pages_node(nid,
585 				GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT
586 				| __GFP_NOWARN,
587 				0);
588 }
589 
590 int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
591 			unsigned long nr_pages, struct page **pages)
592 {
593 	unsigned long i, j;
594 	struct page *pg;
595 	int ret = 0;
596 #ifdef CONFIG_MEMCG
597 	struct mem_cgroup *memcg, *old_memcg;
598 
599 	memcg = bpf_map_get_memcg(map);
600 	old_memcg = set_active_memcg(memcg);
601 #endif
602 	for (i = 0; i < nr_pages; i++) {
603 		pg = __bpf_alloc_page(nid);
604 
605 		if (pg) {
606 			pages[i] = pg;
607 			continue;
608 		}
609 		for (j = 0; j < i; j++)
610 			free_pages_nolock(pages[j], 0);
611 		ret = -ENOMEM;
612 		break;
613 	}
614 
615 #ifdef CONFIG_MEMCG
616 	set_active_memcg(old_memcg);
617 	mem_cgroup_put(memcg);
618 #endif
619 	return ret;
620 }
621 
622 
623 static int btf_field_cmp(const void *a, const void *b)
624 {
625 	const struct btf_field *f1 = a, *f2 = b;
626 
627 	if (f1->offset < f2->offset)
628 		return -1;
629 	else if (f1->offset > f2->offset)
630 		return 1;
631 	return 0;
632 }
633 
634 struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
635 				  u32 field_mask)
636 {
637 	struct btf_field *field;
638 
639 	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
640 		return NULL;
641 	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
642 	if (!field || !(field->type & field_mask))
643 		return NULL;
644 	return field;
645 }
646 
647 void btf_record_free(struct btf_record *rec)
648 {
649 	int i;
650 
651 	if (IS_ERR_OR_NULL(rec))
652 		return;
653 	for (i = 0; i < rec->cnt; i++) {
654 		switch (rec->fields[i].type) {
655 		case BPF_KPTR_UNREF:
656 		case BPF_KPTR_REF:
657 		case BPF_KPTR_PERCPU:
658 		case BPF_UPTR:
659 			if (rec->fields[i].kptr.module)
660 				module_put(rec->fields[i].kptr.module);
661 			if (btf_is_kernel(rec->fields[i].kptr.btf))
662 				btf_put(rec->fields[i].kptr.btf);
663 			break;
664 		case BPF_LIST_HEAD:
665 		case BPF_LIST_NODE:
666 		case BPF_RB_ROOT:
667 		case BPF_RB_NODE:
668 		case BPF_SPIN_LOCK:
669 		case BPF_RES_SPIN_LOCK:
670 		case BPF_TIMER:
671 		case BPF_REFCOUNT:
672 		case BPF_WORKQUEUE:
673 			/* Nothing to release */
674 			break;
675 		default:
676 			WARN_ON_ONCE(1);
677 			continue;
678 		}
679 	}
680 	kfree(rec);
681 }
682 
683 void bpf_map_free_record(struct bpf_map *map)
684 {
685 	btf_record_free(map->record);
686 	map->record = NULL;
687 }
688 
689 struct btf_record *btf_record_dup(const struct btf_record *rec)
690 {
691 	const struct btf_field *fields;
692 	struct btf_record *new_rec;
693 	int ret, size, i;
694 
695 	if (IS_ERR_OR_NULL(rec))
696 		return NULL;
697 	size = struct_size(rec, fields, rec->cnt);
698 	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
699 	if (!new_rec)
700 		return ERR_PTR(-ENOMEM);
701 	/* Do a deep copy of the btf_record */
702 	fields = rec->fields;
703 	new_rec->cnt = 0;
704 	for (i = 0; i < rec->cnt; i++) {
705 		switch (fields[i].type) {
706 		case BPF_KPTR_UNREF:
707 		case BPF_KPTR_REF:
708 		case BPF_KPTR_PERCPU:
709 		case BPF_UPTR:
710 			if (btf_is_kernel(fields[i].kptr.btf))
711 				btf_get(fields[i].kptr.btf);
712 			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
713 				ret = -ENXIO;
714 				goto free;
715 			}
716 			break;
717 		case BPF_LIST_HEAD:
718 		case BPF_LIST_NODE:
719 		case BPF_RB_ROOT:
720 		case BPF_RB_NODE:
721 		case BPF_SPIN_LOCK:
722 		case BPF_RES_SPIN_LOCK:
723 		case BPF_TIMER:
724 		case BPF_REFCOUNT:
725 		case BPF_WORKQUEUE:
726 			/* Nothing to acquire */
727 			break;
728 		default:
729 			ret = -EFAULT;
730 			WARN_ON_ONCE(1);
731 			goto free;
732 		}
733 		new_rec->cnt++;
734 	}
735 	return new_rec;
736 free:
737 	btf_record_free(new_rec);
738 	return ERR_PTR(ret);
739 }
740 
741 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
742 {
743 	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
744 	int size;
745 
746 	if (!a_has_fields && !b_has_fields)
747 		return true;
748 	if (a_has_fields != b_has_fields)
749 		return false;
750 	if (rec_a->cnt != rec_b->cnt)
751 		return false;
752 	size = struct_size(rec_a, fields, rec_a->cnt);
753 	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
754 	 * members are zeroed out. So memcmp is safe to do without worrying
755 	 * about padding/unused fields.
756 	 *
757 	 * While spin_lock, timer, and kptr have no relation to map BTF,
758 	 * list_head metadata is specific to map BTF, the btf and value_rec
759 	 * members in particular. btf is the map BTF, while value_rec points to
760 	 * btf_record in that map BTF.
761 	 *
762 	 * So while by default, we don't rely on the map BTF (which the records
763 	 * were parsed from) matching for both records, which is not backwards
764 	 * compatible, in case list_head is part of it, we implicitly rely on
765 	 * that by way of depending on memcmp succeeding for it.
766 	 */
767 	return !memcmp(rec_a, rec_b, size);
768 }
769 
770 void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
771 {
772 	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
773 		return;
774 	bpf_timer_cancel_and_free(obj + rec->timer_off);
775 }
776 
777 void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
778 {
779 	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE)))
780 		return;
781 	bpf_wq_cancel_and_free(obj + rec->wq_off);
782 }
783 
784 void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
785 {
786 	const struct btf_field *fields;
787 	int i;
788 
789 	if (IS_ERR_OR_NULL(rec))
790 		return;
791 	fields = rec->fields;
792 	for (i = 0; i < rec->cnt; i++) {
793 		struct btf_struct_meta *pointee_struct_meta;
794 		const struct btf_field *field = &fields[i];
795 		void *field_ptr = obj + field->offset;
796 		void *xchgd_field;
797 
798 		switch (fields[i].type) {
799 		case BPF_SPIN_LOCK:
800 		case BPF_RES_SPIN_LOCK:
801 			break;
802 		case BPF_TIMER:
803 			bpf_timer_cancel_and_free(field_ptr);
804 			break;
805 		case BPF_WORKQUEUE:
806 			bpf_wq_cancel_and_free(field_ptr);
807 			break;
808 		case BPF_KPTR_UNREF:
809 			WRITE_ONCE(*(u64 *)field_ptr, 0);
810 			break;
811 		case BPF_KPTR_REF:
812 		case BPF_KPTR_PERCPU:
813 			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
814 			if (!xchgd_field)
815 				break;
816 
817 			if (!btf_is_kernel(field->kptr.btf)) {
818 				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
819 									   field->kptr.btf_id);
820 				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
821 								 pointee_struct_meta->record : NULL,
822 								 fields[i].type == BPF_KPTR_PERCPU);
823 			} else {
824 				field->kptr.dtor(xchgd_field);
825 			}
826 			break;
827 		case BPF_UPTR:
828 			/* The caller ensured that no one is using the uptr */
829 			unpin_uptr_kaddr(*(void **)field_ptr);
830 			break;
831 		case BPF_LIST_HEAD:
832 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
833 				continue;
834 			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
835 			break;
836 		case BPF_RB_ROOT:
837 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
838 				continue;
839 			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
840 			break;
841 		case BPF_LIST_NODE:
842 		case BPF_RB_NODE:
843 		case BPF_REFCOUNT:
844 			break;
845 		default:
846 			WARN_ON_ONCE(1);
847 			continue;
848 		}
849 	}
850 }
851 
852 static void bpf_map_free(struct bpf_map *map)
853 {
854 	struct btf_record *rec = map->record;
855 	struct btf *btf = map->btf;
856 
857 	/* Implementation-dependent freeing. Disable migration to simplify
858 	 * freeing of values or special fields allocated from the bpf memory
859 	 * allocator.
860 	 */
861 	migrate_disable();
862 	map->ops->map_free(map);
863 	migrate_enable();
864 
865 	/* Delay freeing of btf_record for maps, as map_free
866 	 * callback usually needs access to it. It is better to do it here
867 	 * than to require each callback to do the freeing itself.
868 	 *
869 	 * Note that the btf_record stashed in map->inner_map_meta->record was
870 	 * already freed using the map_free callback for map in map case which
871 	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
872 	 * template bpf_map struct used during verification.
873 	 */
874 	btf_record_free(rec);
875 	/* Delay freeing of btf for maps, as map_free callback may need
876 	 * struct_meta info which will be freed with btf_put().
877 	 */
878 	btf_put(btf);
879 }
880 
881 /* called from workqueue */
882 static void bpf_map_free_deferred(struct work_struct *work)
883 {
884 	struct bpf_map *map = container_of(work, struct bpf_map, work);
885 
886 	security_bpf_map_free(map);
887 	bpf_map_release_memcg(map);
888 	bpf_map_free(map);
889 }
890 
891 static void bpf_map_put_uref(struct bpf_map *map)
892 {
893 	if (atomic64_dec_and_test(&map->usercnt)) {
894 		if (map->ops->map_release_uref)
895 			map->ops->map_release_uref(map);
896 	}
897 }
898 
899 static void bpf_map_free_in_work(struct bpf_map *map)
900 {
901 	INIT_WORK(&map->work, bpf_map_free_deferred);
902 	/* Avoid spawning kworkers, since they all might contend
903 	 * for the same mutex, such as slab_mutex.
904 	 */
905 	queue_work(system_unbound_wq, &map->work);
906 }
907 
908 static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
909 {
910 	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
911 }
912 
913 static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
914 {
915 	if (rcu_trace_implies_rcu_gp())
916 		bpf_map_free_rcu_gp(rcu);
917 	else
918 		call_rcu(rcu, bpf_map_free_rcu_gp);
919 }
920 
921 /* decrement map refcnt and schedule it for freeing via workqueue
922  * (underlying map implementation ops->map_free() might sleep)
923  */
924 void bpf_map_put(struct bpf_map *map)
925 {
926 	if (atomic64_dec_and_test(&map->refcnt)) {
927 		/* bpf_map_free_id() must be called first */
928 		bpf_map_free_id(map);
929 
930 		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
931 		if (READ_ONCE(map->free_after_mult_rcu_gp))
932 			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
933 		else if (READ_ONCE(map->free_after_rcu_gp))
934 			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
935 		else
936 			bpf_map_free_in_work(map);
937 	}
938 }
939 EXPORT_SYMBOL_GPL(bpf_map_put);
940 
941 void bpf_map_put_with_uref(struct bpf_map *map)
942 {
943 	bpf_map_put_uref(map);
944 	bpf_map_put(map);
945 }
946 
947 static int bpf_map_release(struct inode *inode, struct file *filp)
948 {
949 	struct bpf_map *map = filp->private_data;
950 
951 	if (map->ops->map_release)
952 		map->ops->map_release(map, filp);
953 
954 	bpf_map_put_with_uref(map);
955 	return 0;
956 }
957 
958 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
959 {
960 	fmode_t mode = fd_file(f)->f_mode;
961 
962 	/* Our file permissions may have been overridden by global
963 	 * map permissions on the syscall side.
964 	 */
965 	if (READ_ONCE(map->frozen))
966 		mode &= ~FMODE_CAN_WRITE;
967 	return mode;
968 }
969 
970 #ifdef CONFIG_PROC_FS
971 /* Show the memory usage of a bpf map */
972 static u64 bpf_map_memory_usage(const struct bpf_map *map)
973 {
974 	return map->ops->map_mem_usage(map);
975 }
976 
977 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
978 {
979 	struct bpf_map *map = filp->private_data;
980 	u32 type = 0, jited = 0;
981 
982 	if (map_type_contains_progs(map)) {
983 		spin_lock(&map->owner.lock);
984 		type  = map->owner.type;
985 		jited = map->owner.jited;
986 		spin_unlock(&map->owner.lock);
987 	}
988 
989 	seq_printf(m,
990 		   "map_type:\t%u\n"
991 		   "key_size:\t%u\n"
992 		   "value_size:\t%u\n"
993 		   "max_entries:\t%u\n"
994 		   "map_flags:\t%#x\n"
995 		   "map_extra:\t%#llx\n"
996 		   "memlock:\t%llu\n"
997 		   "map_id:\t%u\n"
998 		   "frozen:\t%u\n",
999 		   map->map_type,
1000 		   map->key_size,
1001 		   map->value_size,
1002 		   map->max_entries,
1003 		   map->map_flags,
1004 		   (unsigned long long)map->map_extra,
1005 		   bpf_map_memory_usage(map),
1006 		   map->id,
1007 		   READ_ONCE(map->frozen));
1008 	if (type) {
1009 		seq_printf(m, "owner_prog_type:\t%u\n", type);
1010 		seq_printf(m, "owner_jited:\t%u\n", jited);
1011 	}
1012 }
1013 #endif
1014 
1015 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
1016 			      loff_t *ppos)
1017 {
1018 	/* We need this handler such that alloc_file() enables
1019 	 * f_mode with FMODE_CAN_READ.
1020 	 */
1021 	return -EINVAL;
1022 }
1023 
1024 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
1025 			       size_t siz, loff_t *ppos)
1026 {
1027 	/* We need this handler such that alloc_file() enables
1028 	 * f_mode with FMODE_CAN_WRITE.
1029 	 */
1030 	return -EINVAL;
1031 }
1032 
1033 /* called for any extra memory-mapped regions (except initial) */
1034 static void bpf_map_mmap_open(struct vm_area_struct *vma)
1035 {
1036 	struct bpf_map *map = vma->vm_file->private_data;
1037 
1038 	if (vma->vm_flags & VM_MAYWRITE)
1039 		bpf_map_write_active_inc(map);
1040 }
1041 
1042 /* called for all unmapped memory region (including initial) */
1043 static void bpf_map_mmap_close(struct vm_area_struct *vma)
1044 {
1045 	struct bpf_map *map = vma->vm_file->private_data;
1046 
1047 	if (vma->vm_flags & VM_MAYWRITE)
1048 		bpf_map_write_active_dec(map);
1049 }
1050 
1051 static const struct vm_operations_struct bpf_map_default_vmops = {
1052 	.open		= bpf_map_mmap_open,
1053 	.close		= bpf_map_mmap_close,
1054 };
1055 
1056 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
1057 {
1058 	struct bpf_map *map = filp->private_data;
1059 	int err = 0;
1060 
1061 	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
1062 		return -ENOTSUPP;
1063 
1064 	if (!(vma->vm_flags & VM_SHARED))
1065 		return -EINVAL;
1066 
1067 	mutex_lock(&map->freeze_mutex);
1068 
1069 	if (vma->vm_flags & VM_WRITE) {
1070 		if (map->frozen) {
1071 			err = -EPERM;
1072 			goto out;
1073 		}
1074 		/* map is meant to be read-only, so do not allow mapping as
1075 		 * writable, because it's possible to leak a writable page
1076 		 * reference and allow user-space to still modify it after
1077 		 * freezing, while the verifier assumes the contents do not change
1078 		 */
1079 		if (map->map_flags & BPF_F_RDONLY_PROG) {
1080 			err = -EACCES;
1081 			goto out;
1082 		}
1083 		bpf_map_write_active_inc(map);
1084 	}
1085 out:
1086 	mutex_unlock(&map->freeze_mutex);
1087 	if (err)
1088 		return err;
1089 
1090 	/* set default open/close callbacks */
1091 	vma->vm_ops = &bpf_map_default_vmops;
1092 	vma->vm_private_data = map;
1093 	vm_flags_clear(vma, VM_MAYEXEC);
1094 	/* If mapping is read-only, then disallow potentially re-mapping with
1095 	 * PROT_WRITE by dropping VM_MAYWRITE flag. This VM_MAYWRITE clearing
1096 	 * means that as far as BPF map's memory-mapped VMAs are concerned,
1097 	 * VM_WRITE and VM_MAYWRITE are equivalent: if one of them is set,
1098 	 * both should be set, so we can forget about VM_MAYWRITE and always
1099 	 * check just VM_WRITE
1100 	 */
1101 	if (!(vma->vm_flags & VM_WRITE))
1102 		vm_flags_clear(vma, VM_MAYWRITE);
1103 
1104 	err = map->ops->map_mmap(map, vma);
1105 	if (err) {
1106 		if (vma->vm_flags & VM_WRITE)
1107 			bpf_map_write_active_dec(map);
1108 	}
1109 
1110 	return err;
1111 }
1112 
1113 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
1114 {
1115 	struct bpf_map *map = filp->private_data;
1116 
1117 	if (map->ops->map_poll)
1118 		return map->ops->map_poll(map, filp, pts);
1119 
1120 	return EPOLLERR;
1121 }
1122 
1123 static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
1124 					   unsigned long len, unsigned long pgoff,
1125 					   unsigned long flags)
1126 {
1127 	struct bpf_map *map = filp->private_data;
1128 
1129 	if (map->ops->map_get_unmapped_area)
1130 		return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
1131 #ifdef CONFIG_MMU
1132 	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
1133 #else
1134 	return addr;
1135 #endif
1136 }
1137 
1138 const struct file_operations bpf_map_fops = {
1139 #ifdef CONFIG_PROC_FS
1140 	.show_fdinfo	= bpf_map_show_fdinfo,
1141 #endif
1142 	.release	= bpf_map_release,
1143 	.read		= bpf_dummy_read,
1144 	.write		= bpf_dummy_write,
1145 	.mmap		= bpf_map_mmap,
1146 	.poll		= bpf_map_poll,
1147 	.get_unmapped_area = bpf_get_unmapped_area,
1148 };
1149 
1150 int bpf_map_new_fd(struct bpf_map *map, int flags)
1151 {
1152 	int ret;
1153 
1154 	ret = security_bpf_map(map, OPEN_FMODE(flags));
1155 	if (ret < 0)
1156 		return ret;
1157 
1158 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
1159 				flags | O_CLOEXEC);
1160 }
1161 
1162 int bpf_get_file_flag(int flags)
1163 {
1164 	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
1165 		return -EINVAL;
1166 	if (flags & BPF_F_RDONLY)
1167 		return O_RDONLY;
1168 	if (flags & BPF_F_WRONLY)
1169 		return O_WRONLY;
1170 	return O_RDWR;
1171 }
1172 
1173 /* helper macro to check that unused fields 'union bpf_attr' are zero */
1174 #define CHECK_ATTR(CMD) \
1175 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
1176 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
1177 		   sizeof(*attr) - \
1178 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
1179 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
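
/*
 * For example, CHECK_ATTR(BPF_MAP_CREATE) runs memchr_inv() over every byte
 * of *attr that lies after BPF_MAP_CREATE_LAST_FIELD and evaluates to true
 * (i.e. "reject with -EINVAL") if any of those trailing bytes is non-zero.
 * Together with bpf_check_uarg_tail_zero() above, this enforces that fields
 * a given command does not use must be left zeroed by userspace.
 */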
1180 
1181 /* dst and src must each have at least "size" bytes.
1182  * Return strlen on success and < 0 on error.
1183  */
1184 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
1185 {
1186 	const char *end = src + size;
1187 	const char *orig_src = src;
1188 
1189 	memset(dst, 0, size);
1190 	/* Copy all isalnum(), '_' and '.' chars. */
1191 	while (src < end && *src) {
1192 		if (!isalnum(*src) &&
1193 		    *src != '_' && *src != '.')
1194 			return -EINVAL;
1195 		*dst++ = *src++;
1196 	}
1197 
1198 	/* No '\0' found in "size" number of bytes */
1199 	if (src == end)
1200 		return -EINVAL;
1201 
1202 	return src - orig_src;
1203 }
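
/*
 * Illustration (hypothetical inputs), with size == BPF_OBJ_NAME_LEN:
 * "my_map.v2" is copied and its length is returned, "my map" fails with
 * -EINVAL because of the space, and a name without a terminating '\0'
 * within "size" bytes also fails with -EINVAL.
 */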
1204 
1205 int map_check_no_btf(const struct bpf_map *map,
1206 		     const struct btf *btf,
1207 		     const struct btf_type *key_type,
1208 		     const struct btf_type *value_type)
1209 {
1210 	return -ENOTSUPP;
1211 }
1212 
1213 static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
1214 			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
1215 {
1216 	const struct btf_type *key_type, *value_type;
1217 	u32 key_size, value_size;
1218 	int ret = 0;
1219 
1220 	/* Some maps allow key to be unspecified. */
1221 	if (btf_key_id) {
1222 		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
1223 		if (!key_type || key_size != map->key_size)
1224 			return -EINVAL;
1225 	} else {
1226 		key_type = btf_type_by_id(btf, 0);
1227 		if (!map->ops->map_check_btf)
1228 			return -EINVAL;
1229 	}
1230 
1231 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
1232 	if (!value_type || value_size != map->value_size)
1233 		return -EINVAL;
1234 
1235 	map->record = btf_parse_fields(btf, value_type,
1236 				       BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
1237 				       BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR,
1238 				       map->value_size);
1239 	if (!IS_ERR_OR_NULL(map->record)) {
1240 		int i;
1241 
1242 		if (!bpf_token_capable(token, CAP_BPF)) {
1243 			ret = -EPERM;
1244 			goto free_map_tab;
1245 		}
1246 		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
1247 			ret = -EACCES;
1248 			goto free_map_tab;
1249 		}
1250 		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
1251 			switch (map->record->field_mask & (1 << i)) {
1252 			case 0:
1253 				continue;
1254 			case BPF_SPIN_LOCK:
1255 			case BPF_RES_SPIN_LOCK:
1256 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1257 				    map->map_type != BPF_MAP_TYPE_ARRAY &&
1258 				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
1259 				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1260 				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1261 				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1262 				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1263 					ret = -EOPNOTSUPP;
1264 					goto free_map_tab;
1265 				}
1266 				break;
1267 			case BPF_TIMER:
1268 			case BPF_WORKQUEUE:
1269 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1270 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1271 				    map->map_type != BPF_MAP_TYPE_ARRAY) {
1272 					ret = -EOPNOTSUPP;
1273 					goto free_map_tab;
1274 				}
1275 				break;
1276 			case BPF_KPTR_UNREF:
1277 			case BPF_KPTR_REF:
1278 			case BPF_KPTR_PERCPU:
1279 			case BPF_REFCOUNT:
1280 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1281 				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
1282 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1283 				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
1284 				    map->map_type != BPF_MAP_TYPE_ARRAY &&
1285 				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
1286 				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1287 				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1288 				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1289 				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1290 					ret = -EOPNOTSUPP;
1291 					goto free_map_tab;
1292 				}
1293 				break;
1294 			case BPF_UPTR:
1295 				if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) {
1296 					ret = -EOPNOTSUPP;
1297 					goto free_map_tab;
1298 				}
1299 				break;
1300 			case BPF_LIST_HEAD:
1301 			case BPF_RB_ROOT:
1302 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1303 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1304 				    map->map_type != BPF_MAP_TYPE_ARRAY) {
1305 					ret = -EOPNOTSUPP;
1306 					goto free_map_tab;
1307 				}
1308 				break;
1309 			default:
1310 				/* Fail if map_type checks are missing for a field type */
1311 				ret = -EOPNOTSUPP;
1312 				goto free_map_tab;
1313 			}
1314 		}
1315 	}
1316 
1317 	ret = btf_check_and_fixup_fields(btf, map->record);
1318 	if (ret < 0)
1319 		goto free_map_tab;
1320 
1321 	if (map->ops->map_check_btf) {
1322 		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
1323 		if (ret < 0)
1324 			goto free_map_tab;
1325 	}
1326 
1327 	return ret;
1328 free_map_tab:
1329 	bpf_map_free_record(map);
1330 	return ret;
1331 }
1332 
1333 static bool bpf_net_capable(void)
1334 {
1335 	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
1336 }
1337 
1338 #define BPF_MAP_CREATE_LAST_FIELD map_token_fd
1339 /* called via syscall */
1340 static int map_create(union bpf_attr *attr, bool kernel)
1341 {
1342 	const struct bpf_map_ops *ops;
1343 	struct bpf_token *token = NULL;
1344 	int numa_node = bpf_map_attr_numa_node(attr);
1345 	u32 map_type = attr->map_type;
1346 	struct bpf_map *map;
1347 	bool token_flag;
1348 	int f_flags;
1349 	int err;
1350 
1351 	err = CHECK_ATTR(BPF_MAP_CREATE);
1352 	if (err)
1353 		return -EINVAL;
1354 
1355 	/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
1356 	 * to avoid per-map type checks tripping on unknown flag
1357 	 */
1358 	token_flag = attr->map_flags & BPF_F_TOKEN_FD;
1359 	attr->map_flags &= ~BPF_F_TOKEN_FD;
1360 
1361 	if (attr->btf_vmlinux_value_type_id) {
1362 		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
1363 		    attr->btf_key_type_id || attr->btf_value_type_id)
1364 			return -EINVAL;
1365 	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
1366 		return -EINVAL;
1367 	}
1368 
1369 	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
1370 	    attr->map_type != BPF_MAP_TYPE_ARENA &&
1371 	    attr->map_extra != 0)
1372 		return -EINVAL;
1373 
1374 	f_flags = bpf_get_file_flag(attr->map_flags);
1375 	if (f_flags < 0)
1376 		return f_flags;
1377 
1378 	if (numa_node != NUMA_NO_NODE &&
1379 	    ((unsigned int)numa_node >= nr_node_ids ||
1380 	     !node_online(numa_node)))
1381 		return -EINVAL;
1382 
1383 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
1384 	map_type = attr->map_type;
1385 	if (map_type >= ARRAY_SIZE(bpf_map_types))
1386 		return -EINVAL;
1387 	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
1388 	ops = bpf_map_types[map_type];
1389 	if (!ops)
1390 		return -EINVAL;
1391 
1392 	if (ops->map_alloc_check) {
1393 		err = ops->map_alloc_check(attr);
1394 		if (err)
1395 			return err;
1396 	}
1397 	if (attr->map_ifindex)
1398 		ops = &bpf_map_offload_ops;
1399 	if (!ops->map_mem_usage)
1400 		return -EINVAL;
1401 
1402 	if (token_flag) {
1403 		token = bpf_token_get_from_fd(attr->map_token_fd);
1404 		if (IS_ERR(token))
1405 			return PTR_ERR(token);
1406 
1407 		/* if current token doesn't grant map creation permissions,
1408 		 * then we can't use this token, so ignore it and rely on
1409 		 * system-wide capabilities checks
1410 		 */
1411 		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
1412 		    !bpf_token_allow_map_type(token, attr->map_type)) {
1413 			bpf_token_put(token);
1414 			token = NULL;
1415 		}
1416 	}
1417 
1418 	err = -EPERM;
1419 
1420 	/* Intent here is for unprivileged_bpf_disabled to block BPF map
1421 	 * creation for unprivileged users; other actions depend
1422 	 * on fd availability and access to bpffs, so are dependent on
1423 	 * object creation success. Even with unprivileged BPF disabled,
1424 	 * capability checks are still carried out.
1425 	 */
1426 	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
1427 		goto put_token;
1428 
1429 	/* check privileged map type permissions */
1430 	switch (map_type) {
1431 	case BPF_MAP_TYPE_ARRAY:
1432 	case BPF_MAP_TYPE_PERCPU_ARRAY:
1433 	case BPF_MAP_TYPE_PROG_ARRAY:
1434 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1435 	case BPF_MAP_TYPE_CGROUP_ARRAY:
1436 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1437 	case BPF_MAP_TYPE_HASH:
1438 	case BPF_MAP_TYPE_PERCPU_HASH:
1439 	case BPF_MAP_TYPE_HASH_OF_MAPS:
1440 	case BPF_MAP_TYPE_RINGBUF:
1441 	case BPF_MAP_TYPE_USER_RINGBUF:
1442 	case BPF_MAP_TYPE_CGROUP_STORAGE:
1443 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
1444 		/* unprivileged */
1445 		break;
1446 	case BPF_MAP_TYPE_SK_STORAGE:
1447 	case BPF_MAP_TYPE_INODE_STORAGE:
1448 	case BPF_MAP_TYPE_TASK_STORAGE:
1449 	case BPF_MAP_TYPE_CGRP_STORAGE:
1450 	case BPF_MAP_TYPE_BLOOM_FILTER:
1451 	case BPF_MAP_TYPE_LPM_TRIE:
1452 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
1453 	case BPF_MAP_TYPE_STACK_TRACE:
1454 	case BPF_MAP_TYPE_QUEUE:
1455 	case BPF_MAP_TYPE_STACK:
1456 	case BPF_MAP_TYPE_LRU_HASH:
1457 	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
1458 	case BPF_MAP_TYPE_STRUCT_OPS:
1459 	case BPF_MAP_TYPE_CPUMAP:
1460 	case BPF_MAP_TYPE_ARENA:
1461 		if (!bpf_token_capable(token, CAP_BPF))
1462 			goto put_token;
1463 		break;
1464 	case BPF_MAP_TYPE_SOCKMAP:
1465 	case BPF_MAP_TYPE_SOCKHASH:
1466 	case BPF_MAP_TYPE_DEVMAP:
1467 	case BPF_MAP_TYPE_DEVMAP_HASH:
1468 	case BPF_MAP_TYPE_XSKMAP:
1469 		if (!bpf_token_capable(token, CAP_NET_ADMIN))
1470 			goto put_token;
1471 		break;
1472 	default:
1473 		WARN(1, "unsupported map type %d", map_type);
1474 		goto put_token;
1475 	}
1476 
1477 	map = ops->map_alloc(attr);
1478 	if (IS_ERR(map)) {
1479 		err = PTR_ERR(map);
1480 		goto put_token;
1481 	}
1482 	map->ops = ops;
1483 	map->map_type = map_type;
1484 
1485 	err = bpf_obj_name_cpy(map->name, attr->map_name,
1486 			       sizeof(attr->map_name));
1487 	if (err < 0)
1488 		goto free_map;
1489 
1490 	atomic64_set(&map->refcnt, 1);
1491 	atomic64_set(&map->usercnt, 1);
1492 	mutex_init(&map->freeze_mutex);
1493 	spin_lock_init(&map->owner.lock);
1494 
1495 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
1496 	    /* Even if the map's value is a kernel struct,
1497 	     * the bpf_prog.o must have BTF to begin with
1498 	     * to figure out the corresponding kernel
1499 	     * counterpart.  Thus, attr->btf_fd has
1500 	     * to be valid also.
1501 	     */
1502 	    attr->btf_vmlinux_value_type_id) {
1503 		struct btf *btf;
1504 
1505 		btf = btf_get_by_fd(attr->btf_fd);
1506 		if (IS_ERR(btf)) {
1507 			err = PTR_ERR(btf);
1508 			goto free_map;
1509 		}
1510 		if (btf_is_kernel(btf)) {
1511 			btf_put(btf);
1512 			err = -EACCES;
1513 			goto free_map;
1514 		}
1515 		map->btf = btf;
1516 
1517 		if (attr->btf_value_type_id) {
1518 			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
1519 					    attr->btf_value_type_id);
1520 			if (err)
1521 				goto free_map;
1522 		}
1523 
1524 		map->btf_key_type_id = attr->btf_key_type_id;
1525 		map->btf_value_type_id = attr->btf_value_type_id;
1526 		map->btf_vmlinux_value_type_id =
1527 			attr->btf_vmlinux_value_type_id;
1528 	}
1529 
1530 	err = security_bpf_map_create(map, attr, token, kernel);
1531 	if (err)
1532 		goto free_map_sec;
1533 
1534 	err = bpf_map_alloc_id(map);
1535 	if (err)
1536 		goto free_map_sec;
1537 
1538 	bpf_map_save_memcg(map);
1539 	bpf_token_put(token);
1540 
1541 	err = bpf_map_new_fd(map, f_flags);
1542 	if (err < 0) {
1543 		/* failed to allocate fd.
1544 		 * bpf_map_put_with_uref() is needed because the above
1545 		 * bpf_map_alloc_id() has published the map
1546 		 * to userspace, and userspace may
1547 		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
1548 		 */
1549 		bpf_map_put_with_uref(map);
1550 		return err;
1551 	}
1552 
1553 	return err;
1554 
1555 free_map_sec:
1556 	security_bpf_map_free(map);
1557 free_map:
1558 	bpf_map_free(map);
1559 put_token:
1560 	bpf_token_put(token);
1561 	return err;
1562 }
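
/*
 * A minimal userspace sketch of the command implemented above (assuming the
 * raw bpf(2) syscall; libbpf's bpf_map_create() is the usual wrapper):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 128,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success a new map fd (opened O_CLOEXEC) is returned; on failure -1 with
 * errno set, e.g. EPERM when unprivileged BPF is disabled and neither a
 * token nor a capability grants BPF_MAP_CREATE.
 */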
1563 
1564 void bpf_map_inc(struct bpf_map *map)
1565 {
1566 	atomic64_inc(&map->refcnt);
1567 }
1568 EXPORT_SYMBOL_GPL(bpf_map_inc);
1569 
1570 void bpf_map_inc_with_uref(struct bpf_map *map)
1571 {
1572 	atomic64_inc(&map->refcnt);
1573 	atomic64_inc(&map->usercnt);
1574 }
1575 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
1576 
1577 struct bpf_map *bpf_map_get(u32 ufd)
1578 {
1579 	CLASS(fd, f)(ufd);
1580 	struct bpf_map *map = __bpf_map_get(f);
1581 
1582 	if (!IS_ERR(map))
1583 		bpf_map_inc(map);
1584 
1585 	return map;
1586 }
1587 EXPORT_SYMBOL_NS(bpf_map_get, "BPF_INTERNAL");
1588 
1589 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1590 {
1591 	CLASS(fd, f)(ufd);
1592 	struct bpf_map *map = __bpf_map_get(f);
1593 
1594 	if (!IS_ERR(map))
1595 		bpf_map_inc_with_uref(map);
1596 
1597 	return map;
1598 }
1599 
1600 /* map_idr_lock should have been held or the map should have been
1601  * protected by rcu read lock.
1602  */
1603 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
1604 {
1605 	int refold;
1606 
1607 	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
1608 	if (!refold)
1609 		return ERR_PTR(-ENOENT);
1610 	if (uref)
1611 		atomic64_inc(&map->usercnt);
1612 
1613 	return map;
1614 }
1615 
1616 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
1617 {
1618 	lockdep_assert(rcu_read_lock_held());
1619 	return __bpf_map_inc_not_zero(map, false);
1620 }
1621 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
1622 
1623 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
1624 {
1625 	return -ENOTSUPP;
1626 }
1627 
1628 static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1629 {
1630 	if (key_size)
1631 		return vmemdup_user(ukey, key_size);
1632 
1633 	if (ukey)
1634 		return ERR_PTR(-EINVAL);
1635 
1636 	return NULL;
1637 }
1638 
1639 static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1640 {
1641 	if (key_size)
1642 		return kvmemdup_bpfptr(ukey, key_size);
1643 
1644 	if (!bpfptr_is_null(ukey))
1645 		return ERR_PTR(-EINVAL);
1646 
1647 	return NULL;
1648 }
1649 
1650 /* last field in 'union bpf_attr' used by this command */
1651 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1652 
1653 static int map_lookup_elem(union bpf_attr *attr)
1654 {
1655 	void __user *ukey = u64_to_user_ptr(attr->key);
1656 	void __user *uvalue = u64_to_user_ptr(attr->value);
1657 	struct bpf_map *map;
1658 	void *key, *value;
1659 	u32 value_size;
1660 	int err;
1661 
1662 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1663 		return -EINVAL;
1664 
1665 	if (attr->flags & ~BPF_F_LOCK)
1666 		return -EINVAL;
1667 
1668 	CLASS(fd, f)(attr->map_fd);
1669 	map = __bpf_map_get(f);
1670 	if (IS_ERR(map))
1671 		return PTR_ERR(map);
1672 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
1673 		return -EPERM;
1674 
1675 	if ((attr->flags & BPF_F_LOCK) &&
1676 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1677 		return -EINVAL;
1678 
1679 	key = __bpf_copy_key(ukey, map->key_size);
1680 	if (IS_ERR(key))
1681 		return PTR_ERR(key);
1682 
1683 	value_size = bpf_map_value_size(map);
1684 
1685 	err = -ENOMEM;
1686 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1687 	if (!value)
1688 		goto free_key;
1689 
1690 	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
1691 		if (copy_from_user(value, uvalue, value_size))
1692 			err = -EFAULT;
1693 		else
1694 			err = bpf_map_copy_value(map, key, value, attr->flags);
1695 		goto free_value;
1696 	}
1697 
1698 	err = bpf_map_copy_value(map, key, value, attr->flags);
1699 	if (err)
1700 		goto free_value;
1701 
1702 	err = -EFAULT;
1703 	if (copy_to_user(uvalue, value, value_size) != 0)
1704 		goto free_value;
1705 
1706 	err = 0;
1707 
1708 free_value:
1709 	kvfree(value);
1710 free_key:
1711 	kvfree(key);
1712 	return err;
1713 }
1714 
1715 
1716 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1717 
1718 static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1719 {
1720 	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1721 	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1722 	struct bpf_map *map;
1723 	void *key, *value;
1724 	u32 value_size;
1725 	int err;
1726 
1727 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1728 		return -EINVAL;
1729 
1730 	CLASS(fd, f)(attr->map_fd);
1731 	map = __bpf_map_get(f);
1732 	if (IS_ERR(map))
1733 		return PTR_ERR(map);
1734 	bpf_map_write_active_inc(map);
1735 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1736 		err = -EPERM;
1737 		goto err_put;
1738 	}
1739 
1740 	if ((attr->flags & BPF_F_LOCK) &&
1741 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1742 		err = -EINVAL;
1743 		goto err_put;
1744 	}
1745 
1746 	key = ___bpf_copy_key(ukey, map->key_size);
1747 	if (IS_ERR(key)) {
1748 		err = PTR_ERR(key);
1749 		goto err_put;
1750 	}
1751 
1752 	value_size = bpf_map_value_size(map);
1753 	value = kvmemdup_bpfptr(uvalue, value_size);
1754 	if (IS_ERR(value)) {
1755 		err = PTR_ERR(value);
1756 		goto free_key;
1757 	}
1758 
1759 	err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags);
1760 	if (!err)
1761 		maybe_wait_bpf_programs(map);
1762 
1763 	kvfree(value);
1764 free_key:
1765 	kvfree(key);
1766 err_put:
1767 	bpf_map_write_active_dec(map);
1768 	return err;
1769 }
1770 
1771 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1772 
1773 static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
1774 {
1775 	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1776 	struct bpf_map *map;
1777 	void *key;
1778 	int err;
1779 
1780 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1781 		return -EINVAL;
1782 
1783 	CLASS(fd, f)(attr->map_fd);
1784 	map = __bpf_map_get(f);
1785 	if (IS_ERR(map))
1786 		return PTR_ERR(map);
1787 	bpf_map_write_active_inc(map);
1788 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1789 		err = -EPERM;
1790 		goto err_put;
1791 	}
1792 
1793 	key = ___bpf_copy_key(ukey, map->key_size);
1794 	if (IS_ERR(key)) {
1795 		err = PTR_ERR(key);
1796 		goto err_put;
1797 	}
1798 
1799 	if (bpf_map_is_offloaded(map)) {
1800 		err = bpf_map_offload_delete_elem(map, key);
1801 		goto out;
1802 	} else if (IS_FD_PROG_ARRAY(map) ||
1803 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1804 		/* These maps require sleepable context */
1805 		err = map->ops->map_delete_elem(map, key);
1806 		goto out;
1807 	}
1808 
1809 	bpf_disable_instrumentation();
1810 	rcu_read_lock();
1811 	err = map->ops->map_delete_elem(map, key);
1812 	rcu_read_unlock();
1813 	bpf_enable_instrumentation();
1814 	if (!err)
1815 		maybe_wait_bpf_programs(map);
1816 out:
1817 	kvfree(key);
1818 err_put:
1819 	bpf_map_write_active_dec(map);
1820 	return err;
1821 }
1822 
1823 /* last field in 'union bpf_attr' used by this command */
1824 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1825 
1826 static int map_get_next_key(union bpf_attr *attr)
1827 {
1828 	void __user *ukey = u64_to_user_ptr(attr->key);
1829 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1830 	struct bpf_map *map;
1831 	void *key, *next_key;
1832 	int err;
1833 
1834 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1835 		return -EINVAL;
1836 
1837 	CLASS(fd, f)(attr->map_fd);
1838 	map = __bpf_map_get(f);
1839 	if (IS_ERR(map))
1840 		return PTR_ERR(map);
1841 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
1842 		return -EPERM;
1843 
1844 	if (ukey) {
1845 		key = __bpf_copy_key(ukey, map->key_size);
1846 		if (IS_ERR(key))
1847 			return PTR_ERR(key);
1848 	} else {
1849 		key = NULL;
1850 	}
1851 
1852 	err = -ENOMEM;
1853 	next_key = kvmalloc(map->key_size, GFP_USER);
1854 	if (!next_key)
1855 		goto free_key;
1856 
1857 	if (bpf_map_is_offloaded(map)) {
1858 		err = bpf_map_offload_get_next_key(map, key, next_key);
1859 		goto out;
1860 	}
1861 
1862 	rcu_read_lock();
1863 	err = map->ops->map_get_next_key(map, key, next_key);
1864 	rcu_read_unlock();
1865 out:
1866 	if (err)
1867 		goto free_next_key;
1868 
1869 	err = -EFAULT;
1870 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1871 		goto free_next_key;
1872 
1873 	err = 0;
1874 
1875 free_next_key:
1876 	kvfree(next_key);
1877 free_key:
1878 	kvfree(key);
1879 	return err;
1880 }
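
/*
 * Sketch of the usual userspace iteration pattern built on this command
 * (raw bpf(2) shown; libbpf's bpf_map_get_next_key() wraps the same call).
 * Passing a NULL key starts the walk from the first key, and most map types
 * return -ENOENT once the last key has been handed out:
 *
 *	__u32 cur, next;
 *	union bpf_attr attr = {
 *		.map_fd   = map_fd,
 *		.key      = 0,
 *		.next_key = (__u64)(unsigned long)&next,
 *	};
 *
 *	while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
 *		... look up or delete "next" here ...
 *		cur = next;
 *		attr.key = (__u64)(unsigned long)&cur;
 *	}
 */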
1881 
1882 int generic_map_delete_batch(struct bpf_map *map,
1883 			     const union bpf_attr *attr,
1884 			     union bpf_attr __user *uattr)
1885 {
1886 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1887 	u32 cp, max_count;
1888 	int err = 0;
1889 	void *key;
1890 
1891 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1892 		return -EINVAL;
1893 
1894 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1895 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1896 		return -EINVAL;
1897 	}
1898 
1899 	max_count = attr->batch.count;
1900 	if (!max_count)
1901 		return 0;
1902 
1903 	if (put_user(0, &uattr->batch.count))
1904 		return -EFAULT;
1905 
1906 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1907 	if (!key)
1908 		return -ENOMEM;
1909 
1910 	for (cp = 0; cp < max_count; cp++) {
1911 		err = -EFAULT;
1912 		if (copy_from_user(key, keys + cp * map->key_size,
1913 				   map->key_size))
1914 			break;
1915 
1916 		if (bpf_map_is_offloaded(map)) {
1917 			err = bpf_map_offload_delete_elem(map, key);
1918 			break;
1919 		}
1920 
1921 		bpf_disable_instrumentation();
1922 		rcu_read_lock();
1923 		err = map->ops->map_delete_elem(map, key);
1924 		rcu_read_unlock();
1925 		bpf_enable_instrumentation();
1926 		if (err)
1927 			break;
1928 		cond_resched();
1929 	}
1930 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1931 		err = -EFAULT;
1932 
1933 	kvfree(key);
1934 
1935 	return err;
1936 }
1937 
1938 int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
1939 			     const union bpf_attr *attr,
1940 			     union bpf_attr __user *uattr)
1941 {
1942 	void __user *values = u64_to_user_ptr(attr->batch.values);
1943 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1944 	u32 value_size, cp, max_count;
1945 	void *key, *value;
1946 	int err = 0;
1947 
1948 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1949 		return -EINVAL;
1950 
1951 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1952 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1953 		return -EINVAL;
1954 	}
1955 
1956 	value_size = bpf_map_value_size(map);
1957 
1958 	max_count = attr->batch.count;
1959 	if (!max_count)
1960 		return 0;
1961 
1962 	if (put_user(0, &uattr->batch.count))
1963 		return -EFAULT;
1964 
1965 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1966 	if (!key)
1967 		return -ENOMEM;
1968 
1969 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1970 	if (!value) {
1971 		kvfree(key);
1972 		return -ENOMEM;
1973 	}
1974 
1975 	for (cp = 0; cp < max_count; cp++) {
1976 		err = -EFAULT;
1977 		if (copy_from_user(key, keys + cp * map->key_size,
1978 		    map->key_size) ||
1979 		    copy_from_user(value, values + cp * value_size, value_size))
1980 			break;
1981 
1982 		err = bpf_map_update_value(map, map_file, key, value,
1983 					   attr->batch.elem_flags);
1984 
1985 		if (err)
1986 			break;
1987 		cond_resched();
1988 	}
1989 
1990 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1991 		err = -EFAULT;
1992 
1993 	kvfree(value);
1994 	kvfree(key);
1995 
1996 	return err;
1997 }
1998 
1999 int generic_map_lookup_batch(struct bpf_map *map,
2000 			     const union bpf_attr *attr,
2001 			     union bpf_attr __user *uattr)
2002 {
2003 	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
2004 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
2005 	void __user *values = u64_to_user_ptr(attr->batch.values);
2006 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
2007 	void *buf, *buf_prevkey, *prev_key, *key, *value;
2008 	u32 value_size, cp, max_count;
2009 	int err;
2010 
2011 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
2012 		return -EINVAL;
2013 
2014 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
2015 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
2016 		return -EINVAL;
2017 
2018 	value_size = bpf_map_value_size(map);
2019 
2020 	max_count = attr->batch.count;
2021 	if (!max_count)
2022 		return 0;
2023 
2024 	if (put_user(0, &uattr->batch.count))
2025 		return -EFAULT;
2026 
2027 	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
2028 	if (!buf_prevkey)
2029 		return -ENOMEM;
2030 
2031 	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
2032 	if (!buf) {
2033 		kvfree(buf_prevkey);
2034 		return -ENOMEM;
2035 	}
2036 
2037 	err = -EFAULT;
2038 	prev_key = NULL;
2039 	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
2040 		goto free_buf;
2041 	key = buf;
2042 	value = key + map->key_size;
2043 	if (ubatch)
2044 		prev_key = buf_prevkey;
2045 
2046 	for (cp = 0; cp < max_count;) {
2047 		rcu_read_lock();
2048 		err = map->ops->map_get_next_key(map, prev_key, key);
2049 		rcu_read_unlock();
2050 		if (err)
2051 			break;
2052 		err = bpf_map_copy_value(map, key, value,
2053 					 attr->batch.elem_flags);
2054 
2055 		if (err == -ENOENT)
2056 			goto next_key;
2057 
2058 		if (err)
2059 			goto free_buf;
2060 
2061 		if (copy_to_user(keys + cp * map->key_size, key,
2062 				 map->key_size)) {
2063 			err = -EFAULT;
2064 			goto free_buf;
2065 		}
2066 		if (copy_to_user(values + cp * value_size, value, value_size)) {
2067 			err = -EFAULT;
2068 			goto free_buf;
2069 		}
2070 
2071 		cp++;
2072 next_key:
2073 		if (!prev_key)
2074 			prev_key = buf_prevkey;
2075 
2076 		swap(prev_key, key);
2077 		cond_resched();
2078 	}
2079 
2080 	if (err == -EFAULT)
2081 		goto free_buf;
2082 
2083 	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
2084 		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
2085 		err = -EFAULT;
2086 
2087 free_buf:
2088 	kvfree(buf_prevkey);
2089 	kvfree(buf);
2090 	return err;
2091 }
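
/*
 * Illustrative user-space sketch (not kernel code) of the lookup batch
 * protocol implemented above: out_batch receives an opaque resume token (for
 * this generic implementation, the last key visited) which is fed back via
 * in_batch on the next call; -ENOENT means the whole map has been traversed.
 * map_fd, KEY_SZ, BATCH_SZ and the keys[]/values[] buffers are assumed to be
 * set up by the caller.
 *
 *	union bpf_attr attr = {};
 *	char in_tok[KEY_SZ], out_tok[KEY_SZ];	// opaque, >= key_size bytes
 *	bool first = true;
 *	int err;
 *
 *	attr.batch.map_fd = map_fd;
 *	attr.batch.keys = (__u64)(unsigned long)keys;
 *	attr.batch.values = (__u64)(unsigned long)values;
 *	attr.batch.out_batch = (__u64)(unsigned long)out_tok;
 *	for (;;) {
 *		attr.batch.in_batch = first ? 0 : (__u64)(unsigned long)in_tok;
 *		attr.batch.count = BATCH_SZ;
 *		err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *		// attr.batch.count now holds the number of elements returned;
 *		// consume them even on -ENOENT (the final, partial batch).
 *		if (err && errno == ENOENT)
 *			break;			// iteration complete
 *		if (err)
 *			return -errno;
 *		memcpy(in_tok, out_tok, sizeof(in_tok));	// resume token
 *		first = false;
 *	}
 */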
2092 
2093 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
2094 
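/* Handler for the BPF_MAP_LOOKUP_AND_DELETE_ELEM command. Only queue/stack
 * maps (where this is a pop operation and no key is used) and the
 * non-offloaded hash map variants are supported; other map types get
 * -ENOTSUPP.
 */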
2095 static int map_lookup_and_delete_elem(union bpf_attr *attr)
2096 {
2097 	void __user *ukey = u64_to_user_ptr(attr->key);
2098 	void __user *uvalue = u64_to_user_ptr(attr->value);
2099 	struct bpf_map *map;
2100 	void *key, *value;
2101 	u32 value_size;
2102 	int err;
2103 
2104 	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
2105 		return -EINVAL;
2106 
2107 	if (attr->flags & ~BPF_F_LOCK)
2108 		return -EINVAL;
2109 
2110 	CLASS(fd, f)(attr->map_fd);
2111 	map = __bpf_map_get(f);
2112 	if (IS_ERR(map))
2113 		return PTR_ERR(map);
2114 	bpf_map_write_active_inc(map);
2115 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
2116 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
2117 		err = -EPERM;
2118 		goto err_put;
2119 	}
2120 
2121 	if (attr->flags &&
2122 	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
2123 	     map->map_type == BPF_MAP_TYPE_STACK)) {
2124 		err = -EINVAL;
2125 		goto err_put;
2126 	}
2127 
2128 	if ((attr->flags & BPF_F_LOCK) &&
2129 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
2130 		err = -EINVAL;
2131 		goto err_put;
2132 	}
2133 
2134 	key = __bpf_copy_key(ukey, map->key_size);
2135 	if (IS_ERR(key)) {
2136 		err = PTR_ERR(key);
2137 		goto err_put;
2138 	}
2139 
2140 	value_size = bpf_map_value_size(map);
2141 
2142 	err = -ENOMEM;
2143 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
2144 	if (!value)
2145 		goto free_key;
2146 
2147 	err = -ENOTSUPP;
2148 	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
2149 	    map->map_type == BPF_MAP_TYPE_STACK) {
2150 		err = map->ops->map_pop_elem(map, value);
2151 	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
2152 		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2153 		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
2154 		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2155 		if (!bpf_map_is_offloaded(map)) {
2156 			bpf_disable_instrumentation();
2157 			rcu_read_lock();
2158 			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
2159 			rcu_read_unlock();
2160 			bpf_enable_instrumentation();
2161 		}
2162 	}
2163 
2164 	if (err)
2165 		goto free_value;
2166 
2167 	if (copy_to_user(uvalue, value, value_size) != 0) {
2168 		err = -EFAULT;
2169 		goto free_value;
2170 	}
2171 
2172 	err = 0;
2173 
2174 free_value:
2175 	kvfree(value);
2176 free_key:
2177 	kvfree(key);
2178 err_put:
2179 	bpf_map_write_active_dec(map);
2180 	return err;
2181 }
2182 
2183 #define BPF_MAP_FREEZE_LAST_FIELD map_fd
2184 
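/* Handler for the BPF_MAP_FREEZE command: marks the map as frozen so that all
 * future writes through the syscall interface are rejected, while BPF
 * programs may still update it. Freezing fails with -EBUSY if a syscall-side
 * write is still in flight or the map is already frozen.
 */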
2185 static int map_freeze(const union bpf_attr *attr)
2186 {
2187 	int err = 0;
2188 	struct bpf_map *map;
2189 
2190 	if (CHECK_ATTR(BPF_MAP_FREEZE))
2191 		return -EINVAL;
2192 
2193 	CLASS(fd, f)(attr->map_fd);
2194 	map = __bpf_map_get(f);
2195 	if (IS_ERR(map))
2196 		return PTR_ERR(map);
2197 
2198 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record))
2199 		return -ENOTSUPP;
2200 
2201 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE))
2202 		return -EPERM;
2203 
2204 	mutex_lock(&map->freeze_mutex);
2205 	if (bpf_map_write_active(map)) {
2206 		err = -EBUSY;
2207 		goto err_put;
2208 	}
2209 	if (READ_ONCE(map->frozen)) {
2210 		err = -EBUSY;
2211 		goto err_put;
2212 	}
2213 
2214 	WRITE_ONCE(map->frozen, true);
2215 err_put:
2216 	mutex_unlock(&map->freeze_mutex);
2217 	return err;
2218 }
2219 
2220 static const struct bpf_prog_ops * const bpf_prog_types[] = {
2221 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2222 	[_id] = & _name ## _prog_ops,
2223 #define BPF_MAP_TYPE(_id, _ops)
2224 #define BPF_LINK_TYPE(_id, _name)
2225 #include <linux/bpf_types.h>
2226 #undef BPF_PROG_TYPE
2227 #undef BPF_MAP_TYPE
2228 #undef BPF_LINK_TYPE
2229 };
2230 
2231 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
2232 {
2233 	const struct bpf_prog_ops *ops;
2234 
2235 	if (type >= ARRAY_SIZE(bpf_prog_types))
2236 		return -EINVAL;
2237 	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
2238 	ops = bpf_prog_types[type];
2239 	if (!ops)
2240 		return -EINVAL;
2241 
2242 	if (!bpf_prog_is_offloaded(prog->aux))
2243 		prog->aux->ops = ops;
2244 	else
2245 		prog->aux->ops = &bpf_offload_prog_ops;
2246 	prog->type = type;
2247 	return 0;
2248 }
2249 
2250 enum bpf_audit {
2251 	BPF_AUDIT_LOAD,
2252 	BPF_AUDIT_UNLOAD,
2253 	BPF_AUDIT_MAX,
2254 };
2255 
2256 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
2257 	[BPF_AUDIT_LOAD]   = "LOAD",
2258 	[BPF_AUDIT_UNLOAD] = "UNLOAD",
2259 };
2260 
2261 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
2262 {
2263 	struct audit_context *ctx = NULL;
2264 	struct audit_buffer *ab;
2265 
2266 	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
2267 		return;
2268 	if (audit_enabled == AUDIT_OFF)
2269 		return;
2270 	if (!in_irq() && !irqs_disabled())
2271 		ctx = audit_context();
2272 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
2273 	if (unlikely(!ab))
2274 		return;
2275 	audit_log_format(ab, "prog-id=%u op=%s",
2276 			 prog->aux->id, bpf_audit_str[op]);
2277 	audit_log_end(ab);
2278 }
2279 
2280 static int bpf_prog_alloc_id(struct bpf_prog *prog)
2281 {
2282 	int id;
2283 
2284 	idr_preload(GFP_KERNEL);
2285 	spin_lock_bh(&prog_idr_lock);
2286 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
2287 	if (id > 0)
2288 		prog->aux->id = id;
2289 	spin_unlock_bh(&prog_idr_lock);
2290 	idr_preload_end();
2291 
2292 	/* id is in [1, INT_MAX) */
2293 	if (WARN_ON_ONCE(!id))
2294 		return -ENOSPC;
2295 
2296 	return id > 0 ? 0 : id;
2297 }
2298 
2299 void bpf_prog_free_id(struct bpf_prog *prog)
2300 {
2301 	unsigned long flags;
2302 
2303 	/* cBPF to eBPF migrations are currently not in the idr store.
2304 	 * Offloaded programs are removed from the store when their device
2305 	 * disappears - even if someone grabs an fd to them they are unusable,
2306 	 * simply waiting for refcnt to drop to be freed.
2307 	 */
2308 	if (!prog->aux->id)
2309 		return;
2310 
2311 	spin_lock_irqsave(&prog_idr_lock, flags);
2312 	idr_remove(&prog_idr, prog->aux->id);
2313 	prog->aux->id = 0;
2314 	spin_unlock_irqrestore(&prog_idr_lock, flags);
2315 }
2316 
2317 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2318 {
2319 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2320 
2321 	kvfree(aux->func_info);
2322 	kfree(aux->func_info_aux);
2323 	free_uid(aux->user);
2324 	security_bpf_prog_free(aux->prog);
2325 	bpf_prog_free(aux->prog);
2326 }
2327 
2328 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2329 {
2330 	bpf_prog_kallsyms_del_all(prog);
2331 	btf_put(prog->aux->btf);
2332 	module_put(prog->aux->mod);
2333 	kvfree(prog->aux->jited_linfo);
2334 	kvfree(prog->aux->linfo);
2335 	kfree(prog->aux->kfunc_tab);
2336 	kfree(prog->aux->ctx_arg_info);
2337 	if (prog->aux->attach_btf)
2338 		btf_put(prog->aux->attach_btf);
2339 
2340 	if (deferred) {
2341 		if (prog->sleepable)
2342 			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2343 		else
2344 			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2345 	} else {
2346 		__bpf_prog_put_rcu(&prog->aux->rcu);
2347 	}
2348 }
2349 
2350 static void bpf_prog_put_deferred(struct work_struct *work)
2351 {
2352 	struct bpf_prog_aux *aux;
2353 	struct bpf_prog *prog;
2354 
2355 	aux = container_of(work, struct bpf_prog_aux, work);
2356 	prog = aux->prog;
2357 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2358 	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2359 	bpf_prog_free_id(prog);
2360 	__bpf_prog_put_noref(prog, true);
2361 }
2362 
2363 static void __bpf_prog_put(struct bpf_prog *prog)
2364 {
2365 	struct bpf_prog_aux *aux = prog->aux;
2366 
2367 	if (atomic64_dec_and_test(&aux->refcnt)) {
2368 		if (in_irq() || irqs_disabled()) {
2369 			INIT_WORK(&aux->work, bpf_prog_put_deferred);
2370 			schedule_work(&aux->work);
2371 		} else {
2372 			bpf_prog_put_deferred(&aux->work);
2373 		}
2374 	}
2375 }
2376 
2377 void bpf_prog_put(struct bpf_prog *prog)
2378 {
2379 	__bpf_prog_put(prog);
2380 }
2381 EXPORT_SYMBOL_GPL(bpf_prog_put);
2382 
2383 static int bpf_prog_release(struct inode *inode, struct file *filp)
2384 {
2385 	struct bpf_prog *prog = filp->private_data;
2386 
2387 	bpf_prog_put(prog);
2388 	return 0;
2389 }
2390 
2391 struct bpf_prog_kstats {
2392 	u64 nsecs;
2393 	u64 cnt;
2394 	u64 misses;
2395 };
2396 
2397 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2398 {
2399 	struct bpf_prog_stats *stats;
2400 	unsigned int flags;
2401 
2402 	stats = this_cpu_ptr(prog->stats);
2403 	flags = u64_stats_update_begin_irqsave(&stats->syncp);
2404 	u64_stats_inc(&stats->misses);
2405 	u64_stats_update_end_irqrestore(&stats->syncp, flags);
2406 }
2407 
2408 static void bpf_prog_get_stats(const struct bpf_prog *prog,
2409 			       struct bpf_prog_kstats *stats)
2410 {
2411 	u64 nsecs = 0, cnt = 0, misses = 0;
2412 	int cpu;
2413 
2414 	for_each_possible_cpu(cpu) {
2415 		const struct bpf_prog_stats *st;
2416 		unsigned int start;
2417 		u64 tnsecs, tcnt, tmisses;
2418 
2419 		st = per_cpu_ptr(prog->stats, cpu);
2420 		do {
2421 			start = u64_stats_fetch_begin(&st->syncp);
2422 			tnsecs = u64_stats_read(&st->nsecs);
2423 			tcnt = u64_stats_read(&st->cnt);
2424 			tmisses = u64_stats_read(&st->misses);
2425 		} while (u64_stats_fetch_retry(&st->syncp, start));
2426 		nsecs += tnsecs;
2427 		cnt += tcnt;
2428 		misses += tmisses;
2429 	}
2430 	stats->nsecs = nsecs;
2431 	stats->cnt = cnt;
2432 	stats->misses = misses;
2433 }
2434 
2435 #ifdef CONFIG_PROC_FS
2436 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2437 {
2438 	const struct bpf_prog *prog = filp->private_data;
2439 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2440 	struct bpf_prog_kstats stats;
2441 
2442 	bpf_prog_get_stats(prog, &stats);
2443 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2444 	seq_printf(m,
2445 		   "prog_type:\t%u\n"
2446 		   "prog_jited:\t%u\n"
2447 		   "prog_tag:\t%s\n"
2448 		   "memlock:\t%llu\n"
2449 		   "prog_id:\t%u\n"
2450 		   "run_time_ns:\t%llu\n"
2451 		   "run_cnt:\t%llu\n"
2452 		   "recursion_misses:\t%llu\n"
2453 		   "verified_insns:\t%u\n",
2454 		   prog->type,
2455 		   prog->jited,
2456 		   prog_tag,
2457 		   prog->pages * 1ULL << PAGE_SHIFT,
2458 		   prog->aux->id,
2459 		   stats.nsecs,
2460 		   stats.cnt,
2461 		   stats.misses,
2462 		   prog->aux->verified_insns);
2463 }
2464 #endif
2465 
2466 const struct file_operations bpf_prog_fops = {
2467 #ifdef CONFIG_PROC_FS
2468 	.show_fdinfo	= bpf_prog_show_fdinfo,
2469 #endif
2470 	.release	= bpf_prog_release,
2471 	.read		= bpf_dummy_read,
2472 	.write		= bpf_dummy_write,
2473 };
2474 
2475 int bpf_prog_new_fd(struct bpf_prog *prog)
2476 {
2477 	int ret;
2478 
2479 	ret = security_bpf_prog(prog);
2480 	if (ret < 0)
2481 		return ret;
2482 
2483 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2484 				O_RDWR | O_CLOEXEC);
2485 }
2486 
2487 void bpf_prog_add(struct bpf_prog *prog, int i)
2488 {
2489 	atomic64_add(i, &prog->aux->refcnt);
2490 }
2491 EXPORT_SYMBOL_GPL(bpf_prog_add);
2492 
2493 void bpf_prog_sub(struct bpf_prog *prog, int i)
2494 {
2495 	/* Only to be used for undoing previous bpf_prog_add() in some
2496 	 * error path. We still know that another entity in our call
2497 	 * path holds a reference to the program, thus atomic_sub() can
2498 	 * be safely used in such cases!
2499 	 */
2500 	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2501 }
2502 EXPORT_SYMBOL_GPL(bpf_prog_sub);
2503 
2504 void bpf_prog_inc(struct bpf_prog *prog)
2505 {
2506 	atomic64_inc(&prog->aux->refcnt);
2507 }
2508 EXPORT_SYMBOL_GPL(bpf_prog_inc);
2509 
2510 /* prog_idr_lock should have been held */
2511 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2512 {
2513 	int refold;
2514 
2515 	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2516 
2517 	if (!refold)
2518 		return ERR_PTR(-ENOENT);
2519 
2520 	return prog;
2521 }
2522 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2523 
2524 bool bpf_prog_get_ok(struct bpf_prog *prog,
2525 			    enum bpf_prog_type *attach_type, bool attach_drv)
2526 {
2527 	/* not an attachment, just a refcount inc, always allow */
2528 	if (!attach_type)
2529 		return true;
2530 
2531 	if (prog->type != *attach_type)
2532 		return false;
2533 	if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
2534 		return false;
2535 
2536 	return true;
2537 }
2538 
2539 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2540 				       bool attach_drv)
2541 {
2542 	CLASS(fd, f)(ufd);
2543 	struct bpf_prog *prog;
2544 
2545 	if (fd_empty(f))
2546 		return ERR_PTR(-EBADF);
2547 	if (fd_file(f)->f_op != &bpf_prog_fops)
2548 		return ERR_PTR(-EINVAL);
2549 
2550 	prog = fd_file(f)->private_data;
2551 	if (!bpf_prog_get_ok(prog, attach_type, attach_drv))
2552 		return ERR_PTR(-EINVAL);
2553 
2554 	bpf_prog_inc(prog);
2555 	return prog;
2556 }
2557 
2558 struct bpf_prog *bpf_prog_get(u32 ufd)
2559 {
2560 	return __bpf_prog_get(ufd, NULL, false);
2561 }
2562 
2563 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2564 				       bool attach_drv)
2565 {
2566 	return __bpf_prog_get(ufd, &type, attach_drv);
2567 }
2568 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2569 
2570 /* Initially all BPF programs could be loaded w/o specifying
2571  * expected_attach_type. Later for some of them specifying expected_attach_type
2572  * at load time became required so that the program could be validated properly.
2573  * Programs of types that are allowed to be loaded both w/ and w/o (for
2574  * backward compatibility) expected_attach_type should have the default attach
2575  * type assigned to expected_attach_type for the latter case, so that it can be
2576  * validated later at attach time.
2577  *
2578  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
2579  * prog type requires it but has some attach types that have to be backward
2580  * compatible.
2581  */
2582 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2583 {
2584 	switch (attr->prog_type) {
2585 	case BPF_PROG_TYPE_CGROUP_SOCK:
2586 		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2587 		 * exist so checking for non-zero is the way to go here.
2588 		 */
2589 		if (!attr->expected_attach_type)
2590 			attr->expected_attach_type =
2591 				BPF_CGROUP_INET_SOCK_CREATE;
2592 		break;
2593 	case BPF_PROG_TYPE_SK_REUSEPORT:
2594 		if (!attr->expected_attach_type)
2595 			attr->expected_attach_type =
2596 				BPF_SK_REUSEPORT_SELECT;
2597 		break;
2598 	}
2599 }
2600 
2601 static int
2602 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2603 			   enum bpf_attach_type expected_attach_type,
2604 			   struct btf *attach_btf, u32 btf_id,
2605 			   struct bpf_prog *dst_prog)
2606 {
2607 	if (btf_id) {
2608 		if (btf_id > BTF_MAX_TYPE)
2609 			return -EINVAL;
2610 
2611 		if (!attach_btf && !dst_prog)
2612 			return -EINVAL;
2613 
2614 		switch (prog_type) {
2615 		case BPF_PROG_TYPE_TRACING:
2616 		case BPF_PROG_TYPE_LSM:
2617 		case BPF_PROG_TYPE_STRUCT_OPS:
2618 		case BPF_PROG_TYPE_EXT:
2619 			break;
2620 		default:
2621 			return -EINVAL;
2622 		}
2623 	}
2624 
2625 	if (attach_btf && (!btf_id || dst_prog))
2626 		return -EINVAL;
2627 
2628 	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2629 	    prog_type != BPF_PROG_TYPE_EXT)
2630 		return -EINVAL;
2631 
2632 	switch (prog_type) {
2633 	case BPF_PROG_TYPE_CGROUP_SOCK:
2634 		switch (expected_attach_type) {
2635 		case BPF_CGROUP_INET_SOCK_CREATE:
2636 		case BPF_CGROUP_INET_SOCK_RELEASE:
2637 		case BPF_CGROUP_INET4_POST_BIND:
2638 		case BPF_CGROUP_INET6_POST_BIND:
2639 			return 0;
2640 		default:
2641 			return -EINVAL;
2642 		}
2643 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2644 		switch (expected_attach_type) {
2645 		case BPF_CGROUP_INET4_BIND:
2646 		case BPF_CGROUP_INET6_BIND:
2647 		case BPF_CGROUP_INET4_CONNECT:
2648 		case BPF_CGROUP_INET6_CONNECT:
2649 		case BPF_CGROUP_UNIX_CONNECT:
2650 		case BPF_CGROUP_INET4_GETPEERNAME:
2651 		case BPF_CGROUP_INET6_GETPEERNAME:
2652 		case BPF_CGROUP_UNIX_GETPEERNAME:
2653 		case BPF_CGROUP_INET4_GETSOCKNAME:
2654 		case BPF_CGROUP_INET6_GETSOCKNAME:
2655 		case BPF_CGROUP_UNIX_GETSOCKNAME:
2656 		case BPF_CGROUP_UDP4_SENDMSG:
2657 		case BPF_CGROUP_UDP6_SENDMSG:
2658 		case BPF_CGROUP_UNIX_SENDMSG:
2659 		case BPF_CGROUP_UDP4_RECVMSG:
2660 		case BPF_CGROUP_UDP6_RECVMSG:
2661 		case BPF_CGROUP_UNIX_RECVMSG:
2662 			return 0;
2663 		default:
2664 			return -EINVAL;
2665 		}
2666 	case BPF_PROG_TYPE_CGROUP_SKB:
2667 		switch (expected_attach_type) {
2668 		case BPF_CGROUP_INET_INGRESS:
2669 		case BPF_CGROUP_INET_EGRESS:
2670 			return 0;
2671 		default:
2672 			return -EINVAL;
2673 		}
2674 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2675 		switch (expected_attach_type) {
2676 		case BPF_CGROUP_SETSOCKOPT:
2677 		case BPF_CGROUP_GETSOCKOPT:
2678 			return 0;
2679 		default:
2680 			return -EINVAL;
2681 		}
2682 	case BPF_PROG_TYPE_SK_LOOKUP:
2683 		if (expected_attach_type == BPF_SK_LOOKUP)
2684 			return 0;
2685 		return -EINVAL;
2686 	case BPF_PROG_TYPE_SK_REUSEPORT:
2687 		switch (expected_attach_type) {
2688 		case BPF_SK_REUSEPORT_SELECT:
2689 		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2690 			return 0;
2691 		default:
2692 			return -EINVAL;
2693 		}
2694 	case BPF_PROG_TYPE_NETFILTER:
2695 		if (expected_attach_type == BPF_NETFILTER)
2696 			return 0;
2697 		return -EINVAL;
2698 	case BPF_PROG_TYPE_SYSCALL:
2699 	case BPF_PROG_TYPE_EXT:
2700 		if (expected_attach_type)
2701 			return -EINVAL;
2702 		fallthrough;
2703 	default:
2704 		return 0;
2705 	}
2706 }
2707 
2708 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2709 {
2710 	switch (prog_type) {
2711 	case BPF_PROG_TYPE_SCHED_CLS:
2712 	case BPF_PROG_TYPE_SCHED_ACT:
2713 	case BPF_PROG_TYPE_XDP:
2714 	case BPF_PROG_TYPE_LWT_IN:
2715 	case BPF_PROG_TYPE_LWT_OUT:
2716 	case BPF_PROG_TYPE_LWT_XMIT:
2717 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2718 	case BPF_PROG_TYPE_SK_SKB:
2719 	case BPF_PROG_TYPE_SK_MSG:
2720 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2721 	case BPF_PROG_TYPE_CGROUP_DEVICE:
2722 	case BPF_PROG_TYPE_CGROUP_SOCK:
2723 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2724 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2725 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2726 	case BPF_PROG_TYPE_SOCK_OPS:
2727 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2728 	case BPF_PROG_TYPE_NETFILTER:
2729 		return true;
2730 	case BPF_PROG_TYPE_CGROUP_SKB:
2731 		/* always unpriv */
2732 	case BPF_PROG_TYPE_SK_REUSEPORT:
2733 		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
2734 	default:
2735 		return false;
2736 	}
2737 }
2738 
2739 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2740 {
2741 	switch (prog_type) {
2742 	case BPF_PROG_TYPE_KPROBE:
2743 	case BPF_PROG_TYPE_TRACEPOINT:
2744 	case BPF_PROG_TYPE_PERF_EVENT:
2745 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2746 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2747 	case BPF_PROG_TYPE_TRACING:
2748 	case BPF_PROG_TYPE_LSM:
2749 	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2750 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2751 		return true;
2752 	default:
2753 		return false;
2754 	}
2755 }
2756 
2757 /* last field in 'union bpf_attr' used by this command */
2758 #define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt
2759 
2760 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2761 {
2762 	enum bpf_prog_type type = attr->prog_type;
2763 	struct bpf_prog *prog, *dst_prog = NULL;
2764 	struct btf *attach_btf = NULL;
2765 	struct bpf_token *token = NULL;
2766 	bool bpf_cap;
2767 	int err;
2768 	char license[128];
2769 
2770 	if (CHECK_ATTR(BPF_PROG_LOAD))
2771 		return -EINVAL;
2772 
2773 	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2774 				 BPF_F_ANY_ALIGNMENT |
2775 				 BPF_F_TEST_STATE_FREQ |
2776 				 BPF_F_SLEEPABLE |
2777 				 BPF_F_TEST_RND_HI32 |
2778 				 BPF_F_XDP_HAS_FRAGS |
2779 				 BPF_F_XDP_DEV_BOUND_ONLY |
2780 				 BPF_F_TEST_REG_INVARIANTS |
2781 				 BPF_F_TOKEN_FD))
2782 		return -EINVAL;
2783 
2784 	bpf_prog_load_fixup_attach_type(attr);
2785 
2786 	if (attr->prog_flags & BPF_F_TOKEN_FD) {
2787 		token = bpf_token_get_from_fd(attr->prog_token_fd);
2788 		if (IS_ERR(token))
2789 			return PTR_ERR(token);
2790 		/* if current token doesn't grant prog loading permissions,
2791 		 * then we can't use this token, so ignore it and rely on
2792 		 * system-wide capabilities checks
2793 		 */
2794 		if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
2795 		    !bpf_token_allow_prog_type(token, attr->prog_type,
2796 					       attr->expected_attach_type)) {
2797 			bpf_token_put(token);
2798 			token = NULL;
2799 		}
2800 	}
2801 
2802 	bpf_cap = bpf_token_capable(token, CAP_BPF);
2803 	err = -EPERM;
2804 
2805 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2806 	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2807 	    !bpf_cap)
2808 		goto put_token;
2809 
2810 	/* Intent here is for unprivileged_bpf_disabled to block BPF program
2811 	 * creation for unprivileged users; other actions depend on
2812 	 * fd availability and access to bpffs, and thus on successful
2813 	 * object creation. Even with unprivileged BPF disabled,
2814 	 * capability checks are still carried out for these
2815 	 * and other operations.
2816 	 */
2817 	if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
2818 		goto put_token;
2819 
2820 	if (attr->insn_cnt == 0 ||
2821 	    attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
2822 		err = -E2BIG;
2823 		goto put_token;
2824 	}
2825 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2826 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2827 	    !bpf_cap)
2828 		goto put_token;
2829 
2830 	if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
2831 		goto put_token;
2832 	if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
2833 		goto put_token;
2834 
2835 	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2836 	 * or btf, so we need to check which one it is
2837 	 */
2838 	if (attr->attach_prog_fd) {
2839 		dst_prog = bpf_prog_get(attr->attach_prog_fd);
2840 		if (IS_ERR(dst_prog)) {
2841 			dst_prog = NULL;
2842 			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2843 			if (IS_ERR(attach_btf)) {
2844 				err = -EINVAL;
2845 				goto put_token;
2846 			}
2847 			if (!btf_is_kernel(attach_btf)) {
2848 				/* attaching through specifying bpf_prog's BTF
2849 				 * objects directly might be supported eventually
2850 				 */
2851 				btf_put(attach_btf);
2852 				err = -ENOTSUPP;
2853 				goto put_token;
2854 			}
2855 		}
2856 	} else if (attr->attach_btf_id) {
2857 		/* fall back to vmlinux BTF, if BTF type ID is specified */
2858 		attach_btf = bpf_get_btf_vmlinux();
2859 		if (IS_ERR(attach_btf)) {
2860 			err = PTR_ERR(attach_btf);
2861 			goto put_token;
2862 		}
2863 		if (!attach_btf) {
2864 			err = -EINVAL;
2865 			goto put_token;
2866 		}
2867 		btf_get(attach_btf);
2868 	}
2869 
2870 	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2871 				       attach_btf, attr->attach_btf_id,
2872 				       dst_prog)) {
2873 		if (dst_prog)
2874 			bpf_prog_put(dst_prog);
2875 		if (attach_btf)
2876 			btf_put(attach_btf);
2877 		err = -EINVAL;
2878 		goto put_token;
2879 	}
2880 
2881 	/* plain bpf_prog allocation */
2882 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2883 	if (!prog) {
2884 		if (dst_prog)
2885 			bpf_prog_put(dst_prog);
2886 		if (attach_btf)
2887 			btf_put(attach_btf);
2888 		err = -EINVAL;
2889 		goto put_token;
2890 	}
2891 
2892 	prog->expected_attach_type = attr->expected_attach_type;
2893 	prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
2894 	prog->aux->attach_btf = attach_btf;
2895 	prog->aux->attach_btf_id = attr->attach_btf_id;
2896 	prog->aux->dst_prog = dst_prog;
2897 	prog->aux->dev_bound = !!attr->prog_ifindex;
2898 	prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2899 
2900 	/* move token into prog->aux, reuse taken refcnt */
2901 	prog->aux->token = token;
2902 	token = NULL;
2903 
2904 	prog->aux->user = get_current_user();
2905 	prog->len = attr->insn_cnt;
2906 
2907 	err = -EFAULT;
2908 	if (copy_from_bpfptr(prog->insns,
2909 			     make_bpfptr(attr->insns, uattr.is_kernel),
2910 			     bpf_prog_insn_size(prog)) != 0)
2911 		goto free_prog;
2912 	/* copy eBPF program license from user space */
2913 	if (strncpy_from_bpfptr(license,
2914 				make_bpfptr(attr->license, uattr.is_kernel),
2915 				sizeof(license) - 1) < 0)
2916 		goto free_prog;
2917 	license[sizeof(license) - 1] = 0;
2918 
2919 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2920 	prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2921 
2922 	prog->orig_prog = NULL;
2923 	prog->jited = 0;
2924 
2925 	atomic64_set(&prog->aux->refcnt, 1);
2926 
2927 	if (bpf_prog_is_dev_bound(prog->aux)) {
2928 		err = bpf_prog_dev_bound_init(prog, attr);
2929 		if (err)
2930 			goto free_prog;
2931 	}
2932 
2933 	if (type == BPF_PROG_TYPE_EXT && dst_prog &&
2934 	    bpf_prog_is_dev_bound(dst_prog->aux)) {
2935 		err = bpf_prog_dev_bound_inherit(prog, dst_prog);
2936 		if (err)
2937 			goto free_prog;
2938 	}
2939 
2940 	/*
2941 	 * Bookkeeping for managing the program attachment chain.
2942 	 *
2943 	 * It might be tempting to set the attach_tracing_prog flag at attachment
2944 	 * time, but that would not prevent loading a bunch of tracing programs
2945 	 * first and then attaching them to one another.
2946 	 *
2947 	 * The flag attach_tracing_prog is set for the whole program lifecycle, and
2948 	 * doesn't have to be cleared in bpf_tracing_link_release, since tracing
2949 	 * programs cannot change attachment target.
2950 	 */
2951 	if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
2952 	    dst_prog->type == BPF_PROG_TYPE_TRACING) {
2953 		prog->aux->attach_tracing_prog = true;
2954 	}
2955 
2956 	/* find program type: socket_filter vs tracing_filter */
2957 	err = find_prog_type(type, prog);
2958 	if (err < 0)
2959 		goto free_prog;
2960 
2961 	prog->aux->load_time = ktime_get_boottime_ns();
2962 	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2963 			       sizeof(attr->prog_name));
2964 	if (err < 0)
2965 		goto free_prog;
2966 
2967 	err = security_bpf_prog_load(prog, attr, token, uattr.is_kernel);
2968 	if (err)
2969 		goto free_prog_sec;
2970 
2971 	/* run eBPF verifier */
2972 	err = bpf_check(&prog, attr, uattr, uattr_size);
2973 	if (err < 0)
2974 		goto free_used_maps;
2975 
2976 	prog = bpf_prog_select_runtime(prog, &err);
2977 	if (err < 0)
2978 		goto free_used_maps;
2979 
2980 	err = bpf_prog_alloc_id(prog);
2981 	if (err)
2982 		goto free_used_maps;
2983 
2984 	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2985 	 * effectively publicly exposed. However, retrieving via
2986 	 * bpf_prog_get_fd_by_id() will take another reference,
2987 	 * therefore it cannot be gone underneath us.
2988 	 *
2989 	 * Only for the time /after/ successful bpf_prog_new_fd()
2990 	 * and before returning to userspace, we might just hold
2991 	 * one reference and any parallel close on that fd could
2992 	 * rip everything out. Hence, below notifications must
2993 	 * happen before bpf_prog_new_fd().
2994 	 *
2995 	 * Also, any failure handling from this point onwards must
2996 	 * be using bpf_prog_put() given the program is exposed.
2997 	 */
2998 	bpf_prog_kallsyms_add(prog);
2999 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
3000 	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
3001 
3002 	err = bpf_prog_new_fd(prog);
3003 	if (err < 0)
3004 		bpf_prog_put(prog);
3005 	return err;
3006 
3007 free_used_maps:
3008 	/* In case we have subprogs, we need to wait for a grace
3009 	 * period before we can tear down JIT memory since symbols
3010 	 * are already exposed under kallsyms.
3011 	 */
3012 	__bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
3013 	return err;
3014 
3015 free_prog_sec:
3016 	security_bpf_prog_free(prog);
3017 free_prog:
3018 	free_uid(prog->aux->user);
3019 	if (prog->aux->attach_btf)
3020 		btf_put(prog->aux->attach_btf);
3021 	bpf_prog_free(prog);
3022 put_token:
3023 	bpf_token_put(token);
3024 	return err;
3025 }
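
/*
 * Illustrative user-space sketch (not kernel code) of the minimal attr setup
 * this command expects: two instructions ("r0 = 0; exit"), a GPL license
 * string and a program type. Error handling and the optional verifier log
 * are omitted.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {};
 *	int prog_fd;
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insn_cnt = 2;
 *	attr.insns = (__u64)(unsigned long)insns;
 *	attr.license = (__u64)(unsigned long)"GPL";
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */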
3026 
3027 #define BPF_OBJ_LAST_FIELD path_fd
3028 
3029 static int bpf_obj_pin(const union bpf_attr *attr)
3030 {
3031 	int path_fd;
3032 
3033 	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
3034 		return -EINVAL;
3035 
3036 	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
3037 	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
3038 		return -EINVAL;
3039 
3040 	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
3041 	return bpf_obj_pin_user(attr->bpf_fd, path_fd,
3042 				u64_to_user_ptr(attr->pathname));
3043 }
3044 
3045 static int bpf_obj_get(const union bpf_attr *attr)
3046 {
3047 	int path_fd;
3048 
3049 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
3050 	    attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
3051 		return -EINVAL;
3052 
3053 	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
3054 	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
3055 		return -EINVAL;
3056 
3057 	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
3058 	return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
3059 				attr->file_flags);
3060 }
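
/*
 * Illustrative user-space sketch (not kernel code): pinning an object fd to a
 * hypothetical bpffs path with BPF_OBJ_PIN, assuming bpffs is mounted at
 * /sys/fs/bpf. BPF_OBJ_GET later returns a fresh fd for the pinned object
 * when given the same pathname.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.bpf_fd = prog_fd;	// prog, map or link fd obtained earlier
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	err = syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 */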
3061 
3062 /* bpf_link_init_sleepable() lets the caller specify whether the BPF link itself has
3063  * "sleepable" semantics, which normally would mean that BPF link's attach
3064  * hook can dereference link or link's underlying program for some time after
3065  * detachment due to RCU Tasks Trace-based lifetime protection scheme.
3066  * BPF program itself can be non-sleepable, yet, because it's transitively
3067  * reachable through BPF link, its freeing has to be delayed until after RCU
3068  * Tasks Trace GP.
3069  */
3070 void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
3071 			     const struct bpf_link_ops *ops, struct bpf_prog *prog,
3072 			     bool sleepable)
3073 {
3074 	WARN_ON(ops->dealloc && ops->dealloc_deferred);
3075 	atomic64_set(&link->refcnt, 1);
3076 	link->type = type;
3077 	link->sleepable = sleepable;
3078 	link->id = 0;
3079 	link->ops = ops;
3080 	link->prog = prog;
3081 }
3082 
3083 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
3084 		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
3085 {
3086 	bpf_link_init_sleepable(link, type, ops, prog, false);
3087 }
3088 
3089 static void bpf_link_free_id(int id)
3090 {
3091 	if (!id)
3092 		return;
3093 
3094 	spin_lock_bh(&link_idr_lock);
3095 	idr_remove(&link_idr, id);
3096 	spin_unlock_bh(&link_idr_lock);
3097 }
3098 
3099 /* Clean up bpf_link and corresponding anon_inode file and FD. After
3100  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
3101  * anon_inode's release() call. This helper marks bpf_link as
3102  * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
3103  * refcnt is not decremented; that is the responsibility of the calling code
3104  * that failed to complete bpf_link initialization.
3105  * This helper eventually calls link's dealloc callback, but does not call
3106  * link's release callback.
3107  */
3108 void bpf_link_cleanup(struct bpf_link_primer *primer)
3109 {
3110 	primer->link->prog = NULL;
3111 	bpf_link_free_id(primer->id);
3112 	fput(primer->file);
3113 	put_unused_fd(primer->fd);
3114 }
3115 
3116 void bpf_link_inc(struct bpf_link *link)
3117 {
3118 	atomic64_inc(&link->refcnt);
3119 }
3120 
3121 static void bpf_link_dealloc(struct bpf_link *link)
3122 {
3123 	/* now that we know that bpf_link itself can't be reached, put underlying BPF program */
3124 	if (link->prog)
3125 		bpf_prog_put(link->prog);
3126 
3127 	/* free bpf_link and its containing memory */
3128 	if (link->ops->dealloc_deferred)
3129 		link->ops->dealloc_deferred(link);
3130 	else
3131 		link->ops->dealloc(link);
3132 }
3133 
3134 static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
3135 {
3136 	struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
3137 
3138 	bpf_link_dealloc(link);
3139 }
3140 
3141 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
3142 {
3143 	if (rcu_trace_implies_rcu_gp())
3144 		bpf_link_defer_dealloc_rcu_gp(rcu);
3145 	else
3146 		call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
3147 }
3148 
3149 /* bpf_link_free is guaranteed to be called from process context */
3150 static void bpf_link_free(struct bpf_link *link)
3151 {
3152 	const struct bpf_link_ops *ops = link->ops;
3153 
3154 	bpf_link_free_id(link->id);
3155 	/* detach BPF program, clean up used resources */
3156 	if (link->prog)
3157 		ops->release(link);
3158 	if (ops->dealloc_deferred) {
3159 		/* Schedule BPF link deallocation, which will only then
3160 		 * trigger putting BPF program refcount.
3161 		 * If underlying BPF program is sleepable or BPF link's target
3162 		 * attach hookpoint is sleepable or otherwise requires RCU GPs
3163 		 * to ensure link and its underlying BPF program is not
3164 		 * reachable anymore, we need to first wait for RCU tasks
3165 		 * trace sync, and then go through "classic" RCU grace period
3166 		 */
3167 		if (link->sleepable || (link->prog && link->prog->sleepable))
3168 			call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
3169 		else
3170 			call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
3171 	} else if (ops->dealloc) {
3172 		bpf_link_dealloc(link);
3173 	}
3174 }
3175 
3176 static void bpf_link_put_deferred(struct work_struct *work)
3177 {
3178 	struct bpf_link *link = container_of(work, struct bpf_link, work);
3179 
3180 	bpf_link_free(link);
3181 }
3182 
3183 /* bpf_link_put() might be called from atomic context, but freeing the link
3184  * may acquire sleeping locks, so the actual free is deferred to a workqueue.
3185  */
3186 void bpf_link_put(struct bpf_link *link)
3187 {
3188 	if (!atomic64_dec_and_test(&link->refcnt))
3189 		return;
3190 
3191 	INIT_WORK(&link->work, bpf_link_put_deferred);
3192 	schedule_work(&link->work);
3193 }
3194 EXPORT_SYMBOL(bpf_link_put);
3195 
3196 static void bpf_link_put_direct(struct bpf_link *link)
3197 {
3198 	if (!atomic64_dec_and_test(&link->refcnt))
3199 		return;
3200 	bpf_link_free(link);
3201 }
3202 
3203 static int bpf_link_release(struct inode *inode, struct file *filp)
3204 {
3205 	struct bpf_link *link = filp->private_data;
3206 
3207 	bpf_link_put_direct(link);
3208 	return 0;
3209 }
3210 
3211 #ifdef CONFIG_PROC_FS
3212 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
3213 #define BPF_MAP_TYPE(_id, _ops)
3214 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
3215 static const char *bpf_link_type_strs[] = {
3216 	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
3217 #include <linux/bpf_types.h>
3218 };
3219 #undef BPF_PROG_TYPE
3220 #undef BPF_MAP_TYPE
3221 #undef BPF_LINK_TYPE
3222 
3223 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
3224 {
3225 	const struct bpf_link *link = filp->private_data;
3226 	const struct bpf_prog *prog = link->prog;
3227 	enum bpf_link_type type = link->type;
3228 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
3229 
3230 	if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) {
3231 		seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]);
3232 	} else {
3233 		WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type);
3234 		seq_printf(m, "link_type:\t<%u>\n", type);
3235 	}
3236 	seq_printf(m, "link_id:\t%u\n", link->id);
3237 
3238 	if (prog) {
3239 		bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
3240 		seq_printf(m,
3241 			   "prog_tag:\t%s\n"
3242 			   "prog_id:\t%u\n",
3243 			   prog_tag,
3244 			   prog->aux->id);
3245 	}
3246 	if (link->ops->show_fdinfo)
3247 		link->ops->show_fdinfo(link, m);
3248 }
3249 #endif
3250 
3251 static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
3252 {
3253 	struct bpf_link *link = file->private_data;
3254 
3255 	return link->ops->poll(file, pts);
3256 }
3257 
3258 static const struct file_operations bpf_link_fops = {
3259 #ifdef CONFIG_PROC_FS
3260 	.show_fdinfo	= bpf_link_show_fdinfo,
3261 #endif
3262 	.release	= bpf_link_release,
3263 	.read		= bpf_dummy_read,
3264 	.write		= bpf_dummy_write,
3265 };
3266 
3267 static const struct file_operations bpf_link_fops_poll = {
3268 #ifdef CONFIG_PROC_FS
3269 	.show_fdinfo	= bpf_link_show_fdinfo,
3270 #endif
3271 	.release	= bpf_link_release,
3272 	.read		= bpf_dummy_read,
3273 	.write		= bpf_dummy_write,
3274 	.poll		= bpf_link_poll,
3275 };
3276 
3277 static int bpf_link_alloc_id(struct bpf_link *link)
3278 {
3279 	int id;
3280 
3281 	idr_preload(GFP_KERNEL);
3282 	spin_lock_bh(&link_idr_lock);
3283 	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
3284 	spin_unlock_bh(&link_idr_lock);
3285 	idr_preload_end();
3286 
3287 	return id;
3288 }
3289 
3290 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
3291  * reserving unused FD and allocating ID from link_idr. This is to be paired
3292  * with bpf_link_settle() to install FD and ID and expose bpf_link to
3293  * user-space, if bpf_link is successfully attached. If not, bpf_link and
3294  * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
3295  * the transient state is passed around in struct bpf_link_primer.
3296  * This is the preferred way to create and initialize bpf_link, especially when
3297  * there are complicated and expensive operations in between creating bpf_link
3298  * itself and attaching it to a BPF hook. By using bpf_link_prime() and
3299  * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
3300  * expensive (and potentially failing) roll-back operations in the rare case
3301  * that the file, FD, or ID can't be allocated.
3302  */
3303 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
3304 {
3305 	struct file *file;
3306 	int fd, id;
3307 
3308 	fd = get_unused_fd_flags(O_CLOEXEC);
3309 	if (fd < 0)
3310 		return fd;
3311 
3312 
3313 	id = bpf_link_alloc_id(link);
3314 	if (id < 0) {
3315 		put_unused_fd(fd);
3316 		return id;
3317 	}
3318 
3319 	file = anon_inode_getfile("bpf_link",
3320 				  link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
3321 				  link, O_CLOEXEC);
3322 	if (IS_ERR(file)) {
3323 		bpf_link_free_id(id);
3324 		put_unused_fd(fd);
3325 		return PTR_ERR(file);
3326 	}
3327 
3328 	primer->link = link;
3329 	primer->file = file;
3330 	primer->fd = fd;
3331 	primer->id = id;
3332 	return 0;
3333 }
3334 
3335 int bpf_link_settle(struct bpf_link_primer *primer)
3336 {
3337 	/* make bpf_link fetchable by ID */
3338 	spin_lock_bh(&link_idr_lock);
3339 	primer->link->id = primer->id;
3340 	spin_unlock_bh(&link_idr_lock);
3341 	/* make bpf_link fetchable by FD */
3342 	fd_install(primer->fd, primer->file);
3343 	/* pass through installed FD */
3344 	return primer->fd;
3345 }
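
/*
 * Illustrative sketch of the calling sequence the two helpers above expect;
 * the "foo" link type, its ops and foo_hook_attach() are hypothetical, only
 * the ordering matters:
 *
 *	link = kzalloc(sizeof(*link), GFP_USER);
 *	if (!link)
 *		return -ENOMEM;
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_FOO, &foo_link_lops, prog);
 *
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);		// not exposed yet, plain kfree is fine
 *		return err;
 *	}
 *
 *	err = foo_hook_attach(link);	// the expensive, possibly failing part
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);	// unwinds file, FD and ID
 *		return err;
 *	}
 *
 *	return bpf_link_settle(&link_primer);	// installs and returns the FD
 */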
3346 
3347 int bpf_link_new_fd(struct bpf_link *link)
3348 {
3349 	return anon_inode_getfd("bpf-link",
3350 				link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
3351 				link, O_CLOEXEC);
3352 }
3353 
3354 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
3355 {
3356 	CLASS(fd, f)(ufd);
3357 	struct bpf_link *link;
3358 
3359 	if (fd_empty(f))
3360 		return ERR_PTR(-EBADF);
3361 	if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll)
3362 		return ERR_PTR(-EINVAL);
3363 
3364 	link = fd_file(f)->private_data;
3365 	bpf_link_inc(link);
3366 	return link;
3367 }
3368 EXPORT_SYMBOL_NS(bpf_link_get_from_fd, "BPF_INTERNAL");
3369 
3370 static void bpf_tracing_link_release(struct bpf_link *link)
3371 {
3372 	struct bpf_tracing_link *tr_link =
3373 		container_of(link, struct bpf_tracing_link, link.link);
3374 
3375 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
3376 						tr_link->trampoline,
3377 						tr_link->tgt_prog));
3378 
3379 	bpf_trampoline_put(tr_link->trampoline);
3380 
3381 	/* tgt_prog is NULL if target is a kernel function */
3382 	if (tr_link->tgt_prog)
3383 		bpf_prog_put(tr_link->tgt_prog);
3384 }
3385 
3386 static void bpf_tracing_link_dealloc(struct bpf_link *link)
3387 {
3388 	struct bpf_tracing_link *tr_link =
3389 		container_of(link, struct bpf_tracing_link, link.link);
3390 
3391 	kfree(tr_link);
3392 }
3393 
3394 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3395 					 struct seq_file *seq)
3396 {
3397 	struct bpf_tracing_link *tr_link =
3398 		container_of(link, struct bpf_tracing_link, link.link);
3399 	u32 target_btf_id, target_obj_id;
3400 
3401 	bpf_trampoline_unpack_key(tr_link->trampoline->key,
3402 				  &target_obj_id, &target_btf_id);
3403 	seq_printf(seq,
3404 		   "attach_type:\t%d\n"
3405 		   "target_obj_id:\t%u\n"
3406 		   "target_btf_id:\t%u\n"
3407 		   "cookie:\t%llu\n",
3408 		   tr_link->attach_type,
3409 		   target_obj_id,
3410 		   target_btf_id,
3411 		   tr_link->link.cookie);
3412 }
3413 
3414 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3415 					   struct bpf_link_info *info)
3416 {
3417 	struct bpf_tracing_link *tr_link =
3418 		container_of(link, struct bpf_tracing_link, link.link);
3419 
3420 	info->tracing.attach_type = tr_link->attach_type;
3421 	info->tracing.cookie = tr_link->link.cookie;
3422 	bpf_trampoline_unpack_key(tr_link->trampoline->key,
3423 				  &info->tracing.target_obj_id,
3424 				  &info->tracing.target_btf_id);
3425 
3426 	return 0;
3427 }
3428 
3429 static const struct bpf_link_ops bpf_tracing_link_lops = {
3430 	.release = bpf_tracing_link_release,
3431 	.dealloc = bpf_tracing_link_dealloc,
3432 	.show_fdinfo = bpf_tracing_link_show_fdinfo,
3433 	.fill_link_info = bpf_tracing_link_fill_link_info,
3434 };
3435 
3436 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
3437 				   int tgt_prog_fd,
3438 				   u32 btf_id,
3439 				   u64 bpf_cookie)
3440 {
3441 	struct bpf_link_primer link_primer;
3442 	struct bpf_prog *tgt_prog = NULL;
3443 	struct bpf_trampoline *tr = NULL;
3444 	struct bpf_tracing_link *link;
3445 	u64 key = 0;
3446 	int err;
3447 
3448 	switch (prog->type) {
3449 	case BPF_PROG_TYPE_TRACING:
3450 		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
3451 		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
3452 		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
3453 			err = -EINVAL;
3454 			goto out_put_prog;
3455 		}
3456 		break;
3457 	case BPF_PROG_TYPE_EXT:
3458 		if (prog->expected_attach_type != 0) {
3459 			err = -EINVAL;
3460 			goto out_put_prog;
3461 		}
3462 		break;
3463 	case BPF_PROG_TYPE_LSM:
3464 		if (prog->expected_attach_type != BPF_LSM_MAC) {
3465 			err = -EINVAL;
3466 			goto out_put_prog;
3467 		}
3468 		break;
3469 	default:
3470 		err = -EINVAL;
3471 		goto out_put_prog;
3472 	}
3473 
3474 	if (!!tgt_prog_fd != !!btf_id) {
3475 		err = -EINVAL;
3476 		goto out_put_prog;
3477 	}
3478 
3479 	if (tgt_prog_fd) {
3480 		/*
3481 		 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this
3482 		 * part would be changed to implement the same for
3483 		 * BPF_PROG_TYPE_TRACING, do not forget to update the way how
3484 		 * attach_tracing_prog flag is set.
3485 		 */
3486 		if (prog->type != BPF_PROG_TYPE_EXT) {
3487 			err = -EINVAL;
3488 			goto out_put_prog;
3489 		}
3490 
3491 		tgt_prog = bpf_prog_get(tgt_prog_fd);
3492 		if (IS_ERR(tgt_prog)) {
3493 			err = PTR_ERR(tgt_prog);
3494 			tgt_prog = NULL;
3495 			goto out_put_prog;
3496 		}
3497 
3498 		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3499 	}
3500 
3501 	link = kzalloc(sizeof(*link), GFP_USER);
3502 	if (!link) {
3503 		err = -ENOMEM;
3504 		goto out_put_prog;
3505 	}
3506 	bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3507 		      &bpf_tracing_link_lops, prog);
3508 	link->attach_type = prog->expected_attach_type;
3509 	link->link.cookie = bpf_cookie;
3510 
3511 	mutex_lock(&prog->aux->dst_mutex);
3512 
3513 	/* There are a few possible cases here:
3514 	 *
3515 	 * - if prog->aux->dst_trampoline is set, the program was just loaded
3516 	 *   and not yet attached to anything, so we can use the values stored
3517 	 *   in prog->aux
3518 	 *
3519 	 * - if prog->aux->dst_trampoline is NULL, the program has already been
3520 	 *   attached to a target and its initial target was cleared (below)
3521 	 *
3522 	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
3523 	 *   target_btf_id using the link_create API.
3524 	 *
3525 	 * - if tgt_prog == NULL, this function was called using the old
3526 	 *   raw_tracepoint_open API, and we need a target from prog->aux
3527 	 *
3528 	 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
3529 	 *   was detached and is going for re-attachment.
3530 	 *
3531 	 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
3532 	 *   are NULL, then program was already attached and user did not provide
3533 	 *   tgt_prog_fd so we have no way to find out or create trampoline
3534 	 */
3535 	if (!prog->aux->dst_trampoline && !tgt_prog) {
3536 		/*
3537 		 * Allow re-attach for TRACING and LSM programs. If it's
3538 		 * currently linked, bpf_trampoline_link_prog will fail.
3539 		 * EXT programs need to specify tgt_prog_fd, so they
3540 		 * re-attach in separate code path.
3541 		 */
3542 		if (prog->type != BPF_PROG_TYPE_TRACING &&
3543 		    prog->type != BPF_PROG_TYPE_LSM) {
3544 			err = -EINVAL;
3545 			goto out_unlock;
3546 		}
3547 		/* We can allow re-attach only if we have valid attach_btf. */
3548 		if (!prog->aux->attach_btf) {
3549 			err = -EINVAL;
3550 			goto out_unlock;
3551 		}
3552 		btf_id = prog->aux->attach_btf_id;
3553 		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3554 	}
3555 
3556 	if (!prog->aux->dst_trampoline ||
3557 	    (key && key != prog->aux->dst_trampoline->key)) {
3558 		/* If there is no saved target, or the specified target is
3559 		 * different from the destination specified at load time, we
3560 		 * need a new trampoline and a check for compatibility
3561 		 */
3562 		struct bpf_attach_target_info tgt_info = {};
3563 
3564 		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3565 					      &tgt_info);
3566 		if (err)
3567 			goto out_unlock;
3568 
3569 		if (tgt_info.tgt_mod) {
3570 			module_put(prog->aux->mod);
3571 			prog->aux->mod = tgt_info.tgt_mod;
3572 		}
3573 
3574 		tr = bpf_trampoline_get(key, &tgt_info);
3575 		if (!tr) {
3576 			err = -ENOMEM;
3577 			goto out_unlock;
3578 		}
3579 	} else {
3580 		/* The caller didn't specify a target, or the target was the
3581 		 * same as the destination supplied during program load. This
3582 		 * means we can reuse the trampoline and reference from program
3583 		 * load time, and there is no need to allocate a new one. This
3584 		 * can only happen once for any program, as the saved values in
3585 		 * prog->aux are cleared below.
3586 		 */
3587 		tr = prog->aux->dst_trampoline;
3588 		tgt_prog = prog->aux->dst_prog;
3589 	}
3590 
3591 	err = bpf_link_prime(&link->link.link, &link_primer);
3592 	if (err)
3593 		goto out_unlock;
3594 
3595 	err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog);
3596 	if (err) {
3597 		bpf_link_cleanup(&link_primer);
3598 		link = NULL;
3599 		goto out_unlock;
3600 	}
3601 
3602 	link->tgt_prog = tgt_prog;
3603 	link->trampoline = tr;
3604 
3605 	/* Always clear the trampoline and target prog from prog->aux to make
3606 	 * sure the original attach destination is not kept alive after a
3607 	 * program is (re-)attached to another target.
3608 	 */
3609 	if (prog->aux->dst_prog &&
3610 	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3611 		/* got extra prog ref from syscall, or attaching to different prog */
3612 		bpf_prog_put(prog->aux->dst_prog);
3613 	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3614 		/* we allocated a new trampoline, so free the old one */
3615 		bpf_trampoline_put(prog->aux->dst_trampoline);
3616 
3617 	prog->aux->dst_prog = NULL;
3618 	prog->aux->dst_trampoline = NULL;
3619 	mutex_unlock(&prog->aux->dst_mutex);
3620 
3621 	return bpf_link_settle(&link_primer);
3622 out_unlock:
3623 	if (tr && tr != prog->aux->dst_trampoline)
3624 		bpf_trampoline_put(tr);
3625 	mutex_unlock(&prog->aux->dst_mutex);
3626 	kfree(link);
3627 out_put_prog:
3628 	if (tgt_prog_fd && tgt_prog)
3629 		bpf_prog_put(tgt_prog);
3630 	return err;
3631 }
3632 
3633 static void bpf_raw_tp_link_release(struct bpf_link *link)
3634 {
3635 	struct bpf_raw_tp_link *raw_tp =
3636 		container_of(link, struct bpf_raw_tp_link, link);
3637 
3638 	bpf_probe_unregister(raw_tp->btp, raw_tp);
3639 	bpf_put_raw_tracepoint(raw_tp->btp);
3640 }
3641 
3642 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3643 {
3644 	struct bpf_raw_tp_link *raw_tp =
3645 		container_of(link, struct bpf_raw_tp_link, link);
3646 
3647 	kfree(raw_tp);
3648 }
3649 
3650 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3651 					struct seq_file *seq)
3652 {
3653 	struct bpf_raw_tp_link *raw_tp_link =
3654 		container_of(link, struct bpf_raw_tp_link, link);
3655 
3656 	seq_printf(seq,
3657 		   "tp_name:\t%s\n"
3658 		   "cookie:\t%llu\n",
3659 		   raw_tp_link->btp->tp->name,
3660 		   raw_tp_link->cookie);
3661 }
3662 
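/* Copy the NUL-terminated string @buf of length @len into the user buffer
 * @ubuf of size @ulen. If the destination is too small, a truncated but still
 * NUL-terminated copy is written and -ENOSPC is returned.
 */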
3663 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3664 			    u32 len)
3665 {
3666 	if (ulen >= len + 1) {
3667 		if (copy_to_user(ubuf, buf, len + 1))
3668 			return -EFAULT;
3669 	} else {
3670 		char zero = '\0';
3671 
3672 		if (copy_to_user(ubuf, buf, ulen - 1))
3673 			return -EFAULT;
3674 		if (put_user(zero, ubuf + ulen - 1))
3675 			return -EFAULT;
3676 		return -ENOSPC;
3677 	}
3678 
3679 	return 0;
3680 }
3681 
3682 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3683 					  struct bpf_link_info *info)
3684 {
3685 	struct bpf_raw_tp_link *raw_tp_link =
3686 		container_of(link, struct bpf_raw_tp_link, link);
3687 	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3688 	const char *tp_name = raw_tp_link->btp->tp->name;
3689 	u32 ulen = info->raw_tracepoint.tp_name_len;
3690 	size_t tp_len = strlen(tp_name);
3691 
3692 	if (!ulen ^ !ubuf)
3693 		return -EINVAL;
3694 
3695 	info->raw_tracepoint.tp_name_len = tp_len + 1;
3696 	info->raw_tracepoint.cookie = raw_tp_link->cookie;
3697 
3698 	if (!ubuf)
3699 		return 0;
3700 
3701 	return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3702 }
3703 
3704 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3705 	.release = bpf_raw_tp_link_release,
3706 	.dealloc_deferred = bpf_raw_tp_link_dealloc,
3707 	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3708 	.fill_link_info = bpf_raw_tp_link_fill_link_info,
3709 };
3710 
3711 #ifdef CONFIG_PERF_EVENTS
3712 struct bpf_perf_link {
3713 	struct bpf_link link;
3714 	struct file *perf_file;
3715 };
3716 
3717 static void bpf_perf_link_release(struct bpf_link *link)
3718 {
3719 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3720 	struct perf_event *event = perf_link->perf_file->private_data;
3721 
3722 	perf_event_free_bpf_prog(event);
3723 	fput(perf_link->perf_file);
3724 }
3725 
3726 static void bpf_perf_link_dealloc(struct bpf_link *link)
3727 {
3728 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3729 
3730 	kfree(perf_link);
3731 }
3732 
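/*
 * Common helper for filling perf-event link info: *ulenp carries the size
 * of the user buffer on input and is set to the required size (including
 * the terminating NUL) on output.  @uname may be NULL when user space only
 * queries the required length.
 */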
3733 static int bpf_perf_link_fill_common(const struct perf_event *event,
3734 				     char __user *uname, u32 *ulenp,
3735 				     u64 *probe_offset, u64 *probe_addr,
3736 				     u32 *fd_type, unsigned long *missed)
3737 {
3738 	const char *buf;
3739 	u32 prog_id, ulen;
3740 	size_t len;
3741 	int err;
3742 
3743 	ulen = *ulenp;
3744 	if (!ulen ^ !uname)
3745 		return -EINVAL;
3746 
3747 	err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3748 				      probe_offset, probe_addr, missed);
3749 	if (err)
3750 		return err;
3751 
3752 	if (buf) {
3753 		len = strlen(buf);
3754 		*ulenp = len + 1;
3755 	} else {
3756 		*ulenp = 1;
3757 	}
3758 	if (!uname)
3759 		return 0;
3760 
3761 	if (buf) {
3762 		err = bpf_copy_to_user(uname, buf, ulen, len);
3763 		if (err)
3764 			return err;
3765 	} else {
3766 		char zero = '\0';
3767 
3768 		if (put_user(zero, uname))
3769 			return -EFAULT;
3770 	}
3771 	return 0;
3772 }
3773 
3774 #ifdef CONFIG_KPROBE_EVENTS
3775 static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3776 				     struct bpf_link_info *info)
3777 {
3778 	unsigned long missed;
3779 	char __user *uname;
3780 	u64 addr, offset;
3781 	u32 ulen, type;
3782 	int err;
3783 
3784 	uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3785 	ulen = info->perf_event.kprobe.name_len;
3786 	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
3787 					&type, &missed);
3788 	if (err)
3789 		return err;
3790 	if (type == BPF_FD_TYPE_KRETPROBE)
3791 		info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3792 	else
3793 		info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3794 	info->perf_event.kprobe.name_len = ulen;
3795 	info->perf_event.kprobe.offset = offset;
3796 	info->perf_event.kprobe.missed = missed;
3797 	if (!kallsyms_show_value(current_cred()))
3798 		addr = 0;
3799 	info->perf_event.kprobe.addr = addr;
3800 	info->perf_event.kprobe.cookie = event->bpf_cookie;
3801 	return 0;
3802 }
3803 
3804 static void bpf_perf_link_fdinfo_kprobe(const struct perf_event *event,
3805 					struct seq_file *seq)
3806 {
3807 	const char *name;
3808 	int err;
3809 	u32 prog_id, type;
3810 	u64 offset, addr;
3811 	unsigned long missed;
3812 
3813 	err = bpf_get_perf_event_info(event, &prog_id, &type, &name,
3814 				      &offset, &addr, &missed);
3815 	if (err)
3816 		return;
3817 
3818 	seq_printf(seq,
3819 		   "name:\t%s\n"
3820 		   "offset:\t%#llx\n"
3821 		   "missed:\t%lu\n"
3822 		   "addr:\t%#llx\n"
3823 		   "event_type:\t%s\n"
3824 		   "cookie:\t%llu\n",
3825 		   name, offset, missed, addr,
3826 		   type == BPF_FD_TYPE_KRETPROBE ? "kretprobe" : "kprobe",
3827 		   event->bpf_cookie);
3828 }
3829 #endif
3830 
3831 #ifdef CONFIG_UPROBE_EVENTS
3832 static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3833 				     struct bpf_link_info *info)
3834 {
3835 	u64 ref_ctr_offset, offset;
3836 	char __user *uname;
3837 	u32 ulen, type;
3838 	int err;
3839 
3840 	uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3841 	ulen = info->perf_event.uprobe.name_len;
3842 	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &ref_ctr_offset,
3843 					&type, NULL);
3844 	if (err)
3845 		return err;
3846 
3847 	if (type == BPF_FD_TYPE_URETPROBE)
3848 		info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3849 	else
3850 		info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3851 	info->perf_event.uprobe.name_len = ulen;
3852 	info->perf_event.uprobe.offset = offset;
3853 	info->perf_event.uprobe.cookie = event->bpf_cookie;
3854 	info->perf_event.uprobe.ref_ctr_offset = ref_ctr_offset;
3855 	return 0;
3856 }
3857 
3858 static void bpf_perf_link_fdinfo_uprobe(const struct perf_event *event,
3859 					struct seq_file *seq)
3860 {
3861 	const char *name;
3862 	int err;
3863 	u32 prog_id, type;
3864 	u64 offset, ref_ctr_offset;
3865 	unsigned long missed;
3866 
3867 	err = bpf_get_perf_event_info(event, &prog_id, &type, &name,
3868 				      &offset, &ref_ctr_offset, &missed);
3869 	if (err)
3870 		return;
3871 
3872 	seq_printf(seq,
3873 		   "name:\t%s\n"
3874 		   "offset:\t%#llx\n"
3875 		   "ref_ctr_offset:\t%#llx\n"
3876 		   "event_type:\t%s\n"
3877 		   "cookie:\t%llu\n",
3878 		   name, offset, ref_ctr_offset,
3879 		   type == BPF_FD_TYPE_URETPROBE ? "uretprobe" : "uprobe",
3880 		   event->bpf_cookie);
3881 }
3882 #endif
3883 
3884 static int bpf_perf_link_fill_probe(const struct perf_event *event,
3885 				    struct bpf_link_info *info)
3886 {
3887 #ifdef CONFIG_KPROBE_EVENTS
3888 	if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3889 		return bpf_perf_link_fill_kprobe(event, info);
3890 #endif
3891 #ifdef CONFIG_UPROBE_EVENTS
3892 	if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3893 		return bpf_perf_link_fill_uprobe(event, info);
3894 #endif
3895 	return -EOPNOTSUPP;
3896 }
3897 
3898 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3899 					 struct bpf_link_info *info)
3900 {
3901 	char __user *uname;
3902 	u32 ulen;
3903 	int err;
3904 
3905 	uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3906 	ulen = info->perf_event.tracepoint.name_len;
3907 	err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL);
3908 	if (err)
3909 		return err;
3910 
3911 	info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3912 	info->perf_event.tracepoint.name_len = ulen;
3913 	info->perf_event.tracepoint.cookie = event->bpf_cookie;
3914 	return 0;
3915 }
3916 
3917 static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3918 					 struct bpf_link_info *info)
3919 {
3920 	info->perf_event.event.type = event->attr.type;
3921 	info->perf_event.event.config = event->attr.config;
3922 	info->perf_event.event.cookie = event->bpf_cookie;
3923 	info->perf_event.type = BPF_PERF_EVENT_EVENT;
3924 	return 0;
3925 }
3926 
3927 static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3928 					struct bpf_link_info *info)
3929 {
3930 	struct bpf_perf_link *perf_link;
3931 	const struct perf_event *event;
3932 
3933 	perf_link = container_of(link, struct bpf_perf_link, link);
3934 	event = perf_get_event(perf_link->perf_file);
3935 	if (IS_ERR(event))
3936 		return PTR_ERR(event);
3937 
3938 	switch (event->prog->type) {
3939 	case BPF_PROG_TYPE_PERF_EVENT:
3940 		return bpf_perf_link_fill_perf_event(event, info);
3941 	case BPF_PROG_TYPE_TRACEPOINT:
3942 		return bpf_perf_link_fill_tracepoint(event, info);
3943 	case BPF_PROG_TYPE_KPROBE:
3944 		return bpf_perf_link_fill_probe(event, info);
3945 	default:
3946 		return -EOPNOTSUPP;
3947 	}
3948 }
3949 
3950 static void bpf_perf_event_link_show_fdinfo(const struct perf_event *event,
3951 					    struct seq_file *seq)
3952 {
3953 	seq_printf(seq,
3954 		   "type:\t%u\n"
3955 		   "config:\t%llu\n"
3956 		   "event_type:\t%s\n"
3957 		   "cookie:\t%llu\n",
3958 		   event->attr.type, event->attr.config,
3959 		   "event", event->bpf_cookie);
3960 }
3961 
3962 static void bpf_tracepoint_link_show_fdinfo(const struct perf_event *event,
3963 					    struct seq_file *seq)
3964 {
3965 	int err;
3966 	const char *name;
3967 	u32 prog_id;
3968 
3969 	err = bpf_get_perf_event_info(event, &prog_id, NULL, &name, NULL,
3970 				      NULL, NULL);
3971 	if (err)
3972 		return;
3973 
3974 	seq_printf(seq,
3975 		   "tp_name:\t%s\n"
3976 		   "event_type:\t%s\n"
3977 		   "cookie:\t%llu\n",
3978 		   name, "tracepoint", event->bpf_cookie);
3979 }
3980 
3981 static void bpf_probe_link_show_fdinfo(const struct perf_event *event,
3982 				       struct seq_file *seq)
3983 {
3984 #ifdef CONFIG_KPROBE_EVENTS
3985 	if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3986 		return bpf_perf_link_fdinfo_kprobe(event, seq);
3987 #endif
3988 
3989 #ifdef CONFIG_UPROBE_EVENTS
3990 	if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3991 		return bpf_perf_link_fdinfo_uprobe(event, seq);
3992 #endif
3993 }
3994 
3995 static void bpf_perf_link_show_fdinfo(const struct bpf_link *link,
3996 				      struct seq_file *seq)
3997 {
3998 	struct bpf_perf_link *perf_link;
3999 	const struct perf_event *event;
4000 
4001 	perf_link = container_of(link, struct bpf_perf_link, link);
4002 	event = perf_get_event(perf_link->perf_file);
4003 	if (IS_ERR(event))
4004 		return;
4005 
4006 	switch (event->prog->type) {
4007 	case BPF_PROG_TYPE_PERF_EVENT:
4008 		return bpf_perf_event_link_show_fdinfo(event, seq);
4009 	case BPF_PROG_TYPE_TRACEPOINT:
4010 		return bpf_tracepoint_link_show_fdinfo(event, seq);
4011 	case BPF_PROG_TYPE_KPROBE:
4012 		return bpf_probe_link_show_fdinfo(event, seq);
4013 	default:
4014 		return;
4015 	}
4016 }
4017 
4018 static const struct bpf_link_ops bpf_perf_link_lops = {
4019 	.release = bpf_perf_link_release,
4020 	.dealloc = bpf_perf_link_dealloc,
4021 	.fill_link_info = bpf_perf_link_fill_link_info,
4022 	.show_fdinfo = bpf_perf_link_show_fdinfo,
4023 };
4024 
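/*
 * Attach @prog to the perf event behind link_create.target_fd: pin the
 * perf_event file, allocate and prime the link, install the program on the
 * event, and only settle the link (exposing its fd) once that succeeded.
 */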
4025 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
4026 {
4027 	struct bpf_link_primer link_primer;
4028 	struct bpf_perf_link *link;
4029 	struct perf_event *event;
4030 	struct file *perf_file;
4031 	int err;
4032 
4033 	if (attr->link_create.flags)
4034 		return -EINVAL;
4035 
4036 	perf_file = perf_event_get(attr->link_create.target_fd);
4037 	if (IS_ERR(perf_file))
4038 		return PTR_ERR(perf_file);
4039 
4040 	link = kzalloc(sizeof(*link), GFP_USER);
4041 	if (!link) {
4042 		err = -ENOMEM;
4043 		goto out_put_file;
4044 	}
4045 	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
4046 	link->perf_file = perf_file;
4047 
4048 	err = bpf_link_prime(&link->link, &link_primer);
4049 	if (err) {
4050 		kfree(link);
4051 		goto out_put_file;
4052 	}
4053 
4054 	event = perf_file->private_data;
4055 	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
4056 	if (err) {
4057 		bpf_link_cleanup(&link_primer);
4058 		goto out_put_file;
4059 	}
4060 	/* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
4061 	bpf_prog_inc(prog);
4062 
4063 	return bpf_link_settle(&link_primer);
4064 
4065 out_put_file:
4066 	fput(perf_file);
4067 	return err;
4068 }
4069 #else
4070 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
4071 {
4072 	return -EOPNOTSUPP;
4073 }
4074 #endif /* CONFIG_PERF_EVENTS */
4075 
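/*
 * Create a raw tracepoint link.  TRACING/EXT/LSM programs have their attach
 * point fixed via btf_id at load time, so passing a tracepoint name is
 * rejected for them; RAW_TRACEPOINT(_WRITABLE) programs take the name from
 * user space instead.
 */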
4076 static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
4077 				  const char __user *user_tp_name, u64 cookie)
4078 {
4079 	struct bpf_link_primer link_primer;
4080 	struct bpf_raw_tp_link *link;
4081 	struct bpf_raw_event_map *btp;
4082 	const char *tp_name;
4083 	char buf[128];
4084 	int err;
4085 
4086 	switch (prog->type) {
4087 	case BPF_PROG_TYPE_TRACING:
4088 	case BPF_PROG_TYPE_EXT:
4089 	case BPF_PROG_TYPE_LSM:
4090 		if (user_tp_name)
4091 			/* The attach point for this category of programs
4092 			 * should be specified via btf_id during program load.
4093 			 */
4094 			return -EINVAL;
4095 		if (prog->type == BPF_PROG_TYPE_TRACING &&
4096 		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
4097 			tp_name = prog->aux->attach_func_name;
4098 			break;
4099 		}
4100 		return bpf_tracing_prog_attach(prog, 0, 0, 0);
4101 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
4102 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
4103 		if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
4104 			return -EFAULT;
4105 		buf[sizeof(buf) - 1] = 0;
4106 		tp_name = buf;
4107 		break;
4108 	default:
4109 		return -EINVAL;
4110 	}
4111 
4112 	btp = bpf_get_raw_tracepoint(tp_name);
4113 	if (!btp)
4114 		return -ENOENT;
4115 
4116 	link = kzalloc(sizeof(*link), GFP_USER);
4117 	if (!link) {
4118 		err = -ENOMEM;
4119 		goto out_put_btp;
4120 	}
4121 	bpf_link_init_sleepable(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
4122 				&bpf_raw_tp_link_lops, prog,
4123 				tracepoint_is_faultable(btp->tp));
4124 	link->btp = btp;
4125 	link->cookie = cookie;
4126 
4127 	err = bpf_link_prime(&link->link, &link_primer);
4128 	if (err) {
4129 		kfree(link);
4130 		goto out_put_btp;
4131 	}
4132 
4133 	err = bpf_probe_register(link->btp, link);
4134 	if (err) {
4135 		bpf_link_cleanup(&link_primer);
4136 		goto out_put_btp;
4137 	}
4138 
4139 	return bpf_link_settle(&link_primer);
4140 
4141 out_put_btp:
4142 	bpf_put_raw_tracepoint(btp);
4143 	return err;
4144 }
4145 
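/*
 * BPF_RAW_TRACEPOINT_OPEN usage, as a rough user-space sketch (the
 * tracepoint name below is only an example):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 */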
4146 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie
4147 
4148 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
4149 {
4150 	struct bpf_prog *prog;
4151 	void __user *tp_name;
4152 	__u64 cookie;
4153 	int fd;
4154 
4155 	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
4156 		return -EINVAL;
4157 
4158 	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
4159 	if (IS_ERR(prog))
4160 		return PTR_ERR(prog);
4161 
4162 	tp_name = u64_to_user_ptr(attr->raw_tracepoint.name);
4163 	cookie = attr->raw_tracepoint.cookie;
4164 	fd = bpf_raw_tp_link_attach(prog, tp_name, cookie);
4165 	if (fd < 0)
4166 		bpf_prog_put(prog);
4167 	return fd;
4168 }
4169 
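/*
 * Map an attach type from the attach/link-create UAPI onto the program type
 * that may use it; BPF_PROG_TYPE_UNSPEC means the attach type is unknown or
 * not handled here.
 */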
4170 static enum bpf_prog_type
4171 attach_type_to_prog_type(enum bpf_attach_type attach_type)
4172 {
4173 	switch (attach_type) {
4174 	case BPF_CGROUP_INET_INGRESS:
4175 	case BPF_CGROUP_INET_EGRESS:
4176 		return BPF_PROG_TYPE_CGROUP_SKB;
4177 	case BPF_CGROUP_INET_SOCK_CREATE:
4178 	case BPF_CGROUP_INET_SOCK_RELEASE:
4179 	case BPF_CGROUP_INET4_POST_BIND:
4180 	case BPF_CGROUP_INET6_POST_BIND:
4181 		return BPF_PROG_TYPE_CGROUP_SOCK;
4182 	case BPF_CGROUP_INET4_BIND:
4183 	case BPF_CGROUP_INET6_BIND:
4184 	case BPF_CGROUP_INET4_CONNECT:
4185 	case BPF_CGROUP_INET6_CONNECT:
4186 	case BPF_CGROUP_UNIX_CONNECT:
4187 	case BPF_CGROUP_INET4_GETPEERNAME:
4188 	case BPF_CGROUP_INET6_GETPEERNAME:
4189 	case BPF_CGROUP_UNIX_GETPEERNAME:
4190 	case BPF_CGROUP_INET4_GETSOCKNAME:
4191 	case BPF_CGROUP_INET6_GETSOCKNAME:
4192 	case BPF_CGROUP_UNIX_GETSOCKNAME:
4193 	case BPF_CGROUP_UDP4_SENDMSG:
4194 	case BPF_CGROUP_UDP6_SENDMSG:
4195 	case BPF_CGROUP_UNIX_SENDMSG:
4196 	case BPF_CGROUP_UDP4_RECVMSG:
4197 	case BPF_CGROUP_UDP6_RECVMSG:
4198 	case BPF_CGROUP_UNIX_RECVMSG:
4199 		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
4200 	case BPF_CGROUP_SOCK_OPS:
4201 		return BPF_PROG_TYPE_SOCK_OPS;
4202 	case BPF_CGROUP_DEVICE:
4203 		return BPF_PROG_TYPE_CGROUP_DEVICE;
4204 	case BPF_SK_MSG_VERDICT:
4205 		return BPF_PROG_TYPE_SK_MSG;
4206 	case BPF_SK_SKB_STREAM_PARSER:
4207 	case BPF_SK_SKB_STREAM_VERDICT:
4208 	case BPF_SK_SKB_VERDICT:
4209 		return BPF_PROG_TYPE_SK_SKB;
4210 	case BPF_LIRC_MODE2:
4211 		return BPF_PROG_TYPE_LIRC_MODE2;
4212 	case BPF_FLOW_DISSECTOR:
4213 		return BPF_PROG_TYPE_FLOW_DISSECTOR;
4214 	case BPF_CGROUP_SYSCTL:
4215 		return BPF_PROG_TYPE_CGROUP_SYSCTL;
4216 	case BPF_CGROUP_GETSOCKOPT:
4217 	case BPF_CGROUP_SETSOCKOPT:
4218 		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
4219 	case BPF_TRACE_ITER:
4220 	case BPF_TRACE_RAW_TP:
4221 	case BPF_TRACE_FENTRY:
4222 	case BPF_TRACE_FEXIT:
4223 	case BPF_MODIFY_RETURN:
4224 		return BPF_PROG_TYPE_TRACING;
4225 	case BPF_LSM_MAC:
4226 		return BPF_PROG_TYPE_LSM;
4227 	case BPF_SK_LOOKUP:
4228 		return BPF_PROG_TYPE_SK_LOOKUP;
4229 	case BPF_XDP:
4230 		return BPF_PROG_TYPE_XDP;
4231 	case BPF_LSM_CGROUP:
4232 		return BPF_PROG_TYPE_LSM;
4233 	case BPF_TCX_INGRESS:
4234 	case BPF_TCX_EGRESS:
4235 	case BPF_NETKIT_PRIMARY:
4236 	case BPF_NETKIT_PEER:
4237 		return BPF_PROG_TYPE_SCHED_CLS;
4238 	default:
4239 		return BPF_PROG_TYPE_UNSPEC;
4240 	}
4241 }
4242 
4243 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
4244 					     enum bpf_attach_type attach_type)
4245 {
4246 	enum bpf_prog_type ptype;
4247 
4248 	switch (prog->type) {
4249 	case BPF_PROG_TYPE_CGROUP_SOCK:
4250 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4251 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4252 	case BPF_PROG_TYPE_SK_LOOKUP:
4253 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
4254 	case BPF_PROG_TYPE_CGROUP_SKB:
4255 		if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
4256 			/* cg-skb progs can be loaded by unpriv user.
4257 			 * check permissions at attach time.
4258 			 */
4259 			return -EPERM;
4260 
4261 		ptype = attach_type_to_prog_type(attach_type);
4262 		if (prog->type != ptype)
4263 			return -EINVAL;
4264 
4265 		return prog->enforce_expected_attach_type &&
4266 			prog->expected_attach_type != attach_type ?
4267 			-EINVAL : 0;
4268 	case BPF_PROG_TYPE_EXT:
4269 		return 0;
4270 	case BPF_PROG_TYPE_NETFILTER:
4271 		if (attach_type != BPF_NETFILTER)
4272 			return -EINVAL;
4273 		return 0;
4274 	case BPF_PROG_TYPE_PERF_EVENT:
4275 	case BPF_PROG_TYPE_TRACEPOINT:
4276 		if (attach_type != BPF_PERF_EVENT)
4277 			return -EINVAL;
4278 		return 0;
4279 	case BPF_PROG_TYPE_KPROBE:
4280 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
4281 		    attach_type != BPF_TRACE_KPROBE_MULTI)
4282 			return -EINVAL;
4283 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION &&
4284 		    attach_type != BPF_TRACE_KPROBE_SESSION)
4285 			return -EINVAL;
4286 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
4287 		    attach_type != BPF_TRACE_UPROBE_MULTI)
4288 			return -EINVAL;
4289 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION &&
4290 		    attach_type != BPF_TRACE_UPROBE_SESSION)
4291 			return -EINVAL;
4292 		if (attach_type != BPF_PERF_EVENT &&
4293 		    attach_type != BPF_TRACE_KPROBE_MULTI &&
4294 		    attach_type != BPF_TRACE_KPROBE_SESSION &&
4295 		    attach_type != BPF_TRACE_UPROBE_MULTI &&
4296 		    attach_type != BPF_TRACE_UPROBE_SESSION)
4297 			return -EINVAL;
4298 		return 0;
4299 	case BPF_PROG_TYPE_SCHED_CLS:
4300 		if (attach_type != BPF_TCX_INGRESS &&
4301 		    attach_type != BPF_TCX_EGRESS &&
4302 		    attach_type != BPF_NETKIT_PRIMARY &&
4303 		    attach_type != BPF_NETKIT_PEER)
4304 			return -EINVAL;
4305 		return 0;
4306 	default:
4307 		ptype = attach_type_to_prog_type(attach_type);
4308 		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
4309 			return -EINVAL;
4310 		return 0;
4311 	}
4312 }
4313 
4314 static bool is_cgroup_prog_type(enum bpf_prog_type ptype, enum bpf_attach_type atype,
4315 				bool check_atype)
4316 {
4317 	switch (ptype) {
4318 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4319 	case BPF_PROG_TYPE_CGROUP_SKB:
4320 	case BPF_PROG_TYPE_CGROUP_SOCK:
4321 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4322 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4323 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4324 	case BPF_PROG_TYPE_SOCK_OPS:
4325 		return true;
4326 	case BPF_PROG_TYPE_LSM:
4327 		return check_atype ? atype == BPF_LSM_CGROUP : true;
4328 	default:
4329 		return false;
4330 	}
4331 }
4332 
4333 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision
4334 
4335 #define BPF_F_ATTACH_MASK_BASE	\
4336 	(BPF_F_ALLOW_OVERRIDE |	\
4337 	 BPF_F_ALLOW_MULTI |	\
4338 	 BPF_F_REPLACE |	\
4339 	 BPF_F_PREORDER)
4340 
4341 #define BPF_F_ATTACH_MASK_MPROG	\
4342 	(BPF_F_REPLACE |	\
4343 	 BPF_F_BEFORE |		\
4344 	 BPF_F_AFTER |		\
4345 	 BPF_F_ID |		\
4346 	 BPF_F_LINK)
4347 
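/*
 * Legacy BPF_PROG_ATTACH command.  Cgroup-managed program types are routed
 * to cgroup_bpf_prog_attach(); mprog-capable targets (tcx, netkit) accept
 * the BPF_F_ATTACH_MASK_MPROG flags, while all other targets are limited to
 * the base attach flags and must not pass relative_fd/expected_revision.
 */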
4348 static int bpf_prog_attach(const union bpf_attr *attr)
4349 {
4350 	enum bpf_prog_type ptype;
4351 	struct bpf_prog *prog;
4352 	int ret;
4353 
4354 	if (CHECK_ATTR(BPF_PROG_ATTACH))
4355 		return -EINVAL;
4356 
4357 	ptype = attach_type_to_prog_type(attr->attach_type);
4358 	if (ptype == BPF_PROG_TYPE_UNSPEC)
4359 		return -EINVAL;
4360 	if (bpf_mprog_supported(ptype)) {
4361 		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4362 			return -EINVAL;
4363 	} else if (is_cgroup_prog_type(ptype, 0, false)) {
4364 		if (attr->attach_flags & ~(BPF_F_ATTACH_MASK_BASE | BPF_F_ATTACH_MASK_MPROG))
4365 			return -EINVAL;
4366 	} else {
4367 		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
4368 			return -EINVAL;
4369 		if (attr->relative_fd ||
4370 		    attr->expected_revision)
4371 			return -EINVAL;
4372 	}
4373 
4374 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4375 	if (IS_ERR(prog))
4376 		return PTR_ERR(prog);
4377 
4378 	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
4379 		bpf_prog_put(prog);
4380 		return -EINVAL;
4381 	}
4382 
4383 	if (is_cgroup_prog_type(ptype, prog->expected_attach_type, true)) {
4384 		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
4385 		goto out;
4386 	}
4387 
4388 	switch (ptype) {
4389 	case BPF_PROG_TYPE_SK_SKB:
4390 	case BPF_PROG_TYPE_SK_MSG:
4391 		ret = sock_map_get_from_fd(attr, prog);
4392 		break;
4393 	case BPF_PROG_TYPE_LIRC_MODE2:
4394 		ret = lirc_prog_attach(attr, prog);
4395 		break;
4396 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4397 		ret = netns_bpf_prog_attach(attr, prog);
4398 		break;
4399 	case BPF_PROG_TYPE_SCHED_CLS:
4400 		if (attr->attach_type == BPF_TCX_INGRESS ||
4401 		    attr->attach_type == BPF_TCX_EGRESS)
4402 			ret = tcx_prog_attach(attr, prog);
4403 		else
4404 			ret = netkit_prog_attach(attr, prog);
4405 		break;
4406 	default:
4407 		ret = -EINVAL;
4408 	}
4409 out:
4410 	if (ret)
4411 		bpf_prog_put(prog);
4412 	return ret;
4413 }
4414 
4415 #define BPF_PROG_DETACH_LAST_FIELD expected_revision
4416 
4417 static int bpf_prog_detach(const union bpf_attr *attr)
4418 {
4419 	struct bpf_prog *prog = NULL;
4420 	enum bpf_prog_type ptype;
4421 	int ret;
4422 
4423 	if (CHECK_ATTR(BPF_PROG_DETACH))
4424 		return -EINVAL;
4425 
4426 	ptype = attach_type_to_prog_type(attr->attach_type);
4427 	if (bpf_mprog_supported(ptype)) {
4428 		if (ptype == BPF_PROG_TYPE_UNSPEC)
4429 			return -EINVAL;
4430 		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4431 			return -EINVAL;
4432 		if (attr->attach_bpf_fd) {
4433 			prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4434 			if (IS_ERR(prog))
4435 				return PTR_ERR(prog);
4436 		}
4437 	} else if (is_cgroup_prog_type(ptype, 0, false)) {
4438 		if (attr->attach_flags || attr->relative_fd)
4439 			return -EINVAL;
4440 	} else if (attr->attach_flags ||
4441 		   attr->relative_fd ||
4442 		   attr->expected_revision) {
4443 		return -EINVAL;
4444 	}
4445 
4446 	switch (ptype) {
4447 	case BPF_PROG_TYPE_SK_MSG:
4448 	case BPF_PROG_TYPE_SK_SKB:
4449 		ret = sock_map_prog_detach(attr, ptype);
4450 		break;
4451 	case BPF_PROG_TYPE_LIRC_MODE2:
4452 		ret = lirc_prog_detach(attr);
4453 		break;
4454 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4455 		ret = netns_bpf_prog_detach(attr, ptype);
4456 		break;
4457 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4458 	case BPF_PROG_TYPE_CGROUP_SKB:
4459 	case BPF_PROG_TYPE_CGROUP_SOCK:
4460 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4461 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4462 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4463 	case BPF_PROG_TYPE_SOCK_OPS:
4464 	case BPF_PROG_TYPE_LSM:
4465 		ret = cgroup_bpf_prog_detach(attr, ptype);
4466 		break;
4467 	case BPF_PROG_TYPE_SCHED_CLS:
4468 		if (attr->attach_type == BPF_TCX_INGRESS ||
4469 		    attr->attach_type == BPF_TCX_EGRESS)
4470 			ret = tcx_prog_detach(attr, prog);
4471 		else
4472 			ret = netkit_prog_detach(attr, prog);
4473 		break;
4474 	default:
4475 		ret = -EINVAL;
4476 	}
4477 
4478 	if (prog)
4479 		bpf_prog_put(prog);
4480 	return ret;
4481 }
4482 
4483 #define BPF_PROG_QUERY_LAST_FIELD query.revision
4484 
4485 static int bpf_prog_query(const union bpf_attr *attr,
4486 			  union bpf_attr __user *uattr)
4487 {
4488 	if (!bpf_net_capable())
4489 		return -EPERM;
4490 	if (CHECK_ATTR(BPF_PROG_QUERY))
4491 		return -EINVAL;
4492 	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
4493 		return -EINVAL;
4494 
4495 	switch (attr->query.attach_type) {
4496 	case BPF_CGROUP_INET_INGRESS:
4497 	case BPF_CGROUP_INET_EGRESS:
4498 	case BPF_CGROUP_INET_SOCK_CREATE:
4499 	case BPF_CGROUP_INET_SOCK_RELEASE:
4500 	case BPF_CGROUP_INET4_BIND:
4501 	case BPF_CGROUP_INET6_BIND:
4502 	case BPF_CGROUP_INET4_POST_BIND:
4503 	case BPF_CGROUP_INET6_POST_BIND:
4504 	case BPF_CGROUP_INET4_CONNECT:
4505 	case BPF_CGROUP_INET6_CONNECT:
4506 	case BPF_CGROUP_UNIX_CONNECT:
4507 	case BPF_CGROUP_INET4_GETPEERNAME:
4508 	case BPF_CGROUP_INET6_GETPEERNAME:
4509 	case BPF_CGROUP_UNIX_GETPEERNAME:
4510 	case BPF_CGROUP_INET4_GETSOCKNAME:
4511 	case BPF_CGROUP_INET6_GETSOCKNAME:
4512 	case BPF_CGROUP_UNIX_GETSOCKNAME:
4513 	case BPF_CGROUP_UDP4_SENDMSG:
4514 	case BPF_CGROUP_UDP6_SENDMSG:
4515 	case BPF_CGROUP_UNIX_SENDMSG:
4516 	case BPF_CGROUP_UDP4_RECVMSG:
4517 	case BPF_CGROUP_UDP6_RECVMSG:
4518 	case BPF_CGROUP_UNIX_RECVMSG:
4519 	case BPF_CGROUP_SOCK_OPS:
4520 	case BPF_CGROUP_DEVICE:
4521 	case BPF_CGROUP_SYSCTL:
4522 	case BPF_CGROUP_GETSOCKOPT:
4523 	case BPF_CGROUP_SETSOCKOPT:
4524 	case BPF_LSM_CGROUP:
4525 		return cgroup_bpf_prog_query(attr, uattr);
4526 	case BPF_LIRC_MODE2:
4527 		return lirc_prog_query(attr, uattr);
4528 	case BPF_FLOW_DISSECTOR:
4529 	case BPF_SK_LOOKUP:
4530 		return netns_bpf_prog_query(attr, uattr);
4531 	case BPF_SK_SKB_STREAM_PARSER:
4532 	case BPF_SK_SKB_STREAM_VERDICT:
4533 	case BPF_SK_MSG_VERDICT:
4534 	case BPF_SK_SKB_VERDICT:
4535 		return sock_map_bpf_prog_query(attr, uattr);
4536 	case BPF_TCX_INGRESS:
4537 	case BPF_TCX_EGRESS:
4538 		return tcx_prog_query(attr, uattr);
4539 	case BPF_NETKIT_PRIMARY:
4540 	case BPF_NETKIT_PEER:
4541 		return netkit_prog_query(attr, uattr);
4542 	default:
4543 		return -EINVAL;
4544 	}
4545 }
4546 
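/*
 * BPF_PROG_TEST_RUN: ctx_in/ctx_size_in and ctx_out/ctx_size_out must each
 * be supplied as a pair (pointer and size together, or neither); the run
 * itself is delegated to the program type's ->test_run() callback.
 */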
4547 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
4548 
4549 static int bpf_prog_test_run(const union bpf_attr *attr,
4550 			     union bpf_attr __user *uattr)
4551 {
4552 	struct bpf_prog *prog;
4553 	int ret = -ENOTSUPP;
4554 
4555 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
4556 		return -EINVAL;
4557 
4558 	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
4559 	    (!attr->test.ctx_size_in && attr->test.ctx_in))
4560 		return -EINVAL;
4561 
4562 	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
4563 	    (!attr->test.ctx_size_out && attr->test.ctx_out))
4564 		return -EINVAL;
4565 
4566 	prog = bpf_prog_get(attr->test.prog_fd);
4567 	if (IS_ERR(prog))
4568 		return PTR_ERR(prog);
4569 
4570 	if (prog->aux->ops->test_run)
4571 		ret = prog->aux->ops->test_run(prog, attr, uattr);
4572 
4573 	bpf_prog_put(prog);
4574 	return ret;
4575 }
4576 
4577 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
4578 
4579 static int bpf_obj_get_next_id(const union bpf_attr *attr,
4580 			       union bpf_attr __user *uattr,
4581 			       struct idr *idr,
4582 			       spinlock_t *lock)
4583 {
4584 	u32 next_id = attr->start_id;
4585 	int err = 0;
4586 
4587 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
4588 		return -EINVAL;
4589 
4590 	if (!capable(CAP_SYS_ADMIN))
4591 		return -EPERM;
4592 
4593 	next_id++;
4594 	spin_lock_bh(lock);
4595 	if (!idr_get_next(idr, &next_id))
4596 		err = -ENOENT;
4597 	spin_unlock_bh(lock);
4598 
4599 	if (!err)
4600 		err = put_user(next_id, &uattr->next_id);
4601 
4602 	return err;
4603 }
4604 
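/*
 * Return the map at *id or, failing that, the one with the next higher ID,
 * skipping maps whose refcount already dropped to zero.  A reference is
 * taken on the returned map and *id is updated to its ID.
 */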
4605 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
4606 {
4607 	struct bpf_map *map;
4608 
4609 	spin_lock_bh(&map_idr_lock);
4610 again:
4611 	map = idr_get_next(&map_idr, id);
4612 	if (map) {
4613 		map = __bpf_map_inc_not_zero(map, false);
4614 		if (IS_ERR(map)) {
4615 			(*id)++;
4616 			goto again;
4617 		}
4618 	}
4619 	spin_unlock_bh(&map_idr_lock);
4620 
4621 	return map;
4622 }
4623 
4624 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
4625 {
4626 	struct bpf_prog *prog;
4627 
4628 	spin_lock_bh(&prog_idr_lock);
4629 again:
4630 	prog = idr_get_next(&prog_idr, id);
4631 	if (prog) {
4632 		prog = bpf_prog_inc_not_zero(prog);
4633 		if (IS_ERR(prog)) {
4634 			(*id)++;
4635 			goto again;
4636 		}
4637 	}
4638 	spin_unlock_bh(&prog_idr_lock);
4639 
4640 	return prog;
4641 }
4642 
4643 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
4644 
4645 struct bpf_prog *bpf_prog_by_id(u32 id)
4646 {
4647 	struct bpf_prog *prog;
4648 
4649 	if (!id)
4650 		return ERR_PTR(-ENOENT);
4651 
4652 	spin_lock_bh(&prog_idr_lock);
4653 	prog = idr_find(&prog_idr, id);
4654 	if (prog)
4655 		prog = bpf_prog_inc_not_zero(prog);
4656 	else
4657 		prog = ERR_PTR(-ENOENT);
4658 	spin_unlock_bh(&prog_idr_lock);
4659 	return prog;
4660 }
4661 
4662 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
4663 {
4664 	struct bpf_prog *prog;
4665 	u32 id = attr->prog_id;
4666 	int fd;
4667 
4668 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
4669 		return -EINVAL;
4670 
4671 	if (!capable(CAP_SYS_ADMIN))
4672 		return -EPERM;
4673 
4674 	prog = bpf_prog_by_id(id);
4675 	if (IS_ERR(prog))
4676 		return PTR_ERR(prog);
4677 
4678 	fd = bpf_prog_new_fd(prog);
4679 	if (fd < 0)
4680 		bpf_prog_put(prog);
4681 
4682 	return fd;
4683 }
4684 
4685 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
4686 
4687 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
4688 {
4689 	struct bpf_map *map;
4690 	u32 id = attr->map_id;
4691 	int f_flags;
4692 	int fd;
4693 
4694 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
4695 	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
4696 		return -EINVAL;
4697 
4698 	if (!capable(CAP_SYS_ADMIN))
4699 		return -EPERM;
4700 
4701 	f_flags = bpf_get_file_flag(attr->open_flags);
4702 	if (f_flags < 0)
4703 		return f_flags;
4704 
4705 	spin_lock_bh(&map_idr_lock);
4706 	map = idr_find(&map_idr, id);
4707 	if (map)
4708 		map = __bpf_map_inc_not_zero(map, true);
4709 	else
4710 		map = ERR_PTR(-ENOENT);
4711 	spin_unlock_bh(&map_idr_lock);
4712 
4713 	if (IS_ERR(map))
4714 		return PTR_ERR(map);
4715 
4716 	fd = bpf_map_new_fd(map, f_flags);
4717 	if (fd < 0)
4718 		bpf_map_put_with_uref(map);
4719 
4720 	return fd;
4721 }
4722 
4723 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
4724 					      unsigned long addr, u32 *off,
4725 					      u32 *type)
4726 {
4727 	const struct bpf_map *map;
4728 	int i;
4729 
4730 	mutex_lock(&prog->aux->used_maps_mutex);
4731 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
4732 		map = prog->aux->used_maps[i];
4733 		if (map == (void *)addr) {
4734 			*type = BPF_PSEUDO_MAP_FD;
4735 			goto out;
4736 		}
4737 		if (!map->ops->map_direct_value_meta)
4738 			continue;
4739 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
4740 			*type = BPF_PSEUDO_MAP_VALUE;
4741 			goto out;
4742 		}
4743 	}
4744 	map = NULL;
4745 
4746 out:
4747 	mutex_unlock(&prog->aux->used_maps_mutex);
4748 	return map;
4749 }
4750 
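/*
 * Produce a sanitized copy of the program's instructions for dumping:
 * rewrite internal opcodes (tail calls, probe loads) back to their public
 * encoding, clear call immediates unless the caller's credentials allow a
 * raw dump, and translate map pointers embedded in ld_imm64 back into map
 * IDs and value offsets.
 */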
4751 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
4752 					      const struct cred *f_cred)
4753 {
4754 	const struct bpf_map *map;
4755 	struct bpf_insn *insns;
4756 	u32 off, type;
4757 	u64 imm;
4758 	u8 code;
4759 	int i;
4760 
4761 	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
4762 			GFP_USER);
4763 	if (!insns)
4764 		return insns;
4765 
4766 	for (i = 0; i < prog->len; i++) {
4767 		code = insns[i].code;
4768 
4769 		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
4770 			insns[i].code = BPF_JMP | BPF_CALL;
4771 			insns[i].imm = BPF_FUNC_tail_call;
4772 			/* fall-through */
4773 		}
4774 		if (code == (BPF_JMP | BPF_CALL) ||
4775 		    code == (BPF_JMP | BPF_CALL_ARGS)) {
4776 			if (code == (BPF_JMP | BPF_CALL_ARGS))
4777 				insns[i].code = BPF_JMP | BPF_CALL;
4778 			if (!bpf_dump_raw_ok(f_cred))
4779 				insns[i].imm = 0;
4780 			continue;
4781 		}
4782 		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
4783 			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
4784 			continue;
4785 		}
4786 
4787 		if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX ||
4788 		     BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) {
4789 			insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM;
4790 			continue;
4791 		}
4792 
4793 		if (code != (BPF_LD | BPF_IMM | BPF_DW))
4794 			continue;
4795 
4796 		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
4797 		map = bpf_map_from_imm(prog, imm, &off, &type);
4798 		if (map) {
4799 			insns[i].src_reg = type;
4800 			insns[i].imm = map->id;
4801 			insns[i + 1].imm = off;
4802 			continue;
4803 		}
4804 	}
4805 
4806 	return insns;
4807 }
4808 
4809 static int set_info_rec_size(struct bpf_prog_info *info)
4810 {
4811 	/*
4812 	 * Ensure info.*_rec_size is the same as kernel expected size
4813 	 *
4814 	 * or
4815 	 *
4816 	 * Only allow zero *_rec_size if both _rec_size and _cnt are
4817 	 * zero.  In this case, the kernel will set the expected
4818 	 * _rec_size back to the info.
4819 	 */
4820 
4821 	if ((info->nr_func_info || info->func_info_rec_size) &&
4822 	    info->func_info_rec_size != sizeof(struct bpf_func_info))
4823 		return -EINVAL;
4824 
4825 	if ((info->nr_line_info || info->line_info_rec_size) &&
4826 	    info->line_info_rec_size != sizeof(struct bpf_line_info))
4827 		return -EINVAL;
4828 
4829 	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
4830 	    info->jited_line_info_rec_size != sizeof(__u64))
4831 		return -EINVAL;
4832 
4833 	info->func_info_rec_size = sizeof(struct bpf_func_info);
4834 	info->line_info_rec_size = sizeof(struct bpf_line_info);
4835 	info->jited_line_info_rec_size = sizeof(__u64);
4836 
4837 	return 0;
4838 }
4839 
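/*
 * Fill bpf_prog_info for BPF_OBJ_GET_INFO_BY_FD.  Callers without
 * bpf_capable() only get the basic metadata; JITed images, kernel symbol
 * addresses and jited line info are additionally gated on
 * bpf_dump_raw_ok() against the credentials the fd was opened with.
 */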
4840 static int bpf_prog_get_info_by_fd(struct file *file,
4841 				   struct bpf_prog *prog,
4842 				   const union bpf_attr *attr,
4843 				   union bpf_attr __user *uattr)
4844 {
4845 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4846 	struct btf *attach_btf = bpf_prog_get_target_btf(prog);
4847 	struct bpf_prog_info info;
4848 	u32 info_len = attr->info.info_len;
4849 	struct bpf_prog_kstats stats;
4850 	char __user *uinsns;
4851 	u32 ulen;
4852 	int err;
4853 
4854 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4855 	if (err)
4856 		return err;
4857 	info_len = min_t(u32, sizeof(info), info_len);
4858 
4859 	memset(&info, 0, sizeof(info));
4860 	if (copy_from_user(&info, uinfo, info_len))
4861 		return -EFAULT;
4862 
4863 	info.type = prog->type;
4864 	info.id = prog->aux->id;
4865 	info.load_time = prog->aux->load_time;
4866 	info.created_by_uid = from_kuid_munged(current_user_ns(),
4867 					       prog->aux->user->uid);
4868 	info.gpl_compatible = prog->gpl_compatible;
4869 
4870 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
4871 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
4872 
4873 	mutex_lock(&prog->aux->used_maps_mutex);
4874 	ulen = info.nr_map_ids;
4875 	info.nr_map_ids = prog->aux->used_map_cnt;
4876 	ulen = min_t(u32, info.nr_map_ids, ulen);
4877 	if (ulen) {
4878 		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
4879 		u32 i;
4880 
4881 		for (i = 0; i < ulen; i++)
4882 			if (put_user(prog->aux->used_maps[i]->id,
4883 				     &user_map_ids[i])) {
4884 				mutex_unlock(&prog->aux->used_maps_mutex);
4885 				return -EFAULT;
4886 			}
4887 	}
4888 	mutex_unlock(&prog->aux->used_maps_mutex);
4889 
4890 	err = set_info_rec_size(&info);
4891 	if (err)
4892 		return err;
4893 
4894 	bpf_prog_get_stats(prog, &stats);
4895 	info.run_time_ns = stats.nsecs;
4896 	info.run_cnt = stats.cnt;
4897 	info.recursion_misses = stats.misses;
4898 
4899 	info.verified_insns = prog->aux->verified_insns;
4900 	if (prog->aux->btf)
4901 		info.btf_id = btf_obj_id(prog->aux->btf);
4902 
4903 	if (!bpf_capable()) {
4904 		info.jited_prog_len = 0;
4905 		info.xlated_prog_len = 0;
4906 		info.nr_jited_ksyms = 0;
4907 		info.nr_jited_func_lens = 0;
4908 		info.nr_func_info = 0;
4909 		info.nr_line_info = 0;
4910 		info.nr_jited_line_info = 0;
4911 		goto done;
4912 	}
4913 
4914 	ulen = info.xlated_prog_len;
4915 	info.xlated_prog_len = bpf_prog_insn_size(prog);
4916 	if (info.xlated_prog_len && ulen) {
4917 		struct bpf_insn *insns_sanitized;
4918 		bool fault;
4919 
4920 		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
4921 			info.xlated_prog_insns = 0;
4922 			goto done;
4923 		}
4924 		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
4925 		if (!insns_sanitized)
4926 			return -ENOMEM;
4927 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
4928 		ulen = min_t(u32, info.xlated_prog_len, ulen);
4929 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
4930 		kfree(insns_sanitized);
4931 		if (fault)
4932 			return -EFAULT;
4933 	}
4934 
4935 	if (bpf_prog_is_offloaded(prog->aux)) {
4936 		err = bpf_prog_offload_info_fill(&info, prog);
4937 		if (err)
4938 			return err;
4939 		goto done;
4940 	}
4941 
4942 	/* NOTE: the following code is supposed to be skipped for offload.
4943 	 * bpf_prog_offload_info_fill() is the place to fill similar fields
4944 	 * for offload.
4945 	 */
4946 	ulen = info.jited_prog_len;
4947 	if (prog->aux->func_cnt) {
4948 		u32 i;
4949 
4950 		info.jited_prog_len = 0;
4951 		for (i = 0; i < prog->aux->func_cnt; i++)
4952 			info.jited_prog_len += prog->aux->func[i]->jited_len;
4953 	} else {
4954 		info.jited_prog_len = prog->jited_len;
4955 	}
4956 
4957 	if (info.jited_prog_len && ulen) {
4958 		if (bpf_dump_raw_ok(file->f_cred)) {
4959 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
4960 			ulen = min_t(u32, info.jited_prog_len, ulen);
4961 
4962 			/* for multi-function programs, copy the JITed
4963 			 * instructions for all the functions
4964 			 */
4965 			if (prog->aux->func_cnt) {
4966 				u32 len, free, i;
4967 				u8 *img;
4968 
4969 				free = ulen;
4970 				for (i = 0; i < prog->aux->func_cnt; i++) {
4971 					len = prog->aux->func[i]->jited_len;
4972 					len = min_t(u32, len, free);
4973 					img = (u8 *) prog->aux->func[i]->bpf_func;
4974 					if (copy_to_user(uinsns, img, len))
4975 						return -EFAULT;
4976 					uinsns += len;
4977 					free -= len;
4978 					if (!free)
4979 						break;
4980 				}
4981 			} else {
4982 				if (copy_to_user(uinsns, prog->bpf_func, ulen))
4983 					return -EFAULT;
4984 			}
4985 		} else {
4986 			info.jited_prog_insns = 0;
4987 		}
4988 	}
4989 
4990 	ulen = info.nr_jited_ksyms;
4991 	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4992 	if (ulen) {
4993 		if (bpf_dump_raw_ok(file->f_cred)) {
4994 			unsigned long ksym_addr;
4995 			u64 __user *user_ksyms;
4996 			u32 i;
4997 
4998 			/* copy the address of the kernel symbol
4999 			 * corresponding to each function
5000 			 */
5001 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
5002 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
5003 			if (prog->aux->func_cnt) {
5004 				for (i = 0; i < ulen; i++) {
5005 					ksym_addr = (unsigned long)
5006 						prog->aux->func[i]->bpf_func;
5007 					if (put_user((u64) ksym_addr,
5008 						     &user_ksyms[i]))
5009 						return -EFAULT;
5010 				}
5011 			} else {
5012 				ksym_addr = (unsigned long) prog->bpf_func;
5013 				if (put_user((u64) ksym_addr, &user_ksyms[0]))
5014 					return -EFAULT;
5015 			}
5016 		} else {
5017 			info.jited_ksyms = 0;
5018 		}
5019 	}
5020 
5021 	ulen = info.nr_jited_func_lens;
5022 	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
5023 	if (ulen) {
5024 		if (bpf_dump_raw_ok(file->f_cred)) {
5025 			u32 __user *user_lens;
5026 			u32 func_len, i;
5027 
5028 			/* copy the JITed image lengths for each function */
5029 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
5030 			user_lens = u64_to_user_ptr(info.jited_func_lens);
5031 			if (prog->aux->func_cnt) {
5032 				for (i = 0; i < ulen; i++) {
5033 					func_len =
5034 						prog->aux->func[i]->jited_len;
5035 					if (put_user(func_len, &user_lens[i]))
5036 						return -EFAULT;
5037 				}
5038 			} else {
5039 				func_len = prog->jited_len;
5040 				if (put_user(func_len, &user_lens[0]))
5041 					return -EFAULT;
5042 			}
5043 		} else {
5044 			info.jited_func_lens = 0;
5045 		}
5046 	}
5047 
5048 	info.attach_btf_id = prog->aux->attach_btf_id;
5049 	if (attach_btf)
5050 		info.attach_btf_obj_id = btf_obj_id(attach_btf);
5051 
5052 	ulen = info.nr_func_info;
5053 	info.nr_func_info = prog->aux->func_info_cnt;
5054 	if (info.nr_func_info && ulen) {
5055 		char __user *user_finfo;
5056 
5057 		user_finfo = u64_to_user_ptr(info.func_info);
5058 		ulen = min_t(u32, info.nr_func_info, ulen);
5059 		if (copy_to_user(user_finfo, prog->aux->func_info,
5060 				 info.func_info_rec_size * ulen))
5061 			return -EFAULT;
5062 	}
5063 
5064 	ulen = info.nr_line_info;
5065 	info.nr_line_info = prog->aux->nr_linfo;
5066 	if (info.nr_line_info && ulen) {
5067 		__u8 __user *user_linfo;
5068 
5069 		user_linfo = u64_to_user_ptr(info.line_info);
5070 		ulen = min_t(u32, info.nr_line_info, ulen);
5071 		if (copy_to_user(user_linfo, prog->aux->linfo,
5072 				 info.line_info_rec_size * ulen))
5073 			return -EFAULT;
5074 	}
5075 
5076 	ulen = info.nr_jited_line_info;
5077 	if (prog->aux->jited_linfo)
5078 		info.nr_jited_line_info = prog->aux->nr_linfo;
5079 	else
5080 		info.nr_jited_line_info = 0;
5081 	if (info.nr_jited_line_info && ulen) {
5082 		if (bpf_dump_raw_ok(file->f_cred)) {
5083 			unsigned long line_addr;
5084 			__u64 __user *user_linfo;
5085 			u32 i;
5086 
5087 			user_linfo = u64_to_user_ptr(info.jited_line_info);
5088 			ulen = min_t(u32, info.nr_jited_line_info, ulen);
5089 			for (i = 0; i < ulen; i++) {
5090 				line_addr = (unsigned long)prog->aux->jited_linfo[i];
5091 				if (put_user((__u64)line_addr, &user_linfo[i]))
5092 					return -EFAULT;
5093 			}
5094 		} else {
5095 			info.jited_line_info = 0;
5096 		}
5097 	}
5098 
5099 	ulen = info.nr_prog_tags;
5100 	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
5101 	if (ulen) {
5102 		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
5103 		u32 i;
5104 
5105 		user_prog_tags = u64_to_user_ptr(info.prog_tags);
5106 		ulen = min_t(u32, info.nr_prog_tags, ulen);
5107 		if (prog->aux->func_cnt) {
5108 			for (i = 0; i < ulen; i++) {
5109 				if (copy_to_user(user_prog_tags[i],
5110 						 prog->aux->func[i]->tag,
5111 						 BPF_TAG_SIZE))
5112 					return -EFAULT;
5113 			}
5114 		} else {
5115 			if (copy_to_user(user_prog_tags[0],
5116 					 prog->tag, BPF_TAG_SIZE))
5117 				return -EFAULT;
5118 		}
5119 	}
5120 
5121 done:
5122 	if (copy_to_user(uinfo, &info, info_len) ||
5123 	    put_user(info_len, &uattr->info.info_len))
5124 		return -EFAULT;
5125 
5126 	return 0;
5127 }
5128 
5129 static int bpf_map_get_info_by_fd(struct file *file,
5130 				  struct bpf_map *map,
5131 				  const union bpf_attr *attr,
5132 				  union bpf_attr __user *uattr)
5133 {
5134 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
5135 	struct bpf_map_info info;
5136 	u32 info_len = attr->info.info_len;
5137 	int err;
5138 
5139 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
5140 	if (err)
5141 		return err;
5142 	info_len = min_t(u32, sizeof(info), info_len);
5143 
5144 	memset(&info, 0, sizeof(info));
5145 	info.type = map->map_type;
5146 	info.id = map->id;
5147 	info.key_size = map->key_size;
5148 	info.value_size = map->value_size;
5149 	info.max_entries = map->max_entries;
5150 	info.map_flags = map->map_flags;
5151 	info.map_extra = map->map_extra;
5152 	memcpy(info.name, map->name, sizeof(map->name));
5153 
5154 	if (map->btf) {
5155 		info.btf_id = btf_obj_id(map->btf);
5156 		info.btf_key_type_id = map->btf_key_type_id;
5157 		info.btf_value_type_id = map->btf_value_type_id;
5158 	}
5159 	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
5160 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
5161 		bpf_map_struct_ops_info_fill(&info, map);
5162 
5163 	if (bpf_map_is_offloaded(map)) {
5164 		err = bpf_map_offload_info_fill(&info, map);
5165 		if (err)
5166 			return err;
5167 	}
5168 
5169 	if (copy_to_user(uinfo, &info, info_len) ||
5170 	    put_user(info_len, &uattr->info.info_len))
5171 		return -EFAULT;
5172 
5173 	return 0;
5174 }
5175 
5176 static int bpf_btf_get_info_by_fd(struct file *file,
5177 				  struct btf *btf,
5178 				  const union bpf_attr *attr,
5179 				  union bpf_attr __user *uattr)
5180 {
5181 	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
5182 	u32 info_len = attr->info.info_len;
5183 	int err;
5184 
5185 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
5186 	if (err)
5187 		return err;
5188 
5189 	return btf_get_info_by_fd(btf, attr, uattr);
5190 }
5191 
5192 static int bpf_link_get_info_by_fd(struct file *file,
5193 				  struct bpf_link *link,
5194 				  const union bpf_attr *attr,
5195 				  union bpf_attr __user *uattr)
5196 {
5197 	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
5198 	struct bpf_link_info info;
5199 	u32 info_len = attr->info.info_len;
5200 	int err;
5201 
5202 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
5203 	if (err)
5204 		return err;
5205 	info_len = min_t(u32, sizeof(info), info_len);
5206 
5207 	memset(&info, 0, sizeof(info));
5208 	if (copy_from_user(&info, uinfo, info_len))
5209 		return -EFAULT;
5210 
5211 	info.type = link->type;
5212 	info.id = link->id;
5213 	if (link->prog)
5214 		info.prog_id = link->prog->aux->id;
5215 
5216 	if (link->ops->fill_link_info) {
5217 		err = link->ops->fill_link_info(link, &info);
5218 		if (err)
5219 			return err;
5220 	}
5221 
5222 	if (copy_to_user(uinfo, &info, info_len) ||
5223 	    put_user(info_len, &uattr->info.info_len))
5224 		return -EFAULT;
5225 
5226 	return 0;
5227 }
5228 
5229 
5230 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
5231 
5232 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
5233 				  union bpf_attr __user *uattr)
5234 {
5235 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
5236 		return -EINVAL;
5237 
5238 	CLASS(fd, f)(attr->info.bpf_fd);
5239 	if (fd_empty(f))
5240 		return -EBADFD;
5241 
5242 	if (fd_file(f)->f_op == &bpf_prog_fops)
5243 		return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
5244 					      uattr);
5245 	else if (fd_file(f)->f_op == &bpf_map_fops)
5246 		return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
5247 					     uattr);
5248 	else if (fd_file(f)->f_op == &btf_fops)
5249 		return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
5250 	else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll)
5251 		return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
5252 					      attr, uattr);
5253 	return -EINVAL;
5254 }
5255 
5256 #define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
5257 
5258 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
5259 {
5260 	struct bpf_token *token = NULL;
5261 
5262 	if (CHECK_ATTR(BPF_BTF_LOAD))
5263 		return -EINVAL;
5264 
5265 	if (attr->btf_flags & ~BPF_F_TOKEN_FD)
5266 		return -EINVAL;
5267 
5268 	if (attr->btf_flags & BPF_F_TOKEN_FD) {
5269 		token = bpf_token_get_from_fd(attr->btf_token_fd);
5270 		if (IS_ERR(token))
5271 			return PTR_ERR(token);
5272 		if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
5273 			bpf_token_put(token);
5274 			token = NULL;
5275 		}
5276 	}
5277 
5278 	if (!bpf_token_capable(token, CAP_BPF)) {
5279 		bpf_token_put(token);
5280 		return -EPERM;
5281 	}
5282 
5283 	bpf_token_put(token);
5284 
5285 	return btf_new_fd(attr, uattr, uattr_size);
5286 }
5287 
5288 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD fd_by_id_token_fd
5289 
5290 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
5291 {
5292 	struct bpf_token *token = NULL;
5293 
5294 	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
5295 		return -EINVAL;
5296 
5297 	if (attr->open_flags & ~BPF_F_TOKEN_FD)
5298 		return -EINVAL;
5299 
5300 	if (attr->open_flags & BPF_F_TOKEN_FD) {
5301 		token = bpf_token_get_from_fd(attr->fd_by_id_token_fd);
5302 		if (IS_ERR(token))
5303 			return PTR_ERR(token);
5304 		if (!bpf_token_allow_cmd(token, BPF_BTF_GET_FD_BY_ID)) {
5305 			bpf_token_put(token);
5306 			token = NULL;
5307 		}
5308 	}
5309 
5310 	if (!bpf_token_capable(token, CAP_SYS_ADMIN)) {
5311 		bpf_token_put(token);
5312 		return -EPERM;
5313 	}
5314 
5315 	bpf_token_put(token);
5316 
5317 	return btf_get_fd_by_id(attr->btf_id);
5318 }
5319 
5320 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
5321 				    union bpf_attr __user *uattr,
5322 				    u32 prog_id, u32 fd_type,
5323 				    const char *buf, u64 probe_offset,
5324 				    u64 probe_addr)
5325 {
5326 	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
5327 	u32 len = buf ? strlen(buf) : 0, input_len;
5328 	int err = 0;
5329 
5330 	if (put_user(len, &uattr->task_fd_query.buf_len))
5331 		return -EFAULT;
5332 	input_len = attr->task_fd_query.buf_len;
5333 	if (input_len && ubuf) {
5334 		if (!len) {
5335 			/* nothing to copy, just make ubuf NULL terminated */
5336 			char zero = '\0';
5337 
5338 			if (put_user(zero, ubuf))
5339 				return -EFAULT;
5340 		} else if (input_len >= len + 1) {
5341 			/* ubuf can hold the string with NULL terminator */
5342 			if (copy_to_user(ubuf, buf, len + 1))
5343 				return -EFAULT;
5344 		} else {
5345 			/* ubuf cannot hold the string with NULL terminator,
5346 			 * do a partial copy with NULL terminator.
5347 			 */
5348 			char zero = '\0';
5349 
5350 			err = -ENOSPC;
5351 			if (copy_to_user(ubuf, buf, input_len - 1))
5352 				return -EFAULT;
5353 			if (put_user(zero, ubuf + input_len - 1))
5354 				return -EFAULT;
5355 		}
5356 	}
5357 
5358 	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
5359 	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
5360 	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
5361 	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
5362 		return -EFAULT;
5363 
5364 	return err;
5365 }
5366 
5367 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
5368 
5369 static int bpf_task_fd_query(const union bpf_attr *attr,
5370 			     union bpf_attr __user *uattr)
5371 {
5372 	pid_t pid = attr->task_fd_query.pid;
5373 	u32 fd = attr->task_fd_query.fd;
5374 	const struct perf_event *event;
5375 	struct task_struct *task;
5376 	struct file *file;
5377 	int err;
5378 
5379 	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
5380 		return -EINVAL;
5381 
5382 	if (!capable(CAP_SYS_ADMIN))
5383 		return -EPERM;
5384 
5385 	if (attr->task_fd_query.flags != 0)
5386 		return -EINVAL;
5387 
5388 	rcu_read_lock();
5389 	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
5390 	rcu_read_unlock();
5391 	if (!task)
5392 		return -ENOENT;
5393 
5394 	err = 0;
5395 	file = fget_task(task, fd);
5396 	put_task_struct(task);
5397 	if (!file)
5398 		return -EBADF;
5399 
5400 	if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) {
5401 		struct bpf_link *link = file->private_data;
5402 
5403 		if (link->ops == &bpf_raw_tp_link_lops) {
5404 			struct bpf_raw_tp_link *raw_tp =
5405 				container_of(link, struct bpf_raw_tp_link, link);
5406 			struct bpf_raw_event_map *btp = raw_tp->btp;
5407 
5408 			err = bpf_task_fd_query_copy(attr, uattr,
5409 						     raw_tp->link.prog->aux->id,
5410 						     BPF_FD_TYPE_RAW_TRACEPOINT,
5411 						     btp->tp->name, 0, 0);
5412 			goto put_file;
5413 		}
5414 		goto out_not_supp;
5415 	}
5416 
5417 	event = perf_get_event(file);
5418 	if (!IS_ERR(event)) {
5419 		u64 probe_offset, probe_addr;
5420 		u32 prog_id, fd_type;
5421 		const char *buf;
5422 
5423 		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
5424 					      &buf, &probe_offset,
5425 					      &probe_addr, NULL);
5426 		if (!err)
5427 			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
5428 						     fd_type, buf,
5429 						     probe_offset,
5430 						     probe_addr);
5431 		goto put_file;
5432 	}
5433 
5434 out_not_supp:
5435 	err = -ENOTSUPP;
5436 put_file:
5437 	fput(file);
5438 	return err;
5439 }
5440 
5441 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
5442 
5443 #define BPF_DO_BATCH(fn, ...)			\
5444 	do {					\
5445 		if (!fn) {			\
5446 			err = -ENOTSUPP;	\
5447 			goto err_put;		\
5448 		}				\
5449 		err = fn(__VA_ARGS__);		\
5450 	} while (0)
5451 
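/*
 * Dispatch a BPF_MAP_*_BATCH command to the map's batch callbacks.  Lookup
 * style commands need FMODE_CAN_READ on the map fd; everything except a
 * plain lookup also needs FMODE_CAN_WRITE and holds the map's write_active
 * count for the duration of the operation.
 */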
5452 static int bpf_map_do_batch(const union bpf_attr *attr,
5453 			    union bpf_attr __user *uattr,
5454 			    int cmd)
5455 {
5456 	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
5457 			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
5458 	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
5459 	struct bpf_map *map;
5460 	int err;
5461 
5462 	if (CHECK_ATTR(BPF_MAP_BATCH))
5463 		return -EINVAL;
5464 
5465 	CLASS(fd, f)(attr->batch.map_fd);
5466 
5467 	map = __bpf_map_get(f);
5468 	if (IS_ERR(map))
5469 		return PTR_ERR(map);
5470 	if (has_write)
5471 		bpf_map_write_active_inc(map);
5472 	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
5473 		err = -EPERM;
5474 		goto err_put;
5475 	}
5476 	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
5477 		err = -EPERM;
5478 		goto err_put;
5479 	}
5480 
5481 	if (cmd == BPF_MAP_LOOKUP_BATCH)
5482 		BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
5483 	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
5484 		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
5485 	else if (cmd == BPF_MAP_UPDATE_BATCH)
5486 		BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr);
5487 	else
5488 		BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
5489 err_put:
5490 	if (has_write) {
5491 		maybe_wait_bpf_programs(map);
5492 		bpf_map_write_active_dec(map);
5493 	}
5494 	return err;
5495 }
5496 
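/*
 * BPF_LINK_CREATE: dispatch on the program type (and, where one program
 * type serves several attach points, on link_create.attach_type) to the
 * subsystem-specific link constructor; struct_ops links are special-cased
 * before the program fd is even looked up.
 */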
5497 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
5498 static int link_create(union bpf_attr *attr, bpfptr_t uattr)
5499 {
5500 	struct bpf_prog *prog;
5501 	int ret;
5502 
5503 	if (CHECK_ATTR(BPF_LINK_CREATE))
5504 		return -EINVAL;
5505 
5506 	if (attr->link_create.attach_type == BPF_STRUCT_OPS)
5507 		return bpf_struct_ops_link_create(attr);
5508 
5509 	prog = bpf_prog_get(attr->link_create.prog_fd);
5510 	if (IS_ERR(prog))
5511 		return PTR_ERR(prog);
5512 
5513 	ret = bpf_prog_attach_check_attach_type(prog,
5514 						attr->link_create.attach_type);
5515 	if (ret)
5516 		goto out;
5517 
5518 	switch (prog->type) {
5519 	case BPF_PROG_TYPE_CGROUP_SKB:
5520 	case BPF_PROG_TYPE_CGROUP_SOCK:
5521 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
5522 	case BPF_PROG_TYPE_SOCK_OPS:
5523 	case BPF_PROG_TYPE_CGROUP_DEVICE:
5524 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
5525 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5526 		ret = cgroup_bpf_link_attach(attr, prog);
5527 		break;
5528 	case BPF_PROG_TYPE_EXT:
5529 		ret = bpf_tracing_prog_attach(prog,
5530 					      attr->link_create.target_fd,
5531 					      attr->link_create.target_btf_id,
5532 					      attr->link_create.tracing.cookie);
5533 		break;
5534 	case BPF_PROG_TYPE_LSM:
5535 	case BPF_PROG_TYPE_TRACING:
5536 		if (attr->link_create.attach_type != prog->expected_attach_type) {
5537 			ret = -EINVAL;
5538 			goto out;
5539 		}
5540 		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
5541 			ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie);
5542 		else if (prog->expected_attach_type == BPF_TRACE_ITER)
5543 			ret = bpf_iter_link_attach(attr, uattr, prog);
5544 		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
5545 			ret = cgroup_bpf_link_attach(attr, prog);
5546 		else
5547 			ret = bpf_tracing_prog_attach(prog,
5548 						      attr->link_create.target_fd,
5549 						      attr->link_create.target_btf_id,
5550 						      attr->link_create.tracing.cookie);
5551 		break;
5552 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
5553 	case BPF_PROG_TYPE_SK_LOOKUP:
5554 		ret = netns_bpf_link_create(attr, prog);
5555 		break;
5556 	case BPF_PROG_TYPE_SK_MSG:
5557 	case BPF_PROG_TYPE_SK_SKB:
5558 		ret = sock_map_link_create(attr, prog);
5559 		break;
5560 #ifdef CONFIG_NET
5561 	case BPF_PROG_TYPE_XDP:
5562 		ret = bpf_xdp_link_attach(attr, prog);
5563 		break;
5564 	case BPF_PROG_TYPE_SCHED_CLS:
5565 		if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
5566 		    attr->link_create.attach_type == BPF_TCX_EGRESS)
5567 			ret = tcx_link_attach(attr, prog);
5568 		else
5569 			ret = netkit_link_attach(attr, prog);
5570 		break;
5571 	case BPF_PROG_TYPE_NETFILTER:
5572 		ret = bpf_nf_link_attach(attr, prog);
5573 		break;
5574 #endif
5575 	case BPF_PROG_TYPE_PERF_EVENT:
5576 	case BPF_PROG_TYPE_TRACEPOINT:
5577 		ret = bpf_perf_link_attach(attr, prog);
5578 		break;
5579 	case BPF_PROG_TYPE_KPROBE:
5580 		if (attr->link_create.attach_type == BPF_PERF_EVENT)
5581 			ret = bpf_perf_link_attach(attr, prog);
5582 		else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI ||
5583 			 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION)
5584 			ret = bpf_kprobe_multi_link_attach(attr, prog);
5585 		else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI ||
5586 			 attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION)
5587 			ret = bpf_uprobe_multi_link_attach(attr, prog);
5588 		break;
5589 	default:
5590 		ret = -EINVAL;
5591 	}
5592 
5593 out:
5594 	if (ret < 0)
5595 		bpf_prog_put(prog);
5596 	return ret;
5597 }
5598 
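/*
 * BPF_LINK_UPDATE for links whose ops provide ->update_map: swap in
 * new_map_fd, passing the old map for verification when BPF_F_REPLACE
 * is set.
 */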
5599 static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
5600 {
5601 	struct bpf_map *new_map, *old_map = NULL;
5602 	int ret;
5603 
5604 	new_map = bpf_map_get(attr->link_update.new_map_fd);
5605 	if (IS_ERR(new_map))
5606 		return PTR_ERR(new_map);
5607 
5608 	if (attr->link_update.flags & BPF_F_REPLACE) {
5609 		old_map = bpf_map_get(attr->link_update.old_map_fd);
5610 		if (IS_ERR(old_map)) {
5611 			ret = PTR_ERR(old_map);
5612 			goto out_put;
5613 		}
5614 	} else if (attr->link_update.old_map_fd) {
5615 		ret = -EINVAL;
5616 		goto out_put;
5617 	}
5618 
5619 	ret = link->ops->update_map(link, new_map, old_map);
5620 
5621 	if (old_map)
5622 		bpf_map_put(old_map);
5623 out_put:
5624 	bpf_map_put(new_map);
5625 	return ret;
5626 }
5627 
5628 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
5629 
5630 static int link_update(union bpf_attr *attr)
5631 {
5632 	struct bpf_prog *old_prog = NULL, *new_prog;
5633 	struct bpf_link *link;
5634 	u32 flags;
5635 	int ret;
5636 
5637 	if (CHECK_ATTR(BPF_LINK_UPDATE))
5638 		return -EINVAL;
5639 
5640 	flags = attr->link_update.flags;
5641 	if (flags & ~BPF_F_REPLACE)
5642 		return -EINVAL;
5643 
5644 	link = bpf_link_get_from_fd(attr->link_update.link_fd);
5645 	if (IS_ERR(link))
5646 		return PTR_ERR(link);
5647 
5648 	if (link->ops->update_map) {
5649 		ret = link_update_map(link, attr);
5650 		goto out_put_link;
5651 	}
5652 
5653 	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
5654 	if (IS_ERR(new_prog)) {
5655 		ret = PTR_ERR(new_prog);
5656 		goto out_put_link;
5657 	}
5658 
5659 	if (flags & BPF_F_REPLACE) {
5660 		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
5661 		if (IS_ERR(old_prog)) {
5662 			ret = PTR_ERR(old_prog);
5663 			old_prog = NULL;
5664 			goto out_put_progs;
5665 		}
5666 	} else if (attr->link_update.old_prog_fd) {
5667 		ret = -EINVAL;
5668 		goto out_put_progs;
5669 	}
5670 
5671 	if (link->ops->update_prog)
5672 		ret = link->ops->update_prog(link, new_prog, old_prog);
5673 	else
5674 		ret = -EINVAL;
5675 
5676 out_put_progs:
5677 	if (old_prog)
5678 		bpf_prog_put(old_prog);
5679 	if (ret)
5680 		bpf_prog_put(new_prog);
5681 out_put_link:
5682 	bpf_link_put_direct(link);
5683 	return ret;
5684 }
5685 
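/*
 * Userspace sketch (illustrative): atomically swapping the program behind an
 * existing link with BPF_LINK_UPDATE. With BPF_F_REPLACE the kernel also
 * verifies that old_prog_fd refers to the program currently attached.
 * link_fd, new_prog_fd and old_prog_fd are placeholders.
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.link_update.link_fd = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.old_prog_fd = old_prog_fd;
 *	attr.link_update.flags = BPF_F_REPLACE;
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 */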
5686 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
5687 
5688 static int link_detach(union bpf_attr *attr)
5689 {
5690 	struct bpf_link *link;
5691 	int ret;
5692 
5693 	if (CHECK_ATTR(BPF_LINK_DETACH))
5694 		return -EINVAL;
5695 
5696 	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
5697 	if (IS_ERR(link))
5698 		return PTR_ERR(link);
5699 
5700 	if (link->ops->detach)
5701 		ret = link->ops->detach(link);
5702 	else
5703 		ret = -EOPNOTSUPP;
5704 
5705 	bpf_link_put_direct(link);
5706 	return ret;
5707 }
5708 
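/*
 * Userspace sketch (illustrative): forcing a link to detach from its target
 * while keeping the link fd itself alive. link_fd is a placeholder.
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.link_detach.link_fd = link_fd;
 *	err = syscall(__NR_bpf, BPF_LINK_DETACH, &attr, sizeof(attr));
 */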
5709 struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
5710 {
5711 	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
5712 }
5713 EXPORT_SYMBOL(bpf_link_inc_not_zero);
5714 
5715 struct bpf_link *bpf_link_by_id(u32 id)
5716 {
5717 	struct bpf_link *link;
5718 
5719 	if (!id)
5720 		return ERR_PTR(-ENOENT);
5721 
5722 	spin_lock_bh(&link_idr_lock);
5723 	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
5724 	link = idr_find(&link_idr, id);
5725 	if (link) {
5726 		if (link->id)
5727 			link = bpf_link_inc_not_zero(link);
5728 		else
5729 			link = ERR_PTR(-EAGAIN);
5730 	} else {
5731 		link = ERR_PTR(-ENOENT);
5732 	}
5733 	spin_unlock_bh(&link_idr_lock);
5734 	return link;
5735 }
5736 
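/*
 * Return a reference to the link with the smallest id >= *id, or NULL if
 * there is none; on success *id is updated by the IDR lookup to the id that
 * was found. Links whose refcount has already dropped to zero are skipped by
 * bumping *id and retrying. The caller owns the returned reference.
 */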
5737 struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
5738 {
5739 	struct bpf_link *link;
5740 
5741 	spin_lock_bh(&link_idr_lock);
5742 again:
5743 	link = idr_get_next(&link_idr, id);
5744 	if (link) {
5745 		link = bpf_link_inc_not_zero(link);
5746 		if (IS_ERR(link)) {
5747 			(*id)++;
5748 			goto again;
5749 		}
5750 	}
5751 	spin_unlock_bh(&link_idr_lock);
5752 
5753 	return link;
5754 }
5755 
5756 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
5757 
5758 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
5759 {
5760 	struct bpf_link *link;
5761 	u32 id = attr->link_id;
5762 	int fd;
5763 
5764 	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
5765 		return -EINVAL;
5766 
5767 	if (!capable(CAP_SYS_ADMIN))
5768 		return -EPERM;
5769 
5770 	link = bpf_link_by_id(id);
5771 	if (IS_ERR(link))
5772 		return PTR_ERR(link);
5773 
5774 	fd = bpf_link_new_fd(link);
5775 	if (fd < 0)
5776 		bpf_link_put_direct(link);
5777 
5778 	return fd;
5779 }
5780 
5781 DEFINE_MUTEX(bpf_stats_enabled_mutex);
5782 
5783 static int bpf_stats_release(struct inode *inode, struct file *file)
5784 {
5785 	mutex_lock(&bpf_stats_enabled_mutex);
5786 	static_key_slow_dec(&bpf_stats_enabled_key.key);
5787 	mutex_unlock(&bpf_stats_enabled_mutex);
5788 	return 0;
5789 }
5790 
5791 static const struct file_operations bpf_stats_fops = {
5792 	.release = bpf_stats_release,
5793 };
5794 
5795 static int bpf_enable_runtime_stats(void)
5796 {
5797 	int fd;
5798 
5799 	mutex_lock(&bpf_stats_enabled_mutex);
5800 
5801 	/* Set a very high limit to avoid overflow */
5802 	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
5803 		mutex_unlock(&bpf_stats_enabled_mutex);
5804 		return -EBUSY;
5805 	}
5806 
5807 	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
5808 	if (fd >= 0)
5809 		static_key_slow_inc(&bpf_stats_enabled_key.key);
5810 
5811 	mutex_unlock(&bpf_stats_enabled_mutex);
5812 	return fd;
5813 }
5814 
5815 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
5816 
5817 static int bpf_enable_stats(union bpf_attr *attr)
5818 {
5819 
5820 	if (CHECK_ATTR(BPF_ENABLE_STATS))
5821 		return -EINVAL;
5822 
5823 	if (!capable(CAP_SYS_ADMIN))
5824 		return -EPERM;
5825 
5826 	switch (attr->enable_stats.type) {
5827 	case BPF_STATS_RUN_TIME:
5828 		return bpf_enable_runtime_stats();
5829 	default:
5830 		break;
5831 	}
5832 	return -EINVAL;
5833 }
5834 
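/*
 * Userspace sketch (illustrative): turning on run-time statistics via
 * BPF_ENABLE_STATS. The returned fd keeps the static key bumped; statistics
 * (run_time_ns/run_cnt in bpf_prog_info) stop being collected once the last
 * such fd is closed and the bpf_stats_enabled sysctl is not set. Requires
 * CAP_SYS_ADMIN.
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *	stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 */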
5835 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
5836 
5837 static int bpf_iter_create(union bpf_attr *attr)
5838 {
5839 	struct bpf_link *link;
5840 	int err;
5841 
5842 	if (CHECK_ATTR(BPF_ITER_CREATE))
5843 		return -EINVAL;
5844 
5845 	if (attr->iter_create.flags)
5846 		return -EINVAL;
5847 
5848 	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
5849 	if (IS_ERR(link))
5850 		return PTR_ERR(link);
5851 
5852 	err = bpf_iter_new_fd(link);
5853 	bpf_link_put_direct(link);
5854 
5855 	return err;
5856 }
5857 
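/*
 * Userspace sketch (illustrative): materializing a readable iterator fd from
 * a BPF_TRACE_ITER link. link_fd is a placeholder for a link created earlier
 * with BPF_LINK_CREATE; the resulting fd is consumed with plain read(2).
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.iter_create.link_fd = link_fd;
 *	iter_fd = syscall(__NR_bpf, BPF_ITER_CREATE, &attr, sizeof(attr));
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 */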
5858 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
5859 
5860 static int bpf_prog_bind_map(union bpf_attr *attr)
5861 {
5862 	struct bpf_prog *prog;
5863 	struct bpf_map *map;
5864 	struct bpf_map **used_maps_old, **used_maps_new;
5865 	int i, ret = 0;
5866 
5867 	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
5868 		return -EINVAL;
5869 
5870 	if (attr->prog_bind_map.flags)
5871 		return -EINVAL;
5872 
5873 	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
5874 	if (IS_ERR(prog))
5875 		return PTR_ERR(prog);
5876 
5877 	map = bpf_map_get(attr->prog_bind_map.map_fd);
5878 	if (IS_ERR(map)) {
5879 		ret = PTR_ERR(map);
5880 		goto out_prog_put;
5881 	}
5882 
5883 	mutex_lock(&prog->aux->used_maps_mutex);
5884 
5885 	used_maps_old = prog->aux->used_maps;
5886 
5887 	for (i = 0; i < prog->aux->used_map_cnt; i++)
5888 		if (used_maps_old[i] == map) {
5889 			bpf_map_put(map);
5890 			goto out_unlock;
5891 		}
5892 
5893 	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
5894 				      sizeof(used_maps_new[0]),
5895 				      GFP_KERNEL);
5896 	if (!used_maps_new) {
5897 		ret = -ENOMEM;
5898 		goto out_unlock;
5899 	}
5900 
5901 	/* The bpf program will not access the bpf map, but for the sake of
5902 	 * simplicity, increase sleepable_refcnt for sleepable programs as well.
5903 	 */
5904 	if (prog->sleepable)
5905 		atomic64_inc(&map->sleepable_refcnt);
5906 	memcpy(used_maps_new, used_maps_old,
5907 	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
5908 	used_maps_new[prog->aux->used_map_cnt] = map;
5909 
5910 	prog->aux->used_map_cnt++;
5911 	prog->aux->used_maps = used_maps_new;
5912 
5913 	kfree(used_maps_old);
5914 
5915 out_unlock:
5916 	mutex_unlock(&prog->aux->used_maps_mutex);
5917 
5918 	if (ret)
5919 		bpf_map_put(map);
5920 out_prog_put:
5921 	bpf_prog_put(prog);
5922 	return ret;
5923 }
5924 
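/*
 * Userspace sketch (illustrative): binding a map to a program that does not
 * reference it in its instructions, so the map stays alive at least as long
 * as the program (e.g. a metadata map created by a loader). prog_fd and
 * map_fd are placeholders.
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.prog_bind_map.prog_fd = prog_fd;
 *	attr.prog_bind_map.map_fd = map_fd;
 *	err = syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 */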
5925 #define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd
5926 
5927 static int token_create(union bpf_attr *attr)
5928 {
5929 	if (CHECK_ATTR(BPF_TOKEN_CREATE))
5930 		return -EINVAL;
5931 
5932 	/* no flags are supported yet */
5933 	if (attr->token_create.flags)
5934 		return -EINVAL;
5935 
5936 	return bpf_token_create(attr);
5937 }
5938 
5939 static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
5940 {
5941 	union bpf_attr attr;
5942 	int err;
5943 
5944 	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
5945 	if (err)
5946 		return err;
5947 	size = min_t(u32, size, sizeof(attr));
5948 
5949 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
5950 	memset(&attr, 0, sizeof(attr));
5951 	if (copy_from_bpfptr(&attr, uattr, size) != 0)
5952 		return -EFAULT;
5953 
5954 	err = security_bpf(cmd, &attr, size, uattr.is_kernel);
5955 	if (err < 0)
5956 		return err;
5957 
5958 	switch (cmd) {
5959 	case BPF_MAP_CREATE:
5960 		err = map_create(&attr, uattr.is_kernel);
5961 		break;
5962 	case BPF_MAP_LOOKUP_ELEM:
5963 		err = map_lookup_elem(&attr);
5964 		break;
5965 	case BPF_MAP_UPDATE_ELEM:
5966 		err = map_update_elem(&attr, uattr);
5967 		break;
5968 	case BPF_MAP_DELETE_ELEM:
5969 		err = map_delete_elem(&attr, uattr);
5970 		break;
5971 	case BPF_MAP_GET_NEXT_KEY:
5972 		err = map_get_next_key(&attr);
5973 		break;
5974 	case BPF_MAP_FREEZE:
5975 		err = map_freeze(&attr);
5976 		break;
5977 	case BPF_PROG_LOAD:
5978 		err = bpf_prog_load(&attr, uattr, size);
5979 		break;
5980 	case BPF_OBJ_PIN:
5981 		err = bpf_obj_pin(&attr);
5982 		break;
5983 	case BPF_OBJ_GET:
5984 		err = bpf_obj_get(&attr);
5985 		break;
5986 	case BPF_PROG_ATTACH:
5987 		err = bpf_prog_attach(&attr);
5988 		break;
5989 	case BPF_PROG_DETACH:
5990 		err = bpf_prog_detach(&attr);
5991 		break;
5992 	case BPF_PROG_QUERY:
5993 		err = bpf_prog_query(&attr, uattr.user);
5994 		break;
5995 	case BPF_PROG_TEST_RUN:
5996 		err = bpf_prog_test_run(&attr, uattr.user);
5997 		break;
5998 	case BPF_PROG_GET_NEXT_ID:
5999 		err = bpf_obj_get_next_id(&attr, uattr.user,
6000 					  &prog_idr, &prog_idr_lock);
6001 		break;
6002 	case BPF_MAP_GET_NEXT_ID:
6003 		err = bpf_obj_get_next_id(&attr, uattr.user,
6004 					  &map_idr, &map_idr_lock);
6005 		break;
6006 	case BPF_BTF_GET_NEXT_ID:
6007 		err = bpf_obj_get_next_id(&attr, uattr.user,
6008 					  &btf_idr, &btf_idr_lock);
6009 		break;
6010 	case BPF_PROG_GET_FD_BY_ID:
6011 		err = bpf_prog_get_fd_by_id(&attr);
6012 		break;
6013 	case BPF_MAP_GET_FD_BY_ID:
6014 		err = bpf_map_get_fd_by_id(&attr);
6015 		break;
6016 	case BPF_OBJ_GET_INFO_BY_FD:
6017 		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
6018 		break;
6019 	case BPF_RAW_TRACEPOINT_OPEN:
6020 		err = bpf_raw_tracepoint_open(&attr);
6021 		break;
6022 	case BPF_BTF_LOAD:
6023 		err = bpf_btf_load(&attr, uattr, size);
6024 		break;
6025 	case BPF_BTF_GET_FD_BY_ID:
6026 		err = bpf_btf_get_fd_by_id(&attr);
6027 		break;
6028 	case BPF_TASK_FD_QUERY:
6029 		err = bpf_task_fd_query(&attr, uattr.user);
6030 		break;
6031 	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
6032 		err = map_lookup_and_delete_elem(&attr);
6033 		break;
6034 	case BPF_MAP_LOOKUP_BATCH:
6035 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
6036 		break;
6037 	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
6038 		err = bpf_map_do_batch(&attr, uattr.user,
6039 				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
6040 		break;
6041 	case BPF_MAP_UPDATE_BATCH:
6042 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
6043 		break;
6044 	case BPF_MAP_DELETE_BATCH:
6045 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
6046 		break;
6047 	case BPF_LINK_CREATE:
6048 		err = link_create(&attr, uattr);
6049 		break;
6050 	case BPF_LINK_UPDATE:
6051 		err = link_update(&attr);
6052 		break;
6053 	case BPF_LINK_GET_FD_BY_ID:
6054 		err = bpf_link_get_fd_by_id(&attr);
6055 		break;
6056 	case BPF_LINK_GET_NEXT_ID:
6057 		err = bpf_obj_get_next_id(&attr, uattr.user,
6058 					  &link_idr, &link_idr_lock);
6059 		break;
6060 	case BPF_ENABLE_STATS:
6061 		err = bpf_enable_stats(&attr);
6062 		break;
6063 	case BPF_ITER_CREATE:
6064 		err = bpf_iter_create(&attr);
6065 		break;
6066 	case BPF_LINK_DETACH:
6067 		err = link_detach(&attr);
6068 		break;
6069 	case BPF_PROG_BIND_MAP:
6070 		err = bpf_prog_bind_map(&attr);
6071 		break;
6072 	case BPF_TOKEN_CREATE:
6073 		err = token_create(&attr);
6074 		break;
6075 	default:
6076 		err = -EINVAL;
6077 		break;
6078 	}
6079 
6080 	return err;
6081 }
6082 
6083 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
6084 {
6085 	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
6086 }
6087 
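/*
 * There is no glibc wrapper for bpf(2); userspace goes through syscall(2)
 * directly. A minimal self-contained sketch (illustrative):
 *
 *	#include <linux/bpf.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static inline long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *				   unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 */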
6088 static bool syscall_prog_is_valid_access(int off, int size,
6089 					 enum bpf_access_type type,
6090 					 const struct bpf_prog *prog,
6091 					 struct bpf_insn_access_aux *info)
6092 {
6093 	if (off < 0 || off >= U16_MAX)
6094 		return false;
6095 	if (off % size != 0)
6096 		return false;
6097 	return true;
6098 }
6099 
6100 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
6101 {
6102 	switch (cmd) {
6103 	case BPF_MAP_CREATE:
6104 	case BPF_MAP_DELETE_ELEM:
6105 	case BPF_MAP_UPDATE_ELEM:
6106 	case BPF_MAP_FREEZE:
6107 	case BPF_MAP_GET_FD_BY_ID:
6108 	case BPF_PROG_LOAD:
6109 	case BPF_BTF_LOAD:
6110 	case BPF_LINK_CREATE:
6111 	case BPF_RAW_TRACEPOINT_OPEN:
6112 		break;
6113 	default:
6114 		return -EINVAL;
6115 	}
6116 	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
6117 }
6118 
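/*
 * Illustrative sketch of how a BPF_PROG_TYPE_SYSCALL program (e.g. a light
 * skeleton loader) uses this helper: the attr lives inside the program and is
 * passed down as a kernel pointer, and only the commands whitelisted above
 * are reachable. Assumes the usual libbpf helper declarations (bpf_helpers.h);
 * the program name and map parameters are made up for the example.
 *
 *	SEC("syscall")
 *	int create_array_map(void *ctx)
 *	{
 *		union bpf_attr attr = {
 *			.map_type    = BPF_MAP_TYPE_ARRAY,
 *			.key_size    = 4,
 *			.value_size  = 8,
 *			.max_entries = 1,
 *		};
 *
 *		return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */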
6119 
6120 /* To shut up -Wmissing-prototypes.
6121  * This function is used by the kernel light skeleton
6122  * to load bpf programs when modules are loaded or during kernel boot.
6123  * See tools/lib/bpf/skel_internal.h
6124  */
6125 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
6126 
6127 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
6128 {
6129 	struct bpf_prog * __maybe_unused prog;
6130 	struct bpf_tramp_run_ctx __maybe_unused run_ctx;
6131 
6132 	switch (cmd) {
6133 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
6134 	case BPF_PROG_TEST_RUN:
6135 		if (attr->test.data_in || attr->test.data_out ||
6136 		    attr->test.ctx_out || attr->test.duration ||
6137 		    attr->test.repeat || attr->test.flags)
6138 			return -EINVAL;
6139 
6140 		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
6141 		if (IS_ERR(prog))
6142 			return PTR_ERR(prog);
6143 
6144 		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
6145 		    attr->test.ctx_size_in > U16_MAX) {
6146 			bpf_prog_put(prog);
6147 			return -EINVAL;
6148 		}
6149 
6150 		run_ctx.bpf_cookie = 0;
6151 		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
6152 			/* recursion detected */
6153 			__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
6154 			bpf_prog_put(prog);
6155 			return -EBUSY;
6156 		}
6157 		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
6158 		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
6159 						&run_ctx);
6160 		bpf_prog_put(prog);
6161 		return 0;
6162 #endif
6163 	default:
6164 		return ____bpf_sys_bpf(cmd, attr, size);
6165 	}
6166 }
6167 EXPORT_SYMBOL_NS(kern_sys_bpf, "BPF_INTERNAL");
6168 
6169 static const struct bpf_func_proto bpf_sys_bpf_proto = {
6170 	.func		= bpf_sys_bpf,
6171 	.gpl_only	= false,
6172 	.ret_type	= RET_INTEGER,
6173 	.arg1_type	= ARG_ANYTHING,
6174 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
6175 	.arg3_type	= ARG_CONST_SIZE,
6176 };
6177 
6178 const struct bpf_func_proto * __weak
6179 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6180 {
6181 	return bpf_base_func_proto(func_id, prog);
6182 }
6183 
6184 BPF_CALL_1(bpf_sys_close, u32, fd)
6185 {
6186 	/* When a bpf program calls this helper there should not be
6187 	 * an fdget() without a matching completed fdput().
6188 	 * This helper is allowed in the following callchain only:
6189 	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
6190 	 */
6191 	return close_fd(fd);
6192 }
6193 
6194 static const struct bpf_func_proto bpf_sys_close_proto = {
6195 	.func		= bpf_sys_close,
6196 	.gpl_only	= false,
6197 	.ret_type	= RET_INTEGER,
6198 	.arg1_type	= ARG_ANYTHING,
6199 };
6200 
6201 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
6202 {
6203 	*res = 0;
6204 	if (flags)
6205 		return -EINVAL;
6206 
6207 	if (name_sz <= 1 || name[name_sz - 1])
6208 		return -EINVAL;
6209 
6210 	if (!bpf_dump_raw_ok(current_cred()))
6211 		return -EPERM;
6212 
6213 	*res = kallsyms_lookup_name(name);
6214 	return *res ? 0 : -ENOENT;
6215 }
6216 
6217 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
6218 	.func		= bpf_kallsyms_lookup_name,
6219 	.gpl_only	= false,
6220 	.ret_type	= RET_INTEGER,
6221 	.arg1_type	= ARG_PTR_TO_MEM,
6222 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
6223 	.arg3_type	= ARG_ANYTHING,
6224 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
6225 	.arg4_size	= sizeof(u64),
6226 };
6227 
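/*
 * Illustrative use from a BPF_PROG_TYPE_SYSCALL program: resolving a kernel
 * symbol at load time. name_sz must count the terminating NUL and flags must
 * currently be 0, matching the checks above; the symbol name is just an
 * example.
 *
 *	__u64 addr;
 *	static const char sym[] = "bpf_prog_put";
 *	int err = bpf_kallsyms_lookup_name(sym, sizeof(sym), 0, &addr);
 */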
6228 static const struct bpf_func_proto *
6229 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6230 {
6231 	switch (func_id) {
6232 	case BPF_FUNC_sys_bpf:
6233 		return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
6234 		       ? NULL : &bpf_sys_bpf_proto;
6235 	case BPF_FUNC_btf_find_by_name_kind:
6236 		return &bpf_btf_find_by_name_kind_proto;
6237 	case BPF_FUNC_sys_close:
6238 		return &bpf_sys_close_proto;
6239 	case BPF_FUNC_kallsyms_lookup_name:
6240 		return &bpf_kallsyms_lookup_name_proto;
6241 	default:
6242 		return tracing_prog_func_proto(func_id, prog);
6243 	}
6244 }
6245 
6246 const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
6247 	.get_func_proto  = syscall_prog_func_proto,
6248 	.is_valid_access = syscall_prog_is_valid_access,
6249 };
6250 
6251 const struct bpf_prog_ops bpf_syscall_prog_ops = {
6252 	.test_run = bpf_prog_test_run_syscall,
6253 };
6254 
6255 #ifdef CONFIG_SYSCTL
6256 static int bpf_stats_handler(const struct ctl_table *table, int write,
6257 			     void *buffer, size_t *lenp, loff_t *ppos)
6258 {
6259 	struct static_key *key = (struct static_key *)table->data;
6260 	static int saved_val;
6261 	int val, ret;
6262 	struct ctl_table tmp = {
6263 		.data   = &val,
6264 		.maxlen = sizeof(val),
6265 		.mode   = table->mode,
6266 		.extra1 = SYSCTL_ZERO,
6267 		.extra2 = SYSCTL_ONE,
6268 	};
6269 
6270 	if (write && !capable(CAP_SYS_ADMIN))
6271 		return -EPERM;
6272 
6273 	mutex_lock(&bpf_stats_enabled_mutex);
6274 	val = saved_val;
6275 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
6276 	if (write && !ret && val != saved_val) {
6277 		if (val)
6278 			static_key_slow_inc(key);
6279 		else
6280 			static_key_slow_dec(key);
6281 		saved_val = val;
6282 	}
6283 	mutex_unlock(&bpf_stats_enabled_mutex);
6284 	return ret;
6285 }
6286 
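/*
 * The same static key can be flipped from userspace without holding an fd,
 * e.g. (illustrative, error handling omitted):
 *
 *	int fd = open("/proc/sys/kernel/bpf_stats_enabled", O_WRONLY);
 *	write(fd, "1", 1);
 *	close(fd);
 *
 * Writes require CAP_SYS_ADMIN, as enforced above.
 */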
6287 void __weak unpriv_ebpf_notify(int new_state)
6288 {
6289 }
6290 
6291 static int bpf_unpriv_handler(const struct ctl_table *table, int write,
6292 			      void *buffer, size_t *lenp, loff_t *ppos)
6293 {
6294 	int ret, unpriv_enable = *(int *)table->data;
6295 	bool locked_state = unpriv_enable == 1;
6296 	struct ctl_table tmp = *table;
6297 
6298 	if (write && !capable(CAP_SYS_ADMIN))
6299 		return -EPERM;
6300 
6301 	tmp.data = &unpriv_enable;
6302 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
6303 	if (write && !ret) {
6304 		if (locked_state && unpriv_enable != 1)
6305 			return -EPERM;
6306 		*(int *)table->data = unpriv_enable;
6307 	}
6308 
6309 	if (write)
6310 		unpriv_ebpf_notify(unpriv_enable);
6311 
6312 	return ret;
6313 }
6314 
6315 static const struct ctl_table bpf_syscall_table[] = {
6316 	{
6317 		.procname	= "unprivileged_bpf_disabled",
6318 		.data		= &sysctl_unprivileged_bpf_disabled,
6319 		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
6320 		.mode		= 0644,
6321 		.proc_handler	= bpf_unpriv_handler,
6322 		.extra1		= SYSCTL_ZERO,
6323 		.extra2		= SYSCTL_TWO,
6324 	},
6325 	{
6326 		.procname	= "bpf_stats_enabled",
6327 		.data		= &bpf_stats_enabled_key.key,
6328 		.mode		= 0644,
6329 		.proc_handler	= bpf_stats_handler,
6330 	},
6331 };
6332 
6333 static int __init bpf_syscall_sysctl_init(void)
6334 {
6335 	register_sysctl_init("kernel", bpf_syscall_table);
6336 	return 0;
6337 }
6338 late_initcall(bpf_syscall_sysctl_init);
6339 #endif /* CONFIG_SYSCTL */
6340