// Source: /linux/kernel/bpf/syscall.c (revision 9d027a35a52a4ea9400390ef4414e4e9dcd54193)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/bpf-cgroup.h>
6 #include <linux/bpf_trace.h>
7 #include <linux/bpf_lirc.h>
8 #include <linux/bpf_verifier.h>
9 #include <linux/bsearch.h>
10 #include <linux/btf.h>
11 #include <linux/syscalls.h>
12 #include <linux/slab.h>
13 #include <linux/sched/signal.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mmzone.h>
16 #include <linux/anon_inodes.h>
17 #include <linux/fdtable.h>
18 #include <linux/file.h>
19 #include <linux/fs.h>
20 #include <linux/license.h>
21 #include <linux/filter.h>
22 #include <linux/kernel.h>
23 #include <linux/idr.h>
24 #include <linux/cred.h>
25 #include <linux/timekeeping.h>
26 #include <linux/ctype.h>
27 #include <linux/nospec.h>
28 #include <linux/audit.h>
29 #include <uapi/linux/btf.h>
30 #include <linux/pgtable.h>
31 #include <linux/bpf_lsm.h>
32 #include <linux/poll.h>
33 #include <linux/sort.h>
34 #include <linux/bpf-netns.h>
35 #include <linux/rcupdate_trace.h>
36 #include <linux/memcontrol.h>
37 #include <linux/trace_events.h>
38 
39 #include <net/netfilter/nf_bpf_link.h>
40 #include <net/netkit.h>
41 #include <net/tcx.h>
42 
43 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
44 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
45 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
46 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
47 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
48 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
49 			IS_FD_HASH(map))
50 
51 #define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
52 
53 DEFINE_PER_CPU(int, bpf_prog_active);
54 static DEFINE_IDR(prog_idr);
55 static DEFINE_SPINLOCK(prog_idr_lock);
56 static DEFINE_IDR(map_idr);
57 static DEFINE_SPINLOCK(map_idr_lock);
58 static DEFINE_IDR(link_idr);
59 static DEFINE_SPINLOCK(link_idr_lock);
60 
61 int sysctl_unprivileged_bpf_disabled __read_mostly =
62 	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
63 
64 static const struct bpf_map_ops * const bpf_map_types[] = {
65 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
66 #define BPF_MAP_TYPE(_id, _ops) \
67 	[_id] = &_ops,
68 #define BPF_LINK_TYPE(_id, _name)
69 #include <linux/bpf_types.h>
70 #undef BPF_PROG_TYPE
71 #undef BPF_MAP_TYPE
72 #undef BPF_LINK_TYPE
73 };
74 
75 /*
76  * If we're handed a bigger struct than we know of, ensure all the unknown bits
77  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
78  * we don't know about yet.
79  *
80  * There is a ToCToU window between this function call and the following
81  * copy_from_user() call. However, this is not a concern here, since the
82  * function is only meant to future-proof against new, unknown bits.
83  */
84 int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
85 			     size_t expected_size,
86 			     size_t actual_size)
87 {
88 	int res;
89 
90 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
91 		return -E2BIG;
92 
93 	if (actual_size <= expected_size)
94 		return 0;
95 
96 	if (uaddr.is_kernel)
97 		res = memchr_inv(uaddr.kernel + expected_size, 0,
98 				 actual_size - expected_size) == NULL;
99 	else
100 		res = check_zeroed_user(uaddr.user + expected_size,
101 					actual_size - expected_size);
102 	if (res < 0)
103 		return res;
104 	return res ? 0 : -E2BIG;
105 }
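/*
 * Example: the forward-compatibility contract enforced above, seen from
 * userspace (illustrative sketch; assumes this kernel's union bpf_attr
 * is smaller than 256 bytes). Unknown trailing bytes must be zero:
 *
 *	char big_attr[256];
 *	memset(big_attr, 0, sizeof(big_attr));
 *	// ... fill only the fields this kernel understands ...
 *	syscall(__NR_bpf, BPF_MAP_CREATE, big_attr, sizeof(big_attr));
 *	// accepted: the unknown tail is all-zero
 *
 *	big_attr[sizeof(big_attr) - 1] = 1;
 *	syscall(__NR_bpf, BPF_MAP_CREATE, big_attr, sizeof(big_attr));
 *	// rejected with -E2BIG: a feature bit we don't know about is set
 */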
106 
107 const struct bpf_map_ops bpf_map_offload_ops = {
108 	.map_meta_equal = bpf_map_meta_equal,
109 	.map_alloc = bpf_map_offload_map_alloc,
110 	.map_free = bpf_map_offload_map_free,
111 	.map_check_btf = map_check_no_btf,
112 	.map_mem_usage = bpf_map_offload_map_mem_usage,
113 };
114 
115 static void bpf_map_write_active_inc(struct bpf_map *map)
116 {
117 	atomic64_inc(&map->writecnt);
118 }
119 
120 static void bpf_map_write_active_dec(struct bpf_map *map)
121 {
122 	atomic64_dec(&map->writecnt);
123 }
124 
125 bool bpf_map_write_active(const struct bpf_map *map)
126 {
127 	return atomic64_read(&map->writecnt) != 0;
128 }
129 
130 static u32 bpf_map_value_size(const struct bpf_map *map)
131 {
132 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
133 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
134 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
135 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
136 		return round_up(map->value_size, 8) * num_possible_cpus();
137 	else if (IS_FD_MAP(map))
138 		return sizeof(u32);
139 	else
140 		return map->value_size;
141 }
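/*
 * Userspace must mirror this sizing rule when reading per-CPU maps: the
 * value buffer holds one 8-byte-aligned slot per possible CPU. A sketch
 * using libbpf helpers (map_fd, key and value_size are placeholders):
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	size_t slot = (value_size + 7) & ~(size_t)7; // round_up(value_size, 8)
 *	void *buf = malloc(slot * ncpus);
 *	bpf_map_lookup_elem(map_fd, &key, buf);	// fills one slot per CPU
 */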
142 
143 static void maybe_wait_bpf_programs(struct bpf_map *map)
144 {
145 	/* Wait for any running non-sleepable BPF programs to complete so that
146 	 * userspace, when we return to it, knows that all non-sleepable
147 	 * programs that could be running use the new map value. For sleepable
148 	 * BPF programs, synchronize_rcu_tasks_trace() would be needed to wait
149 	 * for their completion, but that wait can be very long and userspace
150 	 * may think the syscall hangs forever, so sleepable BPF programs are
151 	 * not handled here.
152 	 */
153 	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
154 	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
155 		synchronize_rcu();
156 }
157 
158 static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
159 				void *key, void *value, __u64 flags)
160 {
161 	int err;
162 
163 	/* Need to create a kthread, thus must support schedule */
164 	if (bpf_map_is_offloaded(map)) {
165 		return bpf_map_offload_update_elem(map, key, value, flags);
166 	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
167 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
168 		return map->ops->map_update_elem(map, key, value, flags);
169 	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
170 		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
171 		return sock_map_update_elem_sys(map, key, value, flags);
172 	} else if (IS_FD_PROG_ARRAY(map)) {
173 		return bpf_fd_array_map_update_elem(map, map_file, key, value,
174 						    flags);
175 	}
176 
177 	bpf_disable_instrumentation();
178 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
179 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
180 		err = bpf_percpu_hash_update(map, key, value, flags);
181 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
182 		err = bpf_percpu_array_update(map, key, value, flags);
183 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
184 		err = bpf_percpu_cgroup_storage_update(map, key, value,
185 						       flags);
186 	} else if (IS_FD_ARRAY(map)) {
187 		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
188 						   flags);
189 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
190 		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
191 						  flags);
192 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
193 		/* rcu_read_lock() is not needed */
194 		err = bpf_fd_reuseport_array_update_elem(map, key, value,
195 							 flags);
196 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
197 		   map->map_type == BPF_MAP_TYPE_STACK ||
198 		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
199 		err = map->ops->map_push_elem(map, value, flags);
200 	} else {
201 		rcu_read_lock();
202 		err = map->ops->map_update_elem(map, key, value, flags);
203 		rcu_read_unlock();
204 	}
205 	bpf_enable_instrumentation();
206 
207 	return err;
208 }
209 
210 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
211 			      __u64 flags)
212 {
213 	void *ptr;
214 	int err;
215 
216 	if (bpf_map_is_offloaded(map))
217 		return bpf_map_offload_lookup_elem(map, key, value);
218 
219 	bpf_disable_instrumentation();
220 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
221 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
222 		err = bpf_percpu_hash_copy(map, key, value);
223 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
224 		err = bpf_percpu_array_copy(map, key, value);
225 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
226 		err = bpf_percpu_cgroup_storage_copy(map, key, value);
227 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
228 		err = bpf_stackmap_copy(map, key, value);
229 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
230 		err = bpf_fd_array_map_lookup_elem(map, key, value);
231 	} else if (IS_FD_HASH(map)) {
232 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
233 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
234 		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
235 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
236 		   map->map_type == BPF_MAP_TYPE_STACK ||
237 		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
238 		err = map->ops->map_peek_elem(map, value);
239 	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
240 		/* struct_ops map requires directly updating "value" */
241 		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
242 	} else {
243 		rcu_read_lock();
244 		if (map->ops->map_lookup_elem_sys_only)
245 			ptr = map->ops->map_lookup_elem_sys_only(map, key);
246 		else
247 			ptr = map->ops->map_lookup_elem(map, key);
248 		if (IS_ERR(ptr)) {
249 			err = PTR_ERR(ptr);
250 		} else if (!ptr) {
251 			err = -ENOENT;
252 		} else {
253 			err = 0;
254 			if (flags & BPF_F_LOCK)
255 				/* lock 'ptr' and copy everything but lock */
256 				copy_map_value_locked(map, value, ptr, true);
257 			else
258 				copy_map_value(map, value, ptr);
259 			/* mask lock and timer, since value wasn't zero inited */
260 			check_and_init_map_value(map, value);
261 		}
262 		rcu_read_unlock();
263 	}
264 
265 	bpf_enable_instrumentation();
266 
267 	return err;
268 }
269 
270 /* Please do not use this function outside of the map creation path
271  * (e.g. in the map update path) without first setting the active
272  * memory cgroup (see bpf_map_kmalloc_node() for an example).
273  */
274 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
275 {
276 	/* We really just want to fail instead of triggering OOM killer
277 	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
278 	 * which is used for lower order allocation requests.
279 	 *
280 	 * It has been observed that higher order allocation requests done by
281 	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
282 	 * to reclaim memory from the page cache, thus we set
283 	 * __GFP_RETRY_MAYFAIL to avoid such situations.
284 	 */
285 
286 	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
287 	unsigned int flags = 0;
288 	unsigned long align = 1;
289 	void *area;
290 
291 	if (size >= SIZE_MAX)
292 		return NULL;
293 
294 	/* kmalloc()'ed memory can't be mmap()'ed */
295 	if (mmapable) {
296 		BUG_ON(!PAGE_ALIGNED(size));
297 		align = SHMLBA;
298 		flags = VM_USERMAP;
299 	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
300 		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
301 				    numa_node);
302 		if (area != NULL)
303 			return area;
304 	}
305 
306 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
307 			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
308 			flags, numa_node, __builtin_return_address(0));
309 }
310 
311 void *bpf_map_area_alloc(u64 size, int numa_node)
312 {
313 	return __bpf_map_area_alloc(size, numa_node, false);
314 }
315 
316 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
317 {
318 	return __bpf_map_area_alloc(size, numa_node, true);
319 }
320 
321 void bpf_map_area_free(void *area)
322 {
323 	kvfree(area);
324 }
325 
326 static u32 bpf_map_flags_retain_permanent(u32 flags)
327 {
328 	/* Some map creation flags are not tied to the map object but
329 	 * rather to the map fd instead, so they have no meaning upon
330 	 * map object inspection since multiple file descriptors with
331 	 * different (access) properties can exist here. Thus, given
332 	 * this has zero meaning for the map itself, let's clear these
333 	 * from here.
334 	 */
335 	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
336 }
337 
338 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
339 {
340 	map->map_type = attr->map_type;
341 	map->key_size = attr->key_size;
342 	map->value_size = attr->value_size;
343 	map->max_entries = attr->max_entries;
344 	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
345 	map->numa_node = bpf_map_attr_numa_node(attr);
346 	map->map_extra = attr->map_extra;
347 }
348 
349 static int bpf_map_alloc_id(struct bpf_map *map)
350 {
351 	int id;
352 
353 	idr_preload(GFP_KERNEL);
354 	spin_lock_bh(&map_idr_lock);
355 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
356 	if (id > 0)
357 		map->id = id;
358 	spin_unlock_bh(&map_idr_lock);
359 	idr_preload_end();
360 
361 	if (WARN_ON_ONCE(!id))
362 		return -ENOSPC;
363 
364 	return id > 0 ? 0 : id;
365 }
366 
367 void bpf_map_free_id(struct bpf_map *map)
368 {
369 	unsigned long flags;
370 
371 	/* Offloaded maps are removed from the IDR store when their device
372 	 * disappears - even if someone holds an fd to them, they are unusable:
373 	 * the memory is gone, all ops will fail; they are simply waiting
374 	 * for the refcnt to drop so they can be freed.
375 	 */
376 	if (!map->id)
377 		return;
378 
379 	spin_lock_irqsave(&map_idr_lock, flags);
380 
381 	idr_remove(&map_idr, map->id);
382 	map->id = 0;
383 
384 	spin_unlock_irqrestore(&map_idr_lock, flags);
385 }
386 
387 #ifdef CONFIG_MEMCG_KMEM
388 static void bpf_map_save_memcg(struct bpf_map *map)
389 {
390 	/* Currently if a map is created by a process belonging to the root
391 	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
392 	 * So we have to check map->objcg for being NULL each time it's
393 	 * being used.
394 	 */
395 	if (memcg_bpf_enabled())
396 		map->objcg = get_obj_cgroup_from_current();
397 }
398 
399 static void bpf_map_release_memcg(struct bpf_map *map)
400 {
401 	if (map->objcg)
402 		obj_cgroup_put(map->objcg);
403 }
404 
405 static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
406 {
407 	if (map->objcg)
408 		return get_mem_cgroup_from_objcg(map->objcg);
409 
410 	return root_mem_cgroup;
411 }
412 
413 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
414 			   int node)
415 {
416 	struct mem_cgroup *memcg, *old_memcg;
417 	void *ptr;
418 
419 	memcg = bpf_map_get_memcg(map);
420 	old_memcg = set_active_memcg(memcg);
421 	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
422 	set_active_memcg(old_memcg);
423 	mem_cgroup_put(memcg);
424 
425 	return ptr;
426 }
427 
428 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
429 {
430 	struct mem_cgroup *memcg, *old_memcg;
431 	void *ptr;
432 
433 	memcg = bpf_map_get_memcg(map);
434 	old_memcg = set_active_memcg(memcg);
435 	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
436 	set_active_memcg(old_memcg);
437 	mem_cgroup_put(memcg);
438 
439 	return ptr;
440 }
441 
442 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
443 		       gfp_t flags)
444 {
445 	struct mem_cgroup *memcg, *old_memcg;
446 	void *ptr;
447 
448 	memcg = bpf_map_get_memcg(map);
449 	old_memcg = set_active_memcg(memcg);
450 	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
451 	set_active_memcg(old_memcg);
452 	mem_cgroup_put(memcg);
453 
454 	return ptr;
455 }
456 
457 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
458 				    size_t align, gfp_t flags)
459 {
460 	struct mem_cgroup *memcg, *old_memcg;
461 	void __percpu *ptr;
462 
463 	memcg = bpf_map_get_memcg(map);
464 	old_memcg = set_active_memcg(memcg);
465 	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
466 	set_active_memcg(old_memcg);
467 	mem_cgroup_put(memcg);
468 
469 	return ptr;
470 }
471 
472 #else
473 static void bpf_map_save_memcg(struct bpf_map *map)
474 {
475 }
476 
477 static void bpf_map_release_memcg(struct bpf_map *map)
478 {
479 }
480 #endif
481 
482 static int btf_field_cmp(const void *a, const void *b)
483 {
484 	const struct btf_field *f1 = a, *f2 = b;
485 
486 	if (f1->offset < f2->offset)
487 		return -1;
488 	else if (f1->offset > f2->offset)
489 		return 1;
490 	return 0;
491 }
492 
493 struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
494 				  u32 field_mask)
495 {
496 	struct btf_field *field;
497 
498 	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
499 		return NULL;
500 	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
501 	if (!field || !(field->type & field_mask))
502 		return NULL;
503 	return field;
504 }
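/*
 * Example: a hypothetical caller-side use of the helper above, e.g. when
 * deciding whether an access at offset 'off' lands on a referenced kptr:
 *
 *	const struct btf_field *field;
 *
 *	field = btf_record_find(map->record, off, BPF_KPTR_REF);
 *	if (field)
 *		;	// 'off' names a referenced kptr; handle specially
 *
 * This works because the record's fields array is kept sorted by offset,
 * which is what makes the bsearch() above valid.
 */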
505 
506 void btf_record_free(struct btf_record *rec)
507 {
508 	int i;
509 
510 	if (IS_ERR_OR_NULL(rec))
511 		return;
512 	for (i = 0; i < rec->cnt; i++) {
513 		switch (rec->fields[i].type) {
514 		case BPF_KPTR_UNREF:
515 		case BPF_KPTR_REF:
516 		case BPF_KPTR_PERCPU:
517 			if (rec->fields[i].kptr.module)
518 				module_put(rec->fields[i].kptr.module);
519 			btf_put(rec->fields[i].kptr.btf);
520 			break;
521 		case BPF_LIST_HEAD:
522 		case BPF_LIST_NODE:
523 		case BPF_RB_ROOT:
524 		case BPF_RB_NODE:
525 		case BPF_SPIN_LOCK:
526 		case BPF_TIMER:
527 		case BPF_REFCOUNT:
528 			/* Nothing to release */
529 			break;
530 		default:
531 			WARN_ON_ONCE(1);
532 			continue;
533 		}
534 	}
535 	kfree(rec);
536 }
537 
538 void bpf_map_free_record(struct bpf_map *map)
539 {
540 	btf_record_free(map->record);
541 	map->record = NULL;
542 }
543 
544 struct btf_record *btf_record_dup(const struct btf_record *rec)
545 {
546 	const struct btf_field *fields;
547 	struct btf_record *new_rec;
548 	int ret, size, i;
549 
550 	if (IS_ERR_OR_NULL(rec))
551 		return NULL;
552 	size = offsetof(struct btf_record, fields[rec->cnt]);
553 	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
554 	if (!new_rec)
555 		return ERR_PTR(-ENOMEM);
556 	/* Do a deep copy of the btf_record */
557 	fields = rec->fields;
558 	new_rec->cnt = 0;
559 	for (i = 0; i < rec->cnt; i++) {
560 		switch (fields[i].type) {
561 		case BPF_KPTR_UNREF:
562 		case BPF_KPTR_REF:
563 		case BPF_KPTR_PERCPU:
564 			btf_get(fields[i].kptr.btf);
565 			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
566 				ret = -ENXIO;
567 				goto free;
568 			}
569 			break;
570 		case BPF_LIST_HEAD:
571 		case BPF_LIST_NODE:
572 		case BPF_RB_ROOT:
573 		case BPF_RB_NODE:
574 		case BPF_SPIN_LOCK:
575 		case BPF_TIMER:
576 		case BPF_REFCOUNT:
577 			/* Nothing to acquire */
578 			break;
579 		default:
580 			ret = -EFAULT;
581 			WARN_ON_ONCE(1);
582 			goto free;
583 		}
584 		new_rec->cnt++;
585 	}
586 	return new_rec;
587 free:
588 	btf_record_free(new_rec);
589 	return ERR_PTR(ret);
590 }
591 
592 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
593 {
594 	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
595 	int size;
596 
597 	if (!a_has_fields && !b_has_fields)
598 		return true;
599 	if (a_has_fields != b_has_fields)
600 		return false;
601 	if (rec_a->cnt != rec_b->cnt)
602 		return false;
603 	size = offsetof(struct btf_record, fields[rec_a->cnt]);
604 	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
605 	 * members are zeroed out. So memcmp is safe to do without worrying
606 	 * about padding/unused fields.
607 	 *
608 	 * While spin_lock, timer, and kptr have no relation to map BTF,
609 	 * list_head metadata is specific to map BTF, the btf and value_rec
610 	 * members in particular. btf is the map BTF, while value_rec points to
611 	 * btf_record in that map BTF.
612 	 *
613  * So while, by default, we don't require the map BTF (which the records
614  * were parsed from) to match for both records (requiring that would not
615  * be backwards compatible), when list_head is part of it we implicitly
616  * rely on a match, since only then can the memcmp for it succeed.
617 	 */
618 	return !memcmp(rec_a, rec_b, size);
619 }
620 
621 void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
622 {
623 	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
624 		return;
625 	bpf_timer_cancel_and_free(obj + rec->timer_off);
626 }
627 
628 void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
629 {
630 	const struct btf_field *fields;
631 	int i;
632 
633 	if (IS_ERR_OR_NULL(rec))
634 		return;
635 	fields = rec->fields;
636 	for (i = 0; i < rec->cnt; i++) {
637 		struct btf_struct_meta *pointee_struct_meta;
638 		const struct btf_field *field = &fields[i];
639 		void *field_ptr = obj + field->offset;
640 		void *xchgd_field;
641 
642 		switch (fields[i].type) {
643 		case BPF_SPIN_LOCK:
644 			break;
645 		case BPF_TIMER:
646 			bpf_timer_cancel_and_free(field_ptr);
647 			break;
648 		case BPF_KPTR_UNREF:
649 			WRITE_ONCE(*(u64 *)field_ptr, 0);
650 			break;
651 		case BPF_KPTR_REF:
652 		case BPF_KPTR_PERCPU:
653 			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
654 			if (!xchgd_field)
655 				break;
656 
657 			if (!btf_is_kernel(field->kptr.btf)) {
658 				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
659 									   field->kptr.btf_id);
660 				migrate_disable();
661 				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
662 								 pointee_struct_meta->record : NULL,
663 								 fields[i].type == BPF_KPTR_PERCPU);
664 				migrate_enable();
665 			} else {
666 				field->kptr.dtor(xchgd_field);
667 			}
668 			break;
669 		case BPF_LIST_HEAD:
670 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
671 				continue;
672 			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
673 			break;
674 		case BPF_RB_ROOT:
675 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
676 				continue;
677 			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
678 			break;
679 		case BPF_LIST_NODE:
680 		case BPF_RB_NODE:
681 		case BPF_REFCOUNT:
682 			break;
683 		default:
684 			WARN_ON_ONCE(1);
685 			continue;
686 		}
687 	}
688 }
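/*
 * The xchg() in the kptr cases above is the usual "steal the pointer"
 * idiom: atomically take ownership of the object while leaving NULL
 * behind, so that a concurrent writer and this destructor can never
 * both free it. Generic sketch ('slot' holds the pointer as a u64):
 *
 *	void *p = (void *)xchg((unsigned long *)slot, 0);
 *	if (p)
 *		release(p);	// we are the sole owner now
 */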
689 
690 /* called from workqueue */
691 static void bpf_map_free_deferred(struct work_struct *work)
692 {
693 	struct bpf_map *map = container_of(work, struct bpf_map, work);
694 	struct btf_record *rec = map->record;
695 	struct btf *btf = map->btf;
696 
697 	security_bpf_map_free(map);
698 	bpf_map_release_memcg(map);
699 	/* implementation dependent freeing */
700 	map->ops->map_free(map);
701 	/* Delay freeing of btf_record for maps, as map_free
702 	 * callback usually needs access to them. It is better to do it here
703 	 * than require each callback to do the free itself manually.
704 	 *
705 	 * Note that the btf_record stashed in map->inner_map_meta->record was
706 	 * already freed by the map_free callback in the map-in-map case, which
707 	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
708 	 * template bpf_map struct used during verification.
709 	 */
710 	btf_record_free(rec);
711 	/* Delay freeing of btf for maps, as map_free callback may need
712 	 * struct_meta info which will be freed with btf_put().
713 	 */
714 	btf_put(btf);
715 }
716 
717 static void bpf_map_put_uref(struct bpf_map *map)
718 {
719 	if (atomic64_dec_and_test(&map->usercnt)) {
720 		if (map->ops->map_release_uref)
721 			map->ops->map_release_uref(map);
722 	}
723 }
724 
725 static void bpf_map_free_in_work(struct bpf_map *map)
726 {
727 	INIT_WORK(&map->work, bpf_map_free_deferred);
728 	/* Avoid spawning kworkers, since they all might contend
729 	 * for the same mutex like slab_mutex.
730 	 */
731 	queue_work(system_unbound_wq, &map->work);
732 }
733 
734 static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
735 {
736 	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
737 }
738 
739 static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
740 {
741 	if (rcu_trace_implies_rcu_gp())
742 		bpf_map_free_rcu_gp(rcu);
743 	else
744 		call_rcu(rcu, bpf_map_free_rcu_gp);
745 }
746 
747 /* decrement map refcnt and schedule it for freeing via workqueue
748  * (underlying map implementation ops->map_free() might sleep)
749  */
750 void bpf_map_put(struct bpf_map *map)
751 {
752 	if (atomic64_dec_and_test(&map->refcnt)) {
753 		/* bpf_map_free_id() must be called first */
754 		bpf_map_free_id(map);
755 
756 		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
757 		if (READ_ONCE(map->free_after_mult_rcu_gp))
758 			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
759 		else if (READ_ONCE(map->free_after_rcu_gp))
760 			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
761 		else
762 			bpf_map_free_in_work(map);
763 	}
764 }
765 EXPORT_SYMBOL_GPL(bpf_map_put);
766 
767 void bpf_map_put_with_uref(struct bpf_map *map)
768 {
769 	bpf_map_put_uref(map);
770 	bpf_map_put(map);
771 }
772 
773 static int bpf_map_release(struct inode *inode, struct file *filp)
774 {
775 	struct bpf_map *map = filp->private_data;
776 
777 	if (map->ops->map_release)
778 		map->ops->map_release(map, filp);
779 
780 	bpf_map_put_with_uref(map);
781 	return 0;
782 }
783 
784 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
785 {
786 	fmode_t mode = f.file->f_mode;
787 
788 	/* Our file permissions may have been overridden by map-wide
789 	 * permissions on the syscall side.
790 	 */
791 	if (READ_ONCE(map->frozen))
792 		mode &= ~FMODE_CAN_WRITE;
793 	return mode;
794 }
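/*
 * Example: once a map is frozen, syscall-side writes fail even through
 * an fd opened read-write (userspace sketch; map_fd, key and val are
 * placeholders):
 *
 *	union bpf_attr attr = { .map_fd = map_fd };
 *	syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = map_fd;
 *	attr.key = (__u64)(unsigned long)&key;
 *	attr.value = (__u64)(unsigned long)&val;
 *	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 *	// fails with errno == EPERM: FMODE_CAN_WRITE was masked off above
 */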
795 
796 #ifdef CONFIG_PROC_FS
797 /* Show the memory usage of a bpf map */
798 static u64 bpf_map_memory_usage(const struct bpf_map *map)
799 {
800 	return map->ops->map_mem_usage(map);
801 }
802 
803 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
804 {
805 	struct bpf_map *map = filp->private_data;
806 	u32 type = 0, jited = 0;
807 
808 	if (map_type_contains_progs(map)) {
809 		spin_lock(&map->owner.lock);
810 		type  = map->owner.type;
811 		jited = map->owner.jited;
812 		spin_unlock(&map->owner.lock);
813 	}
814 
815 	seq_printf(m,
816 		   "map_type:\t%u\n"
817 		   "key_size:\t%u\n"
818 		   "value_size:\t%u\n"
819 		   "max_entries:\t%u\n"
820 		   "map_flags:\t%#x\n"
821 		   "map_extra:\t%#llx\n"
822 		   "memlock:\t%llu\n"
823 		   "map_id:\t%u\n"
824 		   "frozen:\t%u\n",
825 		   map->map_type,
826 		   map->key_size,
827 		   map->value_size,
828 		   map->max_entries,
829 		   map->map_flags,
830 		   (unsigned long long)map->map_extra,
831 		   bpf_map_memory_usage(map),
832 		   map->id,
833 		   READ_ONCE(map->frozen));
834 	if (type) {
835 		seq_printf(m, "owner_prog_type:\t%u\n", type);
836 		seq_printf(m, "owner_jited:\t%u\n", jited);
837 	}
838 }
839 #endif
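/*
 * Example: these fields surface in procfs; reading a map fd's fdinfo
 * from userspace shows something like (illustrative values):
 *
 *	$ cat /proc/self/fdinfo/4
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	128
 *	map_flags:	0x0
 *	map_extra:	0x0
 *	memlock:	8192
 *	map_id:	23
 *	frozen:	0
 */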
840 
841 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
842 			      loff_t *ppos)
843 {
844 	/* We need this handler such that alloc_file() enables
845 	 * f_mode with FMODE_CAN_READ.
846 	 */
847 	return -EINVAL;
848 }
849 
850 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
851 			       size_t siz, loff_t *ppos)
852 {
853 	/* We need this handler such that alloc_file() enables
854 	 * f_mode with FMODE_CAN_WRITE.
855 	 */
856 	return -EINVAL;
857 }
858 
859 /* called for any extra memory-mapped regions (except initial) */
860 static void bpf_map_mmap_open(struct vm_area_struct *vma)
861 {
862 	struct bpf_map *map = vma->vm_file->private_data;
863 
864 	if (vma->vm_flags & VM_MAYWRITE)
865 		bpf_map_write_active_inc(map);
866 }
867 
868 /* called for any unmapped memory region (including the initial one) */
869 static void bpf_map_mmap_close(struct vm_area_struct *vma)
870 {
871 	struct bpf_map *map = vma->vm_file->private_data;
872 
873 	if (vma->vm_flags & VM_MAYWRITE)
874 		bpf_map_write_active_dec(map);
875 }
876 
877 static const struct vm_operations_struct bpf_map_default_vmops = {
878 	.open		= bpf_map_mmap_open,
879 	.close		= bpf_map_mmap_close,
880 };
881 
882 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
883 {
884 	struct bpf_map *map = filp->private_data;
885 	int err;
886 
887 	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
888 		return -ENOTSUPP;
889 
890 	if (!(vma->vm_flags & VM_SHARED))
891 		return -EINVAL;
892 
893 	mutex_lock(&map->freeze_mutex);
894 
895 	if (vma->vm_flags & VM_WRITE) {
896 		if (map->frozen) {
897 			err = -EPERM;
898 			goto out;
899 		}
900 		/* map is meant to be read-only, so do not allow mapping as
901 		 * writable, because it's possible to leak a writable page
902 		 * reference, which would let user-space modify it even after
903 		 * freezing, while the verifier assumes the contents don't change
904 		 */
905 		if (map->map_flags & BPF_F_RDONLY_PROG) {
906 			err = -EACCES;
907 			goto out;
908 		}
909 	}
910 
911 	/* set default open/close callbacks */
912 	vma->vm_ops = &bpf_map_default_vmops;
913 	vma->vm_private_data = map;
914 	vm_flags_clear(vma, VM_MAYEXEC);
915 	if (!(vma->vm_flags & VM_WRITE))
916 		/* disallow re-mapping with PROT_WRITE */
917 		vm_flags_clear(vma, VM_MAYWRITE);
918 
919 	err = map->ops->map_mmap(map, vma);
920 	if (err)
921 		goto out;
922 
923 	if (vma->vm_flags & VM_MAYWRITE)
924 		bpf_map_write_active_inc(map);
925 out:
926 	mutex_unlock(&map->freeze_mutex);
927 	return err;
928 }
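/*
 * Example: mmap()'ing an array map created with BPF_F_MMAPABLE, so its
 * contents can be read and written without further syscalls (userspace
 * sketch; sizes chosen so the data area is exactly one 4K page):
 *
 *	union bpf_attr attr = {
 *		.map_type = BPF_MAP_TYPE_ARRAY,
 *		.key_size = 4,
 *		.value_size = 8,
 *		.max_entries = 512,
 *		.map_flags = BPF_F_MMAPABLE,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	__u64 *vals = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, 0);	// MAP_SHARED is mandatory
 *	vals[0] = 42;	// updates element 0 directly
 */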
929 
930 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
931 {
932 	struct bpf_map *map = filp->private_data;
933 
934 	if (map->ops->map_poll)
935 		return map->ops->map_poll(map, filp, pts);
936 
937 	return EPOLLERR;
938 }
939 
940 const struct file_operations bpf_map_fops = {
941 #ifdef CONFIG_PROC_FS
942 	.show_fdinfo	= bpf_map_show_fdinfo,
943 #endif
944 	.release	= bpf_map_release,
945 	.read		= bpf_dummy_read,
946 	.write		= bpf_dummy_write,
947 	.mmap		= bpf_map_mmap,
948 	.poll		= bpf_map_poll,
949 };
950 
951 int bpf_map_new_fd(struct bpf_map *map, int flags)
952 {
953 	int ret;
954 
955 	ret = security_bpf_map(map, OPEN_FMODE(flags));
956 	if (ret < 0)
957 		return ret;
958 
959 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
960 				flags | O_CLOEXEC);
961 }
962 
963 int bpf_get_file_flag(int flags)
964 {
965 	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
966 		return -EINVAL;
967 	if (flags & BPF_F_RDONLY)
968 		return O_RDONLY;
969 	if (flags & BPF_F_WRONLY)
970 		return O_WRONLY;
971 	return O_RDWR;
972 }
973 
974 /* helper macro to check that unused fields of 'union bpf_attr' are zero */
975 #define CHECK_ATTR(CMD) \
976 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
977 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
978 		   sizeof(*attr) - \
979 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
980 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
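/*
 * For instance, with BPF_MAP_FREEZE_LAST_FIELD defined as map_fd (see
 * below), CHECK_ATTR(BPF_MAP_FREEZE) expands to roughly:
 *
 *	memchr_inv((void *)&attr->map_fd + sizeof(attr->map_fd), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_fd) -
 *		   sizeof(attr->map_fd)) != NULL
 *
 * i.e. it scans everything in the union past the command's last used
 * field and flags the command as invalid if any byte there is non-zero.
 */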
981 
982 /* dst and src must have at least "size" bytes each.
983  * Return the copied string length on success and < 0 on error.
984  */
985 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
986 {
987 	const char *end = src + size;
988 	const char *orig_src = src;
989 
990 	memset(dst, 0, size);
991 	/* Copy all isalnum(), '_' and '.' chars. */
992 	while (src < end && *src) {
993 		if (!isalnum(*src) &&
994 		    *src != '_' && *src != '.')
995 			return -EINVAL;
996 		*dst++ = *src++;
997 	}
998 
999 	/* No '\0' found within the first "size" bytes */
1000 	if (src == end)
1001 		return -EINVAL;
1002 
1003 	return src - orig_src;
1004 }
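/*
 * Examples of the naming rule enforced above:
 *
 *	"ringbuf_map.v2"	accepted: only alnum, '_' and '.' chars
 *	"my-map"		-EINVAL: '-' is not an allowed character
 *	16 non-NUL bytes	-EINVAL when size == 16: no '\0' within size
 */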
1005 
1006 int map_check_no_btf(const struct bpf_map *map,
1007 		     const struct btf *btf,
1008 		     const struct btf_type *key_type,
1009 		     const struct btf_type *value_type)
1010 {
1011 	return -ENOTSUPP;
1012 }
1013 
1014 static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
1015 			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
1016 {
1017 	const struct btf_type *key_type, *value_type;
1018 	u32 key_size, value_size;
1019 	int ret = 0;
1020 
1021 	/* Some maps allow key to be unspecified. */
1022 	if (btf_key_id) {
1023 		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
1024 		if (!key_type || key_size != map->key_size)
1025 			return -EINVAL;
1026 	} else {
1027 		key_type = btf_type_by_id(btf, 0);
1028 		if (!map->ops->map_check_btf)
1029 			return -EINVAL;
1030 	}
1031 
1032 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
1033 	if (!value_type || value_size != map->value_size)
1034 		return -EINVAL;
1035 
1036 	map->record = btf_parse_fields(btf, value_type,
1037 				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
1038 				       BPF_RB_ROOT | BPF_REFCOUNT,
1039 				       map->value_size);
1040 	if (!IS_ERR_OR_NULL(map->record)) {
1041 		int i;
1042 
1043 		if (!bpf_token_capable(token, CAP_BPF)) {
1044 			ret = -EPERM;
1045 			goto free_map_tab;
1046 		}
1047 		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
1048 			ret = -EACCES;
1049 			goto free_map_tab;
1050 		}
1051 		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
1052 			switch (map->record->field_mask & (1 << i)) {
1053 			case 0:
1054 				continue;
1055 			case BPF_SPIN_LOCK:
1056 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1057 				    map->map_type != BPF_MAP_TYPE_ARRAY &&
1058 				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
1059 				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1060 				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1061 				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1062 				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1063 					ret = -EOPNOTSUPP;
1064 					goto free_map_tab;
1065 				}
1066 				break;
1067 			case BPF_TIMER:
1068 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1069 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1070 				    map->map_type != BPF_MAP_TYPE_ARRAY) {
1071 					ret = -EOPNOTSUPP;
1072 					goto free_map_tab;
1073 				}
1074 				break;
1075 			case BPF_KPTR_UNREF:
1076 			case BPF_KPTR_REF:
1077 			case BPF_KPTR_PERCPU:
1078 			case BPF_REFCOUNT:
1079 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1080 				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
1081 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1082 				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
1083 				    map->map_type != BPF_MAP_TYPE_ARRAY &&
1084 				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
1085 				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1086 				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1087 				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1088 				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1089 					ret = -EOPNOTSUPP;
1090 					goto free_map_tab;
1091 				}
1092 				break;
1093 			case BPF_LIST_HEAD:
1094 			case BPF_RB_ROOT:
1095 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1096 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1097 				    map->map_type != BPF_MAP_TYPE_ARRAY) {
1098 					ret = -EOPNOTSUPP;
1099 					goto free_map_tab;
1100 				}
1101 				break;
1102 			default:
1103 				/* Fail if map_type checks are missing for a field type */
1104 				ret = -EOPNOTSUPP;
1105 				goto free_map_tab;
1106 			}
1107 		}
1108 	}
1109 
1110 	ret = btf_check_and_fixup_fields(btf, map->record);
1111 	if (ret < 0)
1112 		goto free_map_tab;
1113 
1114 	if (map->ops->map_check_btf) {
1115 		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
1116 		if (ret < 0)
1117 			goto free_map_tab;
1118 	}
1119 
1120 	return ret;
1121 free_map_tab:
1122 	bpf_map_free_record(map);
1123 	return ret;
1124 }
1125 
1126 static bool bpf_net_capable(void)
1127 {
1128 	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
1129 }
1130 
1131 #define BPF_MAP_CREATE_LAST_FIELD map_token_fd
1132 /* called via syscall */
1133 static int map_create(union bpf_attr *attr)
1134 {
1135 	const struct bpf_map_ops *ops;
1136 	struct bpf_token *token = NULL;
1137 	int numa_node = bpf_map_attr_numa_node(attr);
1138 	u32 map_type = attr->map_type;
1139 	struct bpf_map *map;
1140 	int f_flags;
1141 	int err;
1142 
1143 	err = CHECK_ATTR(BPF_MAP_CREATE);
1144 	if (err)
1145 		return -EINVAL;
1146 
1147 	if (attr->btf_vmlinux_value_type_id) {
1148 		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
1149 		    attr->btf_key_type_id || attr->btf_value_type_id)
1150 			return -EINVAL;
1151 	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
1152 		return -EINVAL;
1153 	}
1154 
1155 	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
1156 	    attr->map_extra != 0)
1157 		return -EINVAL;
1158 
1159 	f_flags = bpf_get_file_flag(attr->map_flags);
1160 	if (f_flags < 0)
1161 		return f_flags;
1162 
1163 	if (numa_node != NUMA_NO_NODE &&
1164 	    ((unsigned int)numa_node >= nr_node_ids ||
1165 	     !node_online(numa_node)))
1166 		return -EINVAL;
1167 
1168 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
1169 	map_type = attr->map_type;
1170 	if (map_type >= ARRAY_SIZE(bpf_map_types))
1171 		return -EINVAL;
1172 	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
1173 	ops = bpf_map_types[map_type];
1174 	if (!ops)
1175 		return -EINVAL;
1176 
1177 	if (ops->map_alloc_check) {
1178 		err = ops->map_alloc_check(attr);
1179 		if (err)
1180 			return err;
1181 	}
1182 	if (attr->map_ifindex)
1183 		ops = &bpf_map_offload_ops;
1184 	if (!ops->map_mem_usage)
1185 		return -EINVAL;
1186 
1187 	if (attr->map_token_fd) {
1188 		token = bpf_token_get_from_fd(attr->map_token_fd);
1189 		if (IS_ERR(token))
1190 			return PTR_ERR(token);
1191 
1192 		/* if current token doesn't grant map creation permissions,
1193 		 * then we can't use this token, so ignore it and rely on
1194 		 * system-wide capabilities checks
1195 		 */
1196 		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
1197 		    !bpf_token_allow_map_type(token, attr->map_type)) {
1198 			bpf_token_put(token);
1199 			token = NULL;
1200 		}
1201 	}
1202 
1203 	err = -EPERM;
1204 
1205 	/* Intent here is for unprivileged_bpf_disabled to block BPF map
1206 	 * creation for unprivileged users; other actions depend
1207 	 * on fd availability and access to bpffs, so are dependent on
1208 	 * object creation success. Even with unprivileged BPF disabled,
1209 	 * capability checks are still carried out.
1210 	 */
1211 	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
1212 		goto put_token;
1213 
1214 	/* check privileged map type permissions */
1215 	switch (map_type) {
1216 	case BPF_MAP_TYPE_ARRAY:
1217 	case BPF_MAP_TYPE_PERCPU_ARRAY:
1218 	case BPF_MAP_TYPE_PROG_ARRAY:
1219 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1220 	case BPF_MAP_TYPE_CGROUP_ARRAY:
1221 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1222 	case BPF_MAP_TYPE_HASH:
1223 	case BPF_MAP_TYPE_PERCPU_HASH:
1224 	case BPF_MAP_TYPE_HASH_OF_MAPS:
1225 	case BPF_MAP_TYPE_RINGBUF:
1226 	case BPF_MAP_TYPE_USER_RINGBUF:
1227 	case BPF_MAP_TYPE_CGROUP_STORAGE:
1228 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
1229 		/* unprivileged */
1230 		break;
1231 	case BPF_MAP_TYPE_SK_STORAGE:
1232 	case BPF_MAP_TYPE_INODE_STORAGE:
1233 	case BPF_MAP_TYPE_TASK_STORAGE:
1234 	case BPF_MAP_TYPE_CGRP_STORAGE:
1235 	case BPF_MAP_TYPE_BLOOM_FILTER:
1236 	case BPF_MAP_TYPE_LPM_TRIE:
1237 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
1238 	case BPF_MAP_TYPE_STACK_TRACE:
1239 	case BPF_MAP_TYPE_QUEUE:
1240 	case BPF_MAP_TYPE_STACK:
1241 	case BPF_MAP_TYPE_LRU_HASH:
1242 	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
1243 	case BPF_MAP_TYPE_STRUCT_OPS:
1244 	case BPF_MAP_TYPE_CPUMAP:
1245 		if (!bpf_token_capable(token, CAP_BPF))
1246 			goto put_token;
1247 		break;
1248 	case BPF_MAP_TYPE_SOCKMAP:
1249 	case BPF_MAP_TYPE_SOCKHASH:
1250 	case BPF_MAP_TYPE_DEVMAP:
1251 	case BPF_MAP_TYPE_DEVMAP_HASH:
1252 	case BPF_MAP_TYPE_XSKMAP:
1253 		if (!bpf_token_capable(token, CAP_NET_ADMIN))
1254 			goto put_token;
1255 		break;
1256 	default:
1257 		WARN(1, "unsupported map type %d", map_type);
1258 		goto put_token;
1259 	}
1260 
1261 	map = ops->map_alloc(attr);
1262 	if (IS_ERR(map)) {
1263 		err = PTR_ERR(map);
1264 		goto put_token;
1265 	}
1266 	map->ops = ops;
1267 	map->map_type = map_type;
1268 
1269 	err = bpf_obj_name_cpy(map->name, attr->map_name,
1270 			       sizeof(attr->map_name));
1271 	if (err < 0)
1272 		goto free_map;
1273 
1274 	atomic64_set(&map->refcnt, 1);
1275 	atomic64_set(&map->usercnt, 1);
1276 	mutex_init(&map->freeze_mutex);
1277 	spin_lock_init(&map->owner.lock);
1278 
1279 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
1280 	    /* Even if the map's value is a kernel struct,
1281 	     * the bpf_prog.o must have BTF to begin with
1282 	     * to figure out the corresponding kernel
1283 	     * counterpart.  Thus, attr->btf_fd has
1284 	     * to be valid also.
1285 	     */
1286 	    attr->btf_vmlinux_value_type_id) {
1287 		struct btf *btf;
1288 
1289 		btf = btf_get_by_fd(attr->btf_fd);
1290 		if (IS_ERR(btf)) {
1291 			err = PTR_ERR(btf);
1292 			goto free_map;
1293 		}
1294 		if (btf_is_kernel(btf)) {
1295 			btf_put(btf);
1296 			err = -EACCES;
1297 			goto free_map;
1298 		}
1299 		map->btf = btf;
1300 
1301 		if (attr->btf_value_type_id) {
1302 			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
1303 					    attr->btf_value_type_id);
1304 			if (err)
1305 				goto free_map;
1306 		}
1307 
1308 		map->btf_key_type_id = attr->btf_key_type_id;
1309 		map->btf_value_type_id = attr->btf_value_type_id;
1310 		map->btf_vmlinux_value_type_id =
1311 			attr->btf_vmlinux_value_type_id;
1312 	}
1313 
1314 	err = security_bpf_map_create(map, attr, token);
1315 	if (err)
1316 		goto free_map_sec;
1317 
1318 	err = bpf_map_alloc_id(map);
1319 	if (err)
1320 		goto free_map_sec;
1321 
1322 	bpf_map_save_memcg(map);
1323 	bpf_token_put(token);
1324 
1325 	err = bpf_map_new_fd(map, f_flags);
1326 	if (err < 0) {
1327 		/* failed to allocate fd.
1328 		 * bpf_map_put_with_uref() is needed because the above
1329 		 * bpf_map_alloc_id() has published the map
1330 		 * to userspace, which may already have taken a
1331 		 * reference to it through BPF_MAP_GET_FD_BY_ID.
1332 		 */
1333 		bpf_map_put_with_uref(map);
1334 		return err;
1335 	}
1336 
1337 	return err;
1338 
1339 free_map_sec:
1340 	security_bpf_map_free(map);
1341 free_map:
1342 	btf_put(map->btf);
1343 	map->ops->map_free(map);
1344 put_token:
1345 	bpf_token_put(token);
1346 	return err;
1347 }
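/*
 * Example: the simplest consumer of this command is a raw bpf(2) call
 * from userspace (minimal sketch, error handling omitted):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int create_hash_map(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr)); // unused fields must be zero
 *		attr.map_type = BPF_MAP_TYPE_HASH;
 *		attr.key_size = sizeof(__u32);
 *		attr.value_size = sizeof(__u64);
 *		attr.max_entries = 1024;
 *		strncpy(attr.map_name, "example_map", sizeof(attr.map_name));
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 *
 * On success the return value is the new map fd, with O_CLOEXEC set as
 * per bpf_map_new_fd() above.
 */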
1348 
1349 /* if error is returned, fd is released.
1350  * On success the caller should complete fd access with a matching fdput()
1351  */
1352 struct bpf_map *__bpf_map_get(struct fd f)
1353 {
1354 	if (!f.file)
1355 		return ERR_PTR(-EBADF);
1356 	if (f.file->f_op != &bpf_map_fops) {
1357 		fdput(f);
1358 		return ERR_PTR(-EINVAL);
1359 	}
1360 
1361 	return f.file->private_data;
1362 }
1363 
1364 void bpf_map_inc(struct bpf_map *map)
1365 {
1366 	atomic64_inc(&map->refcnt);
1367 }
1368 EXPORT_SYMBOL_GPL(bpf_map_inc);
1369 
1370 void bpf_map_inc_with_uref(struct bpf_map *map)
1371 {
1372 	atomic64_inc(&map->refcnt);
1373 	atomic64_inc(&map->usercnt);
1374 }
1375 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
1376 
1377 struct bpf_map *bpf_map_get(u32 ufd)
1378 {
1379 	struct fd f = fdget(ufd);
1380 	struct bpf_map *map;
1381 
1382 	map = __bpf_map_get(f);
1383 	if (IS_ERR(map))
1384 		return map;
1385 
1386 	bpf_map_inc(map);
1387 	fdput(f);
1388 
1389 	return map;
1390 }
1391 EXPORT_SYMBOL(bpf_map_get);
1392 
1393 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1394 {
1395 	struct fd f = fdget(ufd);
1396 	struct bpf_map *map;
1397 
1398 	map = __bpf_map_get(f);
1399 	if (IS_ERR(map))
1400 		return map;
1401 
1402 	bpf_map_inc_with_uref(map);
1403 	fdput(f);
1404 
1405 	return map;
1406 }
1407 
1408 /* map_idr_lock should have been held or the map should have been
1409  * protected by rcu read lock.
1410  */
1411 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
1412 {
1413 	int refold;
1414 
1415 	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
1416 	if (!refold)
1417 		return ERR_PTR(-ENOENT);
1418 	if (uref)
1419 		atomic64_inc(&map->usercnt);
1420 
1421 	return map;
1422 }
1423 
1424 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
1425 {
1426 	spin_lock_bh(&map_idr_lock);
1427 	map = __bpf_map_inc_not_zero(map, false);
1428 	spin_unlock_bh(&map_idr_lock);
1429 
1430 	return map;
1431 }
1432 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
1433 
1434 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
1435 {
1436 	return -ENOTSUPP;
1437 }
1438 
1439 static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1440 {
1441 	if (key_size)
1442 		return vmemdup_user(ukey, key_size);
1443 
1444 	if (ukey)
1445 		return ERR_PTR(-EINVAL);
1446 
1447 	return NULL;
1448 }
1449 
1450 static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1451 {
1452 	if (key_size)
1453 		return kvmemdup_bpfptr(ukey, key_size);
1454 
1455 	if (!bpfptr_is_null(ukey))
1456 		return ERR_PTR(-EINVAL);
1457 
1458 	return NULL;
1459 }
1460 
1461 /* last field in 'union bpf_attr' used by this command */
1462 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1463 
1464 static int map_lookup_elem(union bpf_attr *attr)
1465 {
1466 	void __user *ukey = u64_to_user_ptr(attr->key);
1467 	void __user *uvalue = u64_to_user_ptr(attr->value);
1468 	int ufd = attr->map_fd;
1469 	struct bpf_map *map;
1470 	void *key, *value;
1471 	u32 value_size;
1472 	struct fd f;
1473 	int err;
1474 
1475 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1476 		return -EINVAL;
1477 
1478 	if (attr->flags & ~BPF_F_LOCK)
1479 		return -EINVAL;
1480 
1481 	f = fdget(ufd);
1482 	map = __bpf_map_get(f);
1483 	if (IS_ERR(map))
1484 		return PTR_ERR(map);
1485 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1486 		err = -EPERM;
1487 		goto err_put;
1488 	}
1489 
1490 	if ((attr->flags & BPF_F_LOCK) &&
1491 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1492 		err = -EINVAL;
1493 		goto err_put;
1494 	}
1495 
1496 	key = __bpf_copy_key(ukey, map->key_size);
1497 	if (IS_ERR(key)) {
1498 		err = PTR_ERR(key);
1499 		goto err_put;
1500 	}
1501 
1502 	value_size = bpf_map_value_size(map);
1503 
1504 	err = -ENOMEM;
1505 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1506 	if (!value)
1507 		goto free_key;
1508 
1509 	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
1510 		if (copy_from_user(value, uvalue, value_size))
1511 			err = -EFAULT;
1512 		else
1513 			err = bpf_map_copy_value(map, key, value, attr->flags);
1514 		goto free_value;
1515 	}
1516 
1517 	err = bpf_map_copy_value(map, key, value, attr->flags);
1518 	if (err)
1519 		goto free_value;
1520 
1521 	err = -EFAULT;
1522 	if (copy_to_user(uvalue, value, value_size) != 0)
1523 		goto free_value;
1524 
1525 	err = 0;
1526 
1527 free_value:
1528 	kvfree(value);
1529 free_key:
1530 	kvfree(key);
1531 err_put:
1532 	fdput(f);
1533 	return err;
1534 }
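/*
 * Example: userspace counterpart of the command above (sketch; map_fd is
 * a placeholder, key/value sizes assume a u32->u64 map):
 *
 *	__u32 key = 1;
 *	__u64 value;
 *	union bpf_attr attr;
 *	int err;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = map_fd;
 *	attr.key = (__u64)(unsigned long)&key;
 *	attr.value = (__u64)(unsigned long)&value;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	// err == 0: 'value' was filled in; errno == ENOENT: key is absent
 */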
1535 
1536 
1537 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1538 
1539 static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1540 {
1541 	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1542 	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1543 	int ufd = attr->map_fd;
1544 	struct bpf_map *map;
1545 	void *key, *value;
1546 	u32 value_size;
1547 	struct fd f;
1548 	int err;
1549 
1550 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1551 		return -EINVAL;
1552 
1553 	f = fdget(ufd);
1554 	map = __bpf_map_get(f);
1555 	if (IS_ERR(map))
1556 		return PTR_ERR(map);
1557 	bpf_map_write_active_inc(map);
1558 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1559 		err = -EPERM;
1560 		goto err_put;
1561 	}
1562 
1563 	if ((attr->flags & BPF_F_LOCK) &&
1564 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1565 		err = -EINVAL;
1566 		goto err_put;
1567 	}
1568 
1569 	key = ___bpf_copy_key(ukey, map->key_size);
1570 	if (IS_ERR(key)) {
1571 		err = PTR_ERR(key);
1572 		goto err_put;
1573 	}
1574 
1575 	value_size = bpf_map_value_size(map);
1576 	value = kvmemdup_bpfptr(uvalue, value_size);
1577 	if (IS_ERR(value)) {
1578 		err = PTR_ERR(value);
1579 		goto free_key;
1580 	}
1581 
1582 	err = bpf_map_update_value(map, f.file, key, value, attr->flags);
1583 	if (!err)
1584 		maybe_wait_bpf_programs(map);
1585 
1586 	kvfree(value);
1587 free_key:
1588 	kvfree(key);
1589 err_put:
1590 	bpf_map_write_active_dec(map);
1591 	fdput(f);
1592 	return err;
1593 }
1594 
1595 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1596 
1597 static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
1598 {
1599 	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1600 	int ufd = attr->map_fd;
1601 	struct bpf_map *map;
1602 	struct fd f;
1603 	void *key;
1604 	int err;
1605 
1606 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1607 		return -EINVAL;
1608 
1609 	f = fdget(ufd);
1610 	map = __bpf_map_get(f);
1611 	if (IS_ERR(map))
1612 		return PTR_ERR(map);
1613 	bpf_map_write_active_inc(map);
1614 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1615 		err = -EPERM;
1616 		goto err_put;
1617 	}
1618 
1619 	key = ___bpf_copy_key(ukey, map->key_size);
1620 	if (IS_ERR(key)) {
1621 		err = PTR_ERR(key);
1622 		goto err_put;
1623 	}
1624 
1625 	if (bpf_map_is_offloaded(map)) {
1626 		err = bpf_map_offload_delete_elem(map, key);
1627 		goto out;
1628 	} else if (IS_FD_PROG_ARRAY(map) ||
1629 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1630 		/* These maps require sleepable context */
1631 		err = map->ops->map_delete_elem(map, key);
1632 		goto out;
1633 	}
1634 
1635 	bpf_disable_instrumentation();
1636 	rcu_read_lock();
1637 	err = map->ops->map_delete_elem(map, key);
1638 	rcu_read_unlock();
1639 	bpf_enable_instrumentation();
1640 	if (!err)
1641 		maybe_wait_bpf_programs(map);
1642 out:
1643 	kvfree(key);
1644 err_put:
1645 	bpf_map_write_active_dec(map);
1646 	fdput(f);
1647 	return err;
1648 }
1649 
1650 /* last field in 'union bpf_attr' used by this command */
1651 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1652 
1653 static int map_get_next_key(union bpf_attr *attr)
1654 {
1655 	void __user *ukey = u64_to_user_ptr(attr->key);
1656 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1657 	int ufd = attr->map_fd;
1658 	struct bpf_map *map;
1659 	void *key, *next_key;
1660 	struct fd f;
1661 	int err;
1662 
1663 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1664 		return -EINVAL;
1665 
1666 	f = fdget(ufd);
1667 	map = __bpf_map_get(f);
1668 	if (IS_ERR(map))
1669 		return PTR_ERR(map);
1670 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1671 		err = -EPERM;
1672 		goto err_put;
1673 	}
1674 
1675 	if (ukey) {
1676 		key = __bpf_copy_key(ukey, map->key_size);
1677 		if (IS_ERR(key)) {
1678 			err = PTR_ERR(key);
1679 			goto err_put;
1680 		}
1681 	} else {
1682 		key = NULL;
1683 	}
1684 
1685 	err = -ENOMEM;
1686 	next_key = kvmalloc(map->key_size, GFP_USER);
1687 	if (!next_key)
1688 		goto free_key;
1689 
1690 	if (bpf_map_is_offloaded(map)) {
1691 		err = bpf_map_offload_get_next_key(map, key, next_key);
1692 		goto out;
1693 	}
1694 
1695 	rcu_read_lock();
1696 	err = map->ops->map_get_next_key(map, key, next_key);
1697 	rcu_read_unlock();
1698 out:
1699 	if (err)
1700 		goto free_next_key;
1701 
1702 	err = -EFAULT;
1703 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1704 		goto free_next_key;
1705 
1706 	err = 0;
1707 
1708 free_next_key:
1709 	kvfree(next_key);
1710 free_key:
1711 	kvfree(key);
1712 err_put:
1713 	fdput(f);
1714 	return err;
1715 }
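/*
 * Example: BPF_MAP_GET_NEXT_KEY is the building block for iterating all
 * keys from userspace (sketch, assuming 4-byte keys). Passing attr.key
 * as NULL yields the first key; -ENOENT signals the end of the map:
 *
 *	union bpf_attr attr;
 *	__u32 key, next;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = map_fd;
 *	attr.next_key = (__u64)(unsigned long)&next;
 *	while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
 *		key = next;	// ... process 'key' ...
 *		attr.key = (__u64)(unsigned long)&key;
 *	}
 */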
1716 
1717 int generic_map_delete_batch(struct bpf_map *map,
1718 			     const union bpf_attr *attr,
1719 			     union bpf_attr __user *uattr)
1720 {
1721 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1722 	u32 cp, max_count;
1723 	int err = 0;
1724 	void *key;
1725 
1726 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1727 		return -EINVAL;
1728 
1729 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1730 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1731 		return -EINVAL;
1732 	}
1733 
1734 	max_count = attr->batch.count;
1735 	if (!max_count)
1736 		return 0;
1737 
1738 	if (put_user(0, &uattr->batch.count))
1739 		return -EFAULT;
1740 
1741 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1742 	if (!key)
1743 		return -ENOMEM;
1744 
1745 	for (cp = 0; cp < max_count; cp++) {
1746 		err = -EFAULT;
1747 		if (copy_from_user(key, keys + cp * map->key_size,
1748 				   map->key_size))
1749 			break;
1750 
1751 		if (bpf_map_is_offloaded(map)) {
1752 			err = bpf_map_offload_delete_elem(map, key);
1753 			break;
1754 		}
1755 
1756 		bpf_disable_instrumentation();
1757 		rcu_read_lock();
1758 		err = map->ops->map_delete_elem(map, key);
1759 		rcu_read_unlock();
1760 		bpf_enable_instrumentation();
1761 		if (err)
1762 			break;
1763 		cond_resched();
1764 	}
1765 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1766 		err = -EFAULT;
1767 
1768 	kvfree(key);
1769 
1770 	return err;
1771 }
1772 
1773 int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
1774 			     const union bpf_attr *attr,
1775 			     union bpf_attr __user *uattr)
1776 {
1777 	void __user *values = u64_to_user_ptr(attr->batch.values);
1778 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1779 	u32 value_size, cp, max_count;
1780 	void *key, *value;
1781 	int err = 0;
1782 
1783 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1784 		return -EINVAL;
1785 
1786 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1787 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1788 		return -EINVAL;
1789 	}
1790 
1791 	value_size = bpf_map_value_size(map);
1792 
1793 	max_count = attr->batch.count;
1794 	if (!max_count)
1795 		return 0;
1796 
1797 	if (put_user(0, &uattr->batch.count))
1798 		return -EFAULT;
1799 
1800 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1801 	if (!key)
1802 		return -ENOMEM;
1803 
1804 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1805 	if (!value) {
1806 		kvfree(key);
1807 		return -ENOMEM;
1808 	}
1809 
1810 	for (cp = 0; cp < max_count; cp++) {
1811 		err = -EFAULT;
1812 		if (copy_from_user(key, keys + cp * map->key_size,
1813 		    map->key_size) ||
1814 		    copy_from_user(value, values + cp * value_size, value_size))
1815 			break;
1816 
1817 		err = bpf_map_update_value(map, map_file, key, value,
1818 					   attr->batch.elem_flags);
1819 
1820 		if (err)
1821 			break;
1822 		cond_resched();
1823 	}
1824 
1825 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1826 		err = -EFAULT;
1827 
1828 	kvfree(value);
1829 	kvfree(key);
1830 
1831 	return err;
1832 }
1833 
1834 #define MAP_LOOKUP_RETRIES 3
1835 
1836 int generic_map_lookup_batch(struct bpf_map *map,
1837 				    const union bpf_attr *attr,
1838 				    union bpf_attr __user *uattr)
1839 {
1840 	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1841 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1842 	void __user *values = u64_to_user_ptr(attr->batch.values);
1843 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1844 	void *buf, *buf_prevkey, *prev_key, *key, *value;
1845 	int err, retry = MAP_LOOKUP_RETRIES;
1846 	u32 value_size, cp, max_count;
1847 
1848 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1849 		return -EINVAL;
1850 
1851 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1852 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1853 		return -EINVAL;
1854 
1855 	value_size = bpf_map_value_size(map);
1856 
1857 	max_count = attr->batch.count;
1858 	if (!max_count)
1859 		return 0;
1860 
1861 	if (put_user(0, &uattr->batch.count))
1862 		return -EFAULT;
1863 
1864 	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1865 	if (!buf_prevkey)
1866 		return -ENOMEM;
1867 
1868 	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1869 	if (!buf) {
1870 		kvfree(buf_prevkey);
1871 		return -ENOMEM;
1872 	}
1873 
1874 	err = -EFAULT;
1875 	prev_key = NULL;
1876 	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1877 		goto free_buf;
1878 	key = buf;
1879 	value = key + map->key_size;
1880 	if (ubatch)
1881 		prev_key = buf_prevkey;
1882 
1883 	for (cp = 0; cp < max_count;) {
1884 		rcu_read_lock();
1885 		err = map->ops->map_get_next_key(map, prev_key, key);
1886 		rcu_read_unlock();
1887 		if (err)
1888 			break;
1889 		err = bpf_map_copy_value(map, key, value,
1890 					 attr->batch.elem_flags);
1891 
1892 		if (err == -ENOENT) {
1893 			if (retry) {
1894 				retry--;
1895 				continue;
1896 			}
1897 			err = -EINTR;
1898 			break;
1899 		}
1900 
1901 		if (err)
1902 			goto free_buf;
1903 
1904 		if (copy_to_user(keys + cp * map->key_size, key,
1905 				 map->key_size)) {
1906 			err = -EFAULT;
1907 			goto free_buf;
1908 		}
1909 		if (copy_to_user(values + cp * value_size, value, value_size)) {
1910 			err = -EFAULT;
1911 			goto free_buf;
1912 		}
1913 
1914 		if (!prev_key)
1915 			prev_key = buf_prevkey;
1916 
1917 		swap(prev_key, key);
1918 		retry = MAP_LOOKUP_RETRIES;
1919 		cp++;
1920 		cond_resched();
1921 	}
1922 
1923 	if (err == -EFAULT)
1924 		goto free_buf;
1925 
1926 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1927 	    (cp && copy_to_user(uobatch, prev_key, map->key_size)))
1928 		err = -EFAULT;
1929 
1930 free_buf:
1931 	kvfree(buf_prevkey);
1932 	kvfree(buf);
1933 	return err;
1934 }
1935 
1936 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
1937 
1938 static int map_lookup_and_delete_elem(union bpf_attr *attr)
1939 {
1940 	void __user *ukey = u64_to_user_ptr(attr->key);
1941 	void __user *uvalue = u64_to_user_ptr(attr->value);
1942 	int ufd = attr->map_fd;
1943 	struct bpf_map *map;
1944 	void *key, *value;
1945 	u32 value_size;
1946 	struct fd f;
1947 	int err;
1948 
1949 	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1950 		return -EINVAL;
1951 
1952 	if (attr->flags & ~BPF_F_LOCK)
1953 		return -EINVAL;
1954 
1955 	f = fdget(ufd);
1956 	map = __bpf_map_get(f);
1957 	if (IS_ERR(map))
1958 		return PTR_ERR(map);
1959 	bpf_map_write_active_inc(map);
1960 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1961 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1962 		err = -EPERM;
1963 		goto err_put;
1964 	}
1965 
1966 	if (attr->flags &&
1967 	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
1968 	     map->map_type == BPF_MAP_TYPE_STACK)) {
1969 		err = -EINVAL;
1970 		goto err_put;
1971 	}
1972 
1973 	if ((attr->flags & BPF_F_LOCK) &&
1974 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1975 		err = -EINVAL;
1976 		goto err_put;
1977 	}
1978 
1979 	key = __bpf_copy_key(ukey, map->key_size);
1980 	if (IS_ERR(key)) {
1981 		err = PTR_ERR(key);
1982 		goto err_put;
1983 	}
1984 
1985 	value_size = bpf_map_value_size(map);
1986 
1987 	err = -ENOMEM;
1988 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1989 	if (!value)
1990 		goto free_key;
1991 
1992 	err = -ENOTSUPP;
1993 	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1994 	    map->map_type == BPF_MAP_TYPE_STACK) {
1995 		err = map->ops->map_pop_elem(map, value);
1996 	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
1997 		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1998 		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
1999 		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2000 		if (!bpf_map_is_offloaded(map)) {
2001 			bpf_disable_instrumentation();
2002 			rcu_read_lock();
2003 			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
2004 			rcu_read_unlock();
2005 			bpf_enable_instrumentation();
2006 		}
2007 	}
2008 
2009 	if (err)
2010 		goto free_value;
2011 
2012 	if (copy_to_user(uvalue, value, value_size) != 0) {
2013 		err = -EFAULT;
2014 		goto free_value;
2015 	}
2016 
2017 	err = 0;
2018 
2019 free_value:
2020 	kvfree(value);
2021 free_key:
2022 	kvfree(key);
2023 err_put:
2024 	bpf_map_write_active_dec(map);
2025 	fdput(f);
2026 	return err;
2027 }
2028 
2029 #define BPF_MAP_FREEZE_LAST_FIELD map_fd
2030 
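/* Freezing makes a map read-only from the syscall side; writes from BPF
 * programs are unaffected. It is refused for struct_ops maps and maps with
 * BTF-managed fields, requires write access to the map fd, and fails with
 * -EBUSY while a syscall-side write is in flight or if already frozen.
 */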
2031 static int map_freeze(const union bpf_attr *attr)
2032 {
2033 	int err = 0, ufd = attr->map_fd;
2034 	struct bpf_map *map;
2035 	struct fd f;
2036 
2037 	if (CHECK_ATTR(BPF_MAP_FREEZE))
2038 		return -EINVAL;
2039 
2040 	f = fdget(ufd);
2041 	map = __bpf_map_get(f);
2042 	if (IS_ERR(map))
2043 		return PTR_ERR(map);
2044 
2045 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
2046 		fdput(f);
2047 		return -ENOTSUPP;
2048 	}
2049 
2050 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
2051 		fdput(f);
2052 		return -EPERM;
2053 	}
2054 
2055 	mutex_lock(&map->freeze_mutex);
2056 	if (bpf_map_write_active(map)) {
2057 		err = -EBUSY;
2058 		goto err_put;
2059 	}
2060 	if (READ_ONCE(map->frozen)) {
2061 		err = -EBUSY;
2062 		goto err_put;
2063 	}
2064 
2065 	WRITE_ONCE(map->frozen, true);
2066 err_put:
2067 	mutex_unlock(&map->freeze_mutex);
2068 	fdput(f);
2069 	return err;
2070 }
2071 
2072 static const struct bpf_prog_ops * const bpf_prog_types[] = {
2073 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2074 	[_id] = & _name ## _prog_ops,
2075 #define BPF_MAP_TYPE(_id, _ops)
2076 #define BPF_LINK_TYPE(_id, _name)
2077 #include <linux/bpf_types.h>
2078 #undef BPF_PROG_TYPE
2079 #undef BPF_MAP_TYPE
2080 #undef BPF_LINK_TYPE
2081 };
2082 
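/* Resolve the ops table for a program type. The index is sanitized with
 * array_index_nospec() since 'type' is user-controlled and used to index
 * a kernel array (Spectre v1 hardening).
 */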
2083 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
2084 {
2085 	const struct bpf_prog_ops *ops;
2086 
2087 	if (type >= ARRAY_SIZE(bpf_prog_types))
2088 		return -EINVAL;
2089 	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
2090 	ops = bpf_prog_types[type];
2091 	if (!ops)
2092 		return -EINVAL;
2093 
2094 	if (!bpf_prog_is_offloaded(prog->aux))
2095 		prog->aux->ops = ops;
2096 	else
2097 		prog->aux->ops = &bpf_offload_prog_ops;
2098 	prog->type = type;
2099 	return 0;
2100 }
2101 
2102 enum bpf_audit {
2103 	BPF_AUDIT_LOAD,
2104 	BPF_AUDIT_UNLOAD,
2105 	BPF_AUDIT_MAX,
2106 };
2107 
2108 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
2109 	[BPF_AUDIT_LOAD]   = "LOAD",
2110 	[BPF_AUDIT_UNLOAD] = "UNLOAD",
2111 };
2112 
2113 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
2114 {
2115 	struct audit_context *ctx = NULL;
2116 	struct audit_buffer *ab;
2117 
2118 	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
2119 		return;
2120 	if (audit_enabled == AUDIT_OFF)
2121 		return;
2122 	if (!in_irq() && !irqs_disabled())
2123 		ctx = audit_context();
2124 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
2125 	if (unlikely(!ab))
2126 		return;
2127 	audit_log_format(ab, "prog-id=%u op=%s",
2128 			 prog->aux->id, bpf_audit_str[op]);
2129 	audit_log_end(ab);
2130 }
2131 
2132 static int bpf_prog_alloc_id(struct bpf_prog *prog)
2133 {
2134 	int id;
2135 
2136 	idr_preload(GFP_KERNEL);
2137 	spin_lock_bh(&prog_idr_lock);
2138 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
2139 	if (id > 0)
2140 		prog->aux->id = id;
2141 	spin_unlock_bh(&prog_idr_lock);
2142 	idr_preload_end();
2143 
2144 	/* id is in [1, INT_MAX); 0 would indicate an idr_alloc_cyclic() bug */
2145 	if (WARN_ON_ONCE(!id))
2146 		return -ENOSPC;
2147 
2148 	return id > 0 ? 0 : id;
2149 }
2150 
2151 void bpf_prog_free_id(struct bpf_prog *prog)
2152 {
2153 	unsigned long flags;
2154 
2155 	/* cBPF to eBPF migrations are currently not in the idr store.
2156 	 * Offloaded programs are removed from the store when their device
2157 	 * disappears - even if someone grabs an fd to them, they are unusable;
2158 	 * they simply wait for the refcnt to drop so they can be freed.
2159 	 */
2160 	if (!prog->aux->id)
2161 		return;
2162 
2163 	spin_lock_irqsave(&prog_idr_lock, flags);
2164 	idr_remove(&prog_idr, prog->aux->id);
2165 	prog->aux->id = 0;
2166 	spin_unlock_irqrestore(&prog_idr_lock, flags);
2167 }
2168 
2169 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2170 {
2171 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2172 
2173 	kvfree(aux->func_info);
2174 	kfree(aux->func_info_aux);
2175 	free_uid(aux->user);
2176 	security_bpf_prog_free(aux->prog);
2177 	bpf_prog_free(aux->prog);
2178 }
2179 
2180 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2181 {
2182 	bpf_prog_kallsyms_del_all(prog);
2183 	btf_put(prog->aux->btf);
2184 	module_put(prog->aux->mod);
2185 	kvfree(prog->aux->jited_linfo);
2186 	kvfree(prog->aux->linfo);
2187 	kfree(prog->aux->kfunc_tab);
2188 	if (prog->aux->attach_btf)
2189 		btf_put(prog->aux->attach_btf);
2190 
2191 	if (deferred) {
2192 		if (prog->aux->sleepable)
2193 			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2194 		else
2195 			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2196 	} else {
2197 		__bpf_prog_put_rcu(&prog->aux->rcu);
2198 	}
2199 }
2200 
2201 static void bpf_prog_put_deferred(struct work_struct *work)
2202 {
2203 	struct bpf_prog_aux *aux;
2204 	struct bpf_prog *prog;
2205 
2206 	aux = container_of(work, struct bpf_prog_aux, work);
2207 	prog = aux->prog;
2208 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2209 	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2210 	bpf_prog_free_id(prog);
2211 	__bpf_prog_put_noref(prog, true);
2212 }
2213 
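/* The final teardown (perf/audit notification, id freeing) is not safe to
 * run with IRQs disabled, so when the last reference is dropped from such
 * a context the work is punted to a workqueue.
 */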
2214 static void __bpf_prog_put(struct bpf_prog *prog)
2215 {
2216 	struct bpf_prog_aux *aux = prog->aux;
2217 
2218 	if (atomic64_dec_and_test(&aux->refcnt)) {
2219 		if (in_irq() || irqs_disabled()) {
2220 			INIT_WORK(&aux->work, bpf_prog_put_deferred);
2221 			schedule_work(&aux->work);
2222 		} else {
2223 			bpf_prog_put_deferred(&aux->work);
2224 		}
2225 	}
2226 }
2227 
2228 void bpf_prog_put(struct bpf_prog *prog)
2229 {
2230 	__bpf_prog_put(prog);
2231 }
2232 EXPORT_SYMBOL_GPL(bpf_prog_put);
2233 
2234 static int bpf_prog_release(struct inode *inode, struct file *filp)
2235 {
2236 	struct bpf_prog *prog = filp->private_data;
2237 
2238 	bpf_prog_put(prog);
2239 	return 0;
2240 }
2241 
2242 struct bpf_prog_kstats {
2243 	u64 nsecs;
2244 	u64 cnt;
2245 	u64 misses;
2246 };
2247 
2248 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2249 {
2250 	struct bpf_prog_stats *stats;
2251 	unsigned int flags;
2252 
2253 	stats = this_cpu_ptr(prog->stats);
2254 	flags = u64_stats_update_begin_irqsave(&stats->syncp);
2255 	u64_stats_inc(&stats->misses);
2256 	u64_stats_update_end_irqrestore(&stats->syncp, flags);
2257 }
2258 
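/* Sum the per-CPU counters into one snapshot. Each per-CPU copy is read
 * under a u64_stats fetch/retry loop so that the 64-bit values are
 * consistent even on 32-bit kernels, where the writer can be interrupted
 * mid-update.
 */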
2259 static void bpf_prog_get_stats(const struct bpf_prog *prog,
2260 			       struct bpf_prog_kstats *stats)
2261 {
2262 	u64 nsecs = 0, cnt = 0, misses = 0;
2263 	int cpu;
2264 
2265 	for_each_possible_cpu(cpu) {
2266 		const struct bpf_prog_stats *st;
2267 		unsigned int start;
2268 		u64 tnsecs, tcnt, tmisses;
2269 
2270 		st = per_cpu_ptr(prog->stats, cpu);
2271 		do {
2272 			start = u64_stats_fetch_begin(&st->syncp);
2273 			tnsecs = u64_stats_read(&st->nsecs);
2274 			tcnt = u64_stats_read(&st->cnt);
2275 			tmisses = u64_stats_read(&st->misses);
2276 		} while (u64_stats_fetch_retry(&st->syncp, start));
2277 		nsecs += tnsecs;
2278 		cnt += tcnt;
2279 		misses += tmisses;
2280 	}
2281 	stats->nsecs = nsecs;
2282 	stats->cnt = cnt;
2283 	stats->misses = misses;
2284 }
2285 
2286 #ifdef CONFIG_PROC_FS
2287 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2288 {
2289 	const struct bpf_prog *prog = filp->private_data;
2290 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2291 	struct bpf_prog_kstats stats;
2292 
2293 	bpf_prog_get_stats(prog, &stats);
2294 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2295 	seq_printf(m,
2296 		   "prog_type:\t%u\n"
2297 		   "prog_jited:\t%u\n"
2298 		   "prog_tag:\t%s\n"
2299 		   "memlock:\t%llu\n"
2300 		   "prog_id:\t%u\n"
2301 		   "run_time_ns:\t%llu\n"
2302 		   "run_cnt:\t%llu\n"
2303 		   "recursion_misses:\t%llu\n"
2304 		   "verified_insns:\t%u\n",
2305 		   prog->type,
2306 		   prog->jited,
2307 		   prog_tag,
2308 		   prog->pages * 1ULL << PAGE_SHIFT,
2309 		   prog->aux->id,
2310 		   stats.nsecs,
2311 		   stats.cnt,
2312 		   stats.misses,
2313 		   prog->aux->verified_insns);
2314 }
2315 #endif
2316 
2317 const struct file_operations bpf_prog_fops = {
2318 #ifdef CONFIG_PROC_FS
2319 	.show_fdinfo	= bpf_prog_show_fdinfo,
2320 #endif
2321 	.release	= bpf_prog_release,
2322 	.read		= bpf_dummy_read,
2323 	.write		= bpf_dummy_write,
2324 };
2325 
2326 int bpf_prog_new_fd(struct bpf_prog *prog)
2327 {
2328 	int ret;
2329 
2330 	ret = security_bpf_prog(prog);
2331 	if (ret < 0)
2332 		return ret;
2333 
2334 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2335 				O_RDWR | O_CLOEXEC);
2336 }
2337 
2338 static struct bpf_prog *____bpf_prog_get(struct fd f)
2339 {
2340 	if (!f.file)
2341 		return ERR_PTR(-EBADF);
2342 	if (f.file->f_op != &bpf_prog_fops) {
2343 		fdput(f);
2344 		return ERR_PTR(-EINVAL);
2345 	}
2346 
2347 	return f.file->private_data;
2348 }
2349 
2350 void bpf_prog_add(struct bpf_prog *prog, int i)
2351 {
2352 	atomic64_add(i, &prog->aux->refcnt);
2353 }
2354 EXPORT_SYMBOL_GPL(bpf_prog_add);
2355 
2356 void bpf_prog_sub(struct bpf_prog *prog, int i)
2357 {
2358 	/* Only to be used for undoing previous bpf_prog_add() in some
2359 	 * error path. We still know that another entity in our call
2360 	 * path holds a reference to the program, thus atomic64_sub() can
2361 	 * be safely used in such cases!
2362 	 */
2363 	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2364 }
2365 EXPORT_SYMBOL_GPL(bpf_prog_sub);
2366 
2367 void bpf_prog_inc(struct bpf_prog *prog)
2368 {
2369 	atomic64_inc(&prog->aux->refcnt);
2370 }
2371 EXPORT_SYMBOL_GPL(bpf_prog_inc);
2372 
2373 /* prog_idr_lock should have been held */
2374 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2375 {
2376 	int refold;
2377 
2378 	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2379 
2380 	if (!refold)
2381 		return ERR_PTR(-ENOENT);
2382 
2383 	return prog;
2384 }
2385 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2386 
2387 bool bpf_prog_get_ok(struct bpf_prog *prog,
2388 			    enum bpf_prog_type *attach_type, bool attach_drv)
2389 {
2390 	/* not an attachment, just a refcount inc, always allow */
2391 	if (!attach_type)
2392 		return true;
2393 
2394 	if (prog->type != *attach_type)
2395 		return false;
2396 	if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
2397 		return false;
2398 
2399 	return true;
2400 }
2401 
2402 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2403 				       bool attach_drv)
2404 {
2405 	struct fd f = fdget(ufd);
2406 	struct bpf_prog *prog;
2407 
2408 	prog = ____bpf_prog_get(f);
2409 	if (IS_ERR(prog))
2410 		return prog;
2411 	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
2412 		prog = ERR_PTR(-EINVAL);
2413 		goto out;
2414 	}
2415 
2416 	bpf_prog_inc(prog);
2417 out:
2418 	fdput(f);
2419 	return prog;
2420 }
2421 
2422 struct bpf_prog *bpf_prog_get(u32 ufd)
2423 {
2424 	return __bpf_prog_get(ufd, NULL, false);
2425 }
2426 
2427 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2428 				       bool attach_drv)
2429 {
2430 	return __bpf_prog_get(ufd, &type, attach_drv);
2431 }
2432 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2433 
2434 /* Initially all BPF programs could be loaded w/o specifying
2435  * expected_attach_type. Later for some of them specifying expected_attach_type
2436  * at load time became required so that the program could be validated properly.
2437  * Programs of types that are allowed to be loaded both w/ and w/o (for
2438  * backward compatibility) expected_attach_type should have the default attach
2439  * type assigned to expected_attach_type for the latter case, so that it can be
2440  * validated later at attach time.
2441  *
2442  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
2443  * prog type requires it but has some attach types that have to be backward
2444  * compatible.
2445  */
2446 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2447 {
2448 	switch (attr->prog_type) {
2449 	case BPF_PROG_TYPE_CGROUP_SOCK:
2450 		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2451 		 * exist, so checking for non-zero is the way to go here.
2452 		 */
2453 		if (!attr->expected_attach_type)
2454 			attr->expected_attach_type =
2455 				BPF_CGROUP_INET_SOCK_CREATE;
2456 		break;
2457 	case BPF_PROG_TYPE_SK_REUSEPORT:
2458 		if (!attr->expected_attach_type)
2459 			attr->expected_attach_type =
2460 				BPF_SK_REUSEPORT_SELECT;
2461 		break;
2462 	}
2463 }
2464 
2465 static int
2466 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2467 			   enum bpf_attach_type expected_attach_type,
2468 			   struct btf *attach_btf, u32 btf_id,
2469 			   struct bpf_prog *dst_prog)
2470 {
2471 	if (btf_id) {
2472 		if (btf_id > BTF_MAX_TYPE)
2473 			return -EINVAL;
2474 
2475 		if (!attach_btf && !dst_prog)
2476 			return -EINVAL;
2477 
2478 		switch (prog_type) {
2479 		case BPF_PROG_TYPE_TRACING:
2480 		case BPF_PROG_TYPE_LSM:
2481 		case BPF_PROG_TYPE_STRUCT_OPS:
2482 		case BPF_PROG_TYPE_EXT:
2483 			break;
2484 		default:
2485 			return -EINVAL;
2486 		}
2487 	}
2488 
2489 	if (attach_btf && (!btf_id || dst_prog))
2490 		return -EINVAL;
2491 
2492 	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2493 	    prog_type != BPF_PROG_TYPE_EXT)
2494 		return -EINVAL;
2495 
2496 	switch (prog_type) {
2497 	case BPF_PROG_TYPE_CGROUP_SOCK:
2498 		switch (expected_attach_type) {
2499 		case BPF_CGROUP_INET_SOCK_CREATE:
2500 		case BPF_CGROUP_INET_SOCK_RELEASE:
2501 		case BPF_CGROUP_INET4_POST_BIND:
2502 		case BPF_CGROUP_INET6_POST_BIND:
2503 			return 0;
2504 		default:
2505 			return -EINVAL;
2506 		}
2507 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2508 		switch (expected_attach_type) {
2509 		case BPF_CGROUP_INET4_BIND:
2510 		case BPF_CGROUP_INET6_BIND:
2511 		case BPF_CGROUP_INET4_CONNECT:
2512 		case BPF_CGROUP_INET6_CONNECT:
2513 		case BPF_CGROUP_UNIX_CONNECT:
2514 		case BPF_CGROUP_INET4_GETPEERNAME:
2515 		case BPF_CGROUP_INET6_GETPEERNAME:
2516 		case BPF_CGROUP_UNIX_GETPEERNAME:
2517 		case BPF_CGROUP_INET4_GETSOCKNAME:
2518 		case BPF_CGROUP_INET6_GETSOCKNAME:
2519 		case BPF_CGROUP_UNIX_GETSOCKNAME:
2520 		case BPF_CGROUP_UDP4_SENDMSG:
2521 		case BPF_CGROUP_UDP6_SENDMSG:
2522 		case BPF_CGROUP_UNIX_SENDMSG:
2523 		case BPF_CGROUP_UDP4_RECVMSG:
2524 		case BPF_CGROUP_UDP6_RECVMSG:
2525 		case BPF_CGROUP_UNIX_RECVMSG:
2526 			return 0;
2527 		default:
2528 			return -EINVAL;
2529 		}
2530 	case BPF_PROG_TYPE_CGROUP_SKB:
2531 		switch (expected_attach_type) {
2532 		case BPF_CGROUP_INET_INGRESS:
2533 		case BPF_CGROUP_INET_EGRESS:
2534 			return 0;
2535 		default:
2536 			return -EINVAL;
2537 		}
2538 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2539 		switch (expected_attach_type) {
2540 		case BPF_CGROUP_SETSOCKOPT:
2541 		case BPF_CGROUP_GETSOCKOPT:
2542 			return 0;
2543 		default:
2544 			return -EINVAL;
2545 		}
2546 	case BPF_PROG_TYPE_SK_LOOKUP:
2547 		if (expected_attach_type == BPF_SK_LOOKUP)
2548 			return 0;
2549 		return -EINVAL;
2550 	case BPF_PROG_TYPE_SK_REUSEPORT:
2551 		switch (expected_attach_type) {
2552 		case BPF_SK_REUSEPORT_SELECT:
2553 		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2554 			return 0;
2555 		default:
2556 			return -EINVAL;
2557 		}
2558 	case BPF_PROG_TYPE_NETFILTER:
2559 		if (expected_attach_type == BPF_NETFILTER)
2560 			return 0;
2561 		return -EINVAL;
2562 	case BPF_PROG_TYPE_SYSCALL:
2563 	case BPF_PROG_TYPE_EXT:
2564 		if (expected_attach_type)
2565 			return -EINVAL;
2566 		fallthrough;
2567 	default:
2568 		return 0;
2569 	}
2570 }
2571 
2572 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2573 {
2574 	switch (prog_type) {
2575 	case BPF_PROG_TYPE_SCHED_CLS:
2576 	case BPF_PROG_TYPE_SCHED_ACT:
2577 	case BPF_PROG_TYPE_XDP:
2578 	case BPF_PROG_TYPE_LWT_IN:
2579 	case BPF_PROG_TYPE_LWT_OUT:
2580 	case BPF_PROG_TYPE_LWT_XMIT:
2581 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2582 	case BPF_PROG_TYPE_SK_SKB:
2583 	case BPF_PROG_TYPE_SK_MSG:
2584 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2585 	case BPF_PROG_TYPE_CGROUP_DEVICE:
2586 	case BPF_PROG_TYPE_CGROUP_SOCK:
2587 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2588 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2589 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2590 	case BPF_PROG_TYPE_SOCK_OPS:
2591 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2592 	case BPF_PROG_TYPE_NETFILTER:
2593 		return true;
2594 	case BPF_PROG_TYPE_CGROUP_SKB:
2595 		/* always unpriv */
2596 	case BPF_PROG_TYPE_SK_REUSEPORT:
2597 		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
2598 	default:
2599 		return false;
2600 	}
2601 }
2602 
2603 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2604 {
2605 	switch (prog_type) {
2606 	case BPF_PROG_TYPE_KPROBE:
2607 	case BPF_PROG_TYPE_TRACEPOINT:
2608 	case BPF_PROG_TYPE_PERF_EVENT:
2609 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2610 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2611 	case BPF_PROG_TYPE_TRACING:
2612 	case BPF_PROG_TYPE_LSM:
2613 	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2614 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2615 		return true;
2616 	default:
2617 		return false;
2618 	}
2619 }
2620 
2621 /* last field in 'union bpf_attr' used by this command */
2622 #define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
2623 
2624 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2625 {
2626 	enum bpf_prog_type type = attr->prog_type;
2627 	struct bpf_prog *prog, *dst_prog = NULL;
2628 	struct btf *attach_btf = NULL;
2629 	struct bpf_token *token = NULL;
2630 	bool bpf_cap;
2631 	int err;
2632 	char license[128];
2633 
2634 	if (CHECK_ATTR(BPF_PROG_LOAD))
2635 		return -EINVAL;
2636 
2637 	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2638 				 BPF_F_ANY_ALIGNMENT |
2639 				 BPF_F_TEST_STATE_FREQ |
2640 				 BPF_F_SLEEPABLE |
2641 				 BPF_F_TEST_RND_HI32 |
2642 				 BPF_F_XDP_HAS_FRAGS |
2643 				 BPF_F_XDP_DEV_BOUND_ONLY |
2644 				 BPF_F_TEST_REG_INVARIANTS))
2645 		return -EINVAL;
2646 
2647 	bpf_prog_load_fixup_attach_type(attr);
2648 
2649 	if (attr->prog_token_fd) {
2650 		token = bpf_token_get_from_fd(attr->prog_token_fd);
2651 		if (IS_ERR(token))
2652 			return PTR_ERR(token);
2653 		/* if current token doesn't grant prog loading permissions,
2654 		 * then we can't use this token, so ignore it and rely on
2655 		 * system-wide capabilities checks
2656 		 */
2657 		if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
2658 		    !bpf_token_allow_prog_type(token, attr->prog_type,
2659 					       attr->expected_attach_type)) {
2660 			bpf_token_put(token);
2661 			token = NULL;
2662 		}
2663 	}
2664 
2665 	bpf_cap = bpf_token_capable(token, CAP_BPF);
2666 	err = -EPERM;
2667 
2668 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2669 	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2670 	    !bpf_cap)
2671 		goto put_token;
2672 
2673 	/* Intent here is for unprivileged_bpf_disabled to block BPF program
2674 	 * creation for unprivileged users; other actions depend
2675 	 * on fd availability and access to bpffs, and are thus gated on
2676 	 * object creation success. Even with unprivileged BPF disabled,
2677 	 * capability checks are still carried out for these
2678 	 * and other operations.
2679 	 */
2680 	if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
2681 		goto put_token;
2682 
2683 	if (attr->insn_cnt == 0 ||
2684 	    attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
2685 		err = -E2BIG;
2686 		goto put_token;
2687 	}
2688 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2689 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2690 	    !bpf_cap)
2691 		goto put_token;
2692 
2693 	if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
2694 		goto put_token;
2695 	if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
2696 		goto put_token;
2697 
2698 	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2699 	 * or btf; we need to check which one it is
2700 	 */
2701 	if (attr->attach_prog_fd) {
2702 		dst_prog = bpf_prog_get(attr->attach_prog_fd);
2703 		if (IS_ERR(dst_prog)) {
2704 			dst_prog = NULL;
2705 			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2706 			if (IS_ERR(attach_btf)) {
2707 				err = -EINVAL;
2708 				goto put_token;
2709 			}
2710 			if (!btf_is_kernel(attach_btf)) {
2711 				/* attaching through specifying bpf_prog's BTF
2712 				 * objects directly might be supported eventually
2713 				 */
2714 				btf_put(attach_btf);
2715 				err = -ENOTSUPP;
2716 				goto put_token;
2717 			}
2718 		}
2719 	} else if (attr->attach_btf_id) {
2720 		/* fall back to vmlinux BTF, if BTF type ID is specified */
2721 		attach_btf = bpf_get_btf_vmlinux();
2722 		if (IS_ERR(attach_btf)) {
2723 			err = PTR_ERR(attach_btf);
2724 			goto put_token;
2725 		}
2726 		if (!attach_btf) {
2727 			err = -EINVAL;
2728 			goto put_token;
2729 		}
2730 		btf_get(attach_btf);
2731 	}
2732 
2733 	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2734 				       attach_btf, attr->attach_btf_id,
2735 				       dst_prog)) {
2736 		if (dst_prog)
2737 			bpf_prog_put(dst_prog);
2738 		if (attach_btf)
2739 			btf_put(attach_btf);
2740 		err = -EINVAL;
2741 		goto put_token;
2742 	}
2743 
2744 	/* plain bpf_prog allocation */
2745 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2746 	if (!prog) {
2747 		if (dst_prog)
2748 			bpf_prog_put(dst_prog);
2749 		if (attach_btf)
2750 			btf_put(attach_btf);
2751 		err = -ENOMEM;
2752 		goto put_token;
2753 	}
2754 
2755 	prog->expected_attach_type = attr->expected_attach_type;
2756 	prog->aux->attach_btf = attach_btf;
2757 	prog->aux->attach_btf_id = attr->attach_btf_id;
2758 	prog->aux->dst_prog = dst_prog;
2759 	prog->aux->dev_bound = !!attr->prog_ifindex;
2760 	prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2761 	prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2762 
2763 	/* move token into prog->aux, reuse taken refcnt */
2764 	prog->aux->token = token;
2765 	token = NULL;
2766 
2767 	prog->aux->user = get_current_user();
2768 	prog->len = attr->insn_cnt;
2769 
2770 	err = -EFAULT;
2771 	if (copy_from_bpfptr(prog->insns,
2772 			     make_bpfptr(attr->insns, uattr.is_kernel),
2773 			     bpf_prog_insn_size(prog)) != 0)
2774 		goto free_prog;
2775 	/* copy eBPF program license from user space */
2776 	if (strncpy_from_bpfptr(license,
2777 				make_bpfptr(attr->license, uattr.is_kernel),
2778 				sizeof(license) - 1) < 0)
2779 		goto free_prog;
2780 	license[sizeof(license) - 1] = 0;
2781 
2782 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2783 	prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2784 
2785 	prog->orig_prog = NULL;
2786 	prog->jited = 0;
2787 
2788 	atomic64_set(&prog->aux->refcnt, 1);
2789 
2790 	if (bpf_prog_is_dev_bound(prog->aux)) {
2791 		err = bpf_prog_dev_bound_init(prog, attr);
2792 		if (err)
2793 			goto free_prog;
2794 	}
2795 
2796 	if (type == BPF_PROG_TYPE_EXT && dst_prog &&
2797 	    bpf_prog_is_dev_bound(dst_prog->aux)) {
2798 		err = bpf_prog_dev_bound_inherit(prog, dst_prog);
2799 		if (err)
2800 			goto free_prog;
2801 	}
2802 
2803 	/* find program type: socket_filter vs tracing_filter */
2804 	err = find_prog_type(type, prog);
2805 	if (err < 0)
2806 		goto free_prog;
2807 
2808 	prog->aux->load_time = ktime_get_boottime_ns();
2809 	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2810 			       sizeof(attr->prog_name));
2811 	if (err < 0)
2812 		goto free_prog;
2813 
2814 	err = security_bpf_prog_load(prog, attr, token);
2815 	if (err)
2816 		goto free_prog_sec;
2817 
2818 	/* run eBPF verifier */
2819 	err = bpf_check(&prog, attr, uattr, uattr_size);
2820 	if (err < 0)
2821 		goto free_used_maps;
2822 
2823 	prog = bpf_prog_select_runtime(prog, &err);
2824 	if (err < 0)
2825 		goto free_used_maps;
2826 
2827 	err = bpf_prog_alloc_id(prog);
2828 	if (err)
2829 		goto free_used_maps;
2830 
2831 	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2832 	 * effectively publicly exposed. However, retrieving via
2833 	 * bpf_prog_get_fd_by_id() will take another reference,
2834 	 * therefore it cannot go away underneath us.
2835 	 *
2836 	 * Only for the time /after/ successful bpf_prog_new_fd()
2837 	 * and before returning to userspace, we might just hold
2838 	 * one reference and any parallel close on that fd could
2839 	 * rip everything out. Hence, the notifications below must
2840 	 * happen before bpf_prog_new_fd().
2841 	 *
2842 	 * Also, any failure handling from this point onwards must
2843 	 * be using bpf_prog_put() given the program is exposed.
2844 	 */
2845 	bpf_prog_kallsyms_add(prog);
2846 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2847 	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2848 
2849 	err = bpf_prog_new_fd(prog);
2850 	if (err < 0)
2851 		bpf_prog_put(prog);
2852 	return err;
2853 
2854 free_used_maps:
2855 	/* In case we have subprogs, we need to wait for a grace
2856 	 * period before we can tear down JIT memory since symbols
2857 	 * are already exposed under kallsyms.
2858 	 */
2859 	__bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
2860 	return err;
2861 
2862 free_prog_sec:
2863 	security_bpf_prog_free(prog);
2864 free_prog:
2865 	free_uid(prog->aux->user);
2866 	if (prog->aux->attach_btf)
2867 		btf_put(prog->aux->attach_btf);
2868 	bpf_prog_free(prog);
2869 put_token:
2870 	bpf_token_put(token);
2871 	return err;
2872 }
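
/* For illustration only -- not part of this file: a minimal sketch of a
 * userspace caller driving the BPF_PROG_LOAD path above. It loads a
 * trivial "return 0" socket filter (two instructions: r0 = 0; exit) via
 * the bpf(2) syscall. Names come from the UAPI headers; error handling
 * is elided.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int load_trivial_prog(void)
 *	{
 *		struct bpf_insn insns[] = {
 *			{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *			  .dst_reg = BPF_REG_0, .imm = 0 },
 *			{ .code = BPF_JMP | BPF_EXIT },
 *		};
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insns = (__u64)(unsigned long)insns;
 *		attr.insn_cnt = 2;
 *		attr.license = (__u64)(unsigned long)"GPL";
 *		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *	}
 */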
2873 
2874 #define BPF_OBJ_LAST_FIELD path_fd
2875 
2876 static int bpf_obj_pin(const union bpf_attr *attr)
2877 {
2878 	int path_fd;
2879 
2880 	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
2881 		return -EINVAL;
2882 
2883 	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2884 	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2885 		return -EINVAL;
2886 
2887 	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2888 	return bpf_obj_pin_user(attr->bpf_fd, path_fd,
2889 				u64_to_user_ptr(attr->pathname));
2890 }
2891 
2892 static int bpf_obj_get(const union bpf_attr *attr)
2893 {
2894 	int path_fd;
2895 
2896 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2897 	    attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
2898 		return -EINVAL;
2899 
2900 	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2901 	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2902 		return -EINVAL;
2903 
2904 	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2905 	return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
2906 				attr->file_flags);
2907 }
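
/* Illustrative userspace sketch (not part of this file): pinning an object
 * fd at a bpffs path with BPF_OBJ_PIN, the counterpart of bpf_obj_pin()
 * above. Assumes bpffs is mounted at /sys/fs/bpf and prog_fd is a valid
 * BPF object fd.
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	attr.bpf_fd = prog_fd;
 *	err = syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 */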
2908 
2909 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2910 		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
2911 {
2912 	atomic64_set(&link->refcnt, 1);
2913 	link->type = type;
2914 	link->id = 0;
2915 	link->ops = ops;
2916 	link->prog = prog;
2917 }
2918 
2919 static void bpf_link_free_id(int id)
2920 {
2921 	if (!id)
2922 		return;
2923 
2924 	spin_lock_bh(&link_idr_lock);
2925 	idr_remove(&link_idr, id);
2926 	spin_unlock_bh(&link_idr_lock);
2927 }
2928 
2929 /* Clean up bpf_link and corresponding anon_inode file and FD. After
2930  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2931  * anon_inode's release() call. This helper marks bpf_link as
2932  * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
2933  * is not decremented; it is the responsibility of the calling code that failed
2934  * to complete bpf_link initialization.
2935  * This helper eventually calls link's dealloc callback, but does not call
2936  * link's release callback.
2937  */
2938 void bpf_link_cleanup(struct bpf_link_primer *primer)
2939 {
2940 	primer->link->prog = NULL;
2941 	bpf_link_free_id(primer->id);
2942 	fput(primer->file);
2943 	put_unused_fd(primer->fd);
2944 }
2945 
2946 void bpf_link_inc(struct bpf_link *link)
2947 {
2948 	atomic64_inc(&link->refcnt);
2949 }
2950 
2951 /* bpf_link_free is guaranteed to be called from process context */
2952 static void bpf_link_free(struct bpf_link *link)
2953 {
2954 	bpf_link_free_id(link->id);
2955 	if (link->prog) {
2956 		/* detach BPF program, clean up used resources */
2957 		link->ops->release(link);
2958 		bpf_prog_put(link->prog);
2959 	}
2960 	/* free bpf_link and its containing memory */
2961 	link->ops->dealloc(link);
2962 }
2963 
2964 static void bpf_link_put_deferred(struct work_struct *work)
2965 {
2966 	struct bpf_link *link = container_of(work, struct bpf_link, work);
2967 
2968 	bpf_link_free(link);
2969 }
2970 
2971 /* bpf_link_put() might be called from atomic context; freeing the link may
2972  * need to acquire sleeping locks, so defer the actual free to a workqueue.
2973  */
2974 void bpf_link_put(struct bpf_link *link)
2975 {
2976 	if (!atomic64_dec_and_test(&link->refcnt))
2977 		return;
2978 
2979 	INIT_WORK(&link->work, bpf_link_put_deferred);
2980 	schedule_work(&link->work);
2981 }
2982 EXPORT_SYMBOL(bpf_link_put);
2983 
2984 static void bpf_link_put_direct(struct bpf_link *link)
2985 {
2986 	if (!atomic64_dec_and_test(&link->refcnt))
2987 		return;
2988 	bpf_link_free(link);
2989 }
2990 
2991 static int bpf_link_release(struct inode *inode, struct file *filp)
2992 {
2993 	struct bpf_link *link = filp->private_data;
2994 
2995 	bpf_link_put_direct(link);
2996 	return 0;
2997 }
2998 
2999 #ifdef CONFIG_PROC_FS
3000 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
3001 #define BPF_MAP_TYPE(_id, _ops)
3002 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
3003 static const char *bpf_link_type_strs[] = {
3004 	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
3005 #include <linux/bpf_types.h>
3006 };
3007 #undef BPF_PROG_TYPE
3008 #undef BPF_MAP_TYPE
3009 #undef BPF_LINK_TYPE
3010 
3011 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
3012 {
3013 	const struct bpf_link *link = filp->private_data;
3014 	const struct bpf_prog *prog = link->prog;
3015 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
3016 
3017 	seq_printf(m,
3018 		   "link_type:\t%s\n"
3019 		   "link_id:\t%u\n",
3020 		   bpf_link_type_strs[link->type],
3021 		   link->id);
3022 	if (prog) {
3023 		bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
3024 		seq_printf(m,
3025 			   "prog_tag:\t%s\n"
3026 			   "prog_id:\t%u\n",
3027 			   prog_tag,
3028 			   prog->aux->id);
3029 	}
3030 	if (link->ops->show_fdinfo)
3031 		link->ops->show_fdinfo(link, m);
3032 }
3033 #endif
3034 
3035 static const struct file_operations bpf_link_fops = {
3036 #ifdef CONFIG_PROC_FS
3037 	.show_fdinfo	= bpf_link_show_fdinfo,
3038 #endif
3039 	.release	= bpf_link_release,
3040 	.read		= bpf_dummy_read,
3041 	.write		= bpf_dummy_write,
3042 };
3043 
3044 static int bpf_link_alloc_id(struct bpf_link *link)
3045 {
3046 	int id;
3047 
3048 	idr_preload(GFP_KERNEL);
3049 	spin_lock_bh(&link_idr_lock);
3050 	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
3051 	spin_unlock_bh(&link_idr_lock);
3052 	idr_preload_end();
3053 
3054 	return id;
3055 }
3056 
3057 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
3058  * reserving unused FD and allocating ID from link_idr. This is to be paired
3059  * with bpf_link_settle() to install FD and ID and expose bpf_link to
3060  * user-space, if bpf_link is successfully attached. If not, bpf_link and
3061  * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
3062  * the transient state is passed around in struct bpf_link_primer.
3063  * This is the preferred way to create and initialize bpf_link, especially when
3064  * there are complicated and expensive operations in between creating bpf_link
3065  * itself and attaching it to a BPF hook. By using bpf_link_prime() and
3066  * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
3067  * expensive (and potentially failing) roll-back operations in the rare case
3068  * that the file, FD, or ID can't be allocated.
3069  */
3070 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
3071 {
3072 	struct file *file;
3073 	int fd, id;
3074 
3075 	fd = get_unused_fd_flags(O_CLOEXEC);
3076 	if (fd < 0)
3077 		return fd;
3078 
3080 	id = bpf_link_alloc_id(link);
3081 	if (id < 0) {
3082 		put_unused_fd(fd);
3083 		return id;
3084 	}
3085 
3086 	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
3087 	if (IS_ERR(file)) {
3088 		bpf_link_free_id(id);
3089 		put_unused_fd(fd);
3090 		return PTR_ERR(file);
3091 	}
3092 
3093 	primer->link = link;
3094 	primer->file = file;
3095 	primer->fd = fd;
3096 	primer->id = id;
3097 	return 0;
3098 }
3099 
3100 int bpf_link_settle(struct bpf_link_primer *primer)
3101 {
3102 	/* make bpf_link fetchable by ID */
3103 	spin_lock_bh(&link_idr_lock);
3104 	primer->link->id = primer->id;
3105 	spin_unlock_bh(&link_idr_lock);
3106 	/* make bpf_link fetchable by FD */
3107 	fd_install(primer->fd, primer->file);
3108 	/* pass through installed FD */
3109 	return primer->fd;
3110 }
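
/* Typical prime/settle usage, mirroring the attach paths below
 * (illustrative sketch; attach_to_hook() stands in for the hook-specific
 * attach step):
 *
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = attach_to_hook(link);
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);
 *		return err;
 *	}
 *	return bpf_link_settle(&link_primer);
 */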
3111 
3112 int bpf_link_new_fd(struct bpf_link *link)
3113 {
3114 	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
3115 }
3116 
3117 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
3118 {
3119 	struct fd f = fdget(ufd);
3120 	struct bpf_link *link;
3121 
3122 	if (!f.file)
3123 		return ERR_PTR(-EBADF);
3124 	if (f.file->f_op != &bpf_link_fops) {
3125 		fdput(f);
3126 		return ERR_PTR(-EINVAL);
3127 	}
3128 
3129 	link = f.file->private_data;
3130 	bpf_link_inc(link);
3131 	fdput(f);
3132 
3133 	return link;
3134 }
3135 EXPORT_SYMBOL(bpf_link_get_from_fd);
3136 
3137 static void bpf_tracing_link_release(struct bpf_link *link)
3138 {
3139 	struct bpf_tracing_link *tr_link =
3140 		container_of(link, struct bpf_tracing_link, link.link);
3141 
3142 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
3143 						tr_link->trampoline));
3144 
3145 	bpf_trampoline_put(tr_link->trampoline);
3146 
3147 	/* tgt_prog is NULL if target is a kernel function */
3148 	if (tr_link->tgt_prog)
3149 		bpf_prog_put(tr_link->tgt_prog);
3150 }
3151 
3152 static void bpf_tracing_link_dealloc(struct bpf_link *link)
3153 {
3154 	struct bpf_tracing_link *tr_link =
3155 		container_of(link, struct bpf_tracing_link, link.link);
3156 
3157 	kfree(tr_link);
3158 }
3159 
3160 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3161 					 struct seq_file *seq)
3162 {
3163 	struct bpf_tracing_link *tr_link =
3164 		container_of(link, struct bpf_tracing_link, link.link);
3165 	u32 target_btf_id, target_obj_id;
3166 
3167 	bpf_trampoline_unpack_key(tr_link->trampoline->key,
3168 				  &target_obj_id, &target_btf_id);
3169 	seq_printf(seq,
3170 		   "attach_type:\t%d\n"
3171 		   "target_obj_id:\t%u\n"
3172 		   "target_btf_id:\t%u\n",
3173 		   tr_link->attach_type,
3174 		   target_obj_id,
3175 		   target_btf_id);
3176 }
3177 
3178 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3179 					   struct bpf_link_info *info)
3180 {
3181 	struct bpf_tracing_link *tr_link =
3182 		container_of(link, struct bpf_tracing_link, link.link);
3183 
3184 	info->tracing.attach_type = tr_link->attach_type;
3185 	bpf_trampoline_unpack_key(tr_link->trampoline->key,
3186 				  &info->tracing.target_obj_id,
3187 				  &info->tracing.target_btf_id);
3188 
3189 	return 0;
3190 }
3191 
3192 static const struct bpf_link_ops bpf_tracing_link_lops = {
3193 	.release = bpf_tracing_link_release,
3194 	.dealloc = bpf_tracing_link_dealloc,
3195 	.show_fdinfo = bpf_tracing_link_show_fdinfo,
3196 	.fill_link_info = bpf_tracing_link_fill_link_info,
3197 };
3198 
3199 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
3200 				   int tgt_prog_fd,
3201 				   u32 btf_id,
3202 				   u64 bpf_cookie)
3203 {
3204 	struct bpf_link_primer link_primer;
3205 	struct bpf_prog *tgt_prog = NULL;
3206 	struct bpf_trampoline *tr = NULL;
3207 	struct bpf_tracing_link *link;
3208 	u64 key = 0;
3209 	int err;
3210 
3211 	switch (prog->type) {
3212 	case BPF_PROG_TYPE_TRACING:
3213 		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
3214 		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
3215 		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
3216 			err = -EINVAL;
3217 			goto out_put_prog;
3218 		}
3219 		break;
3220 	case BPF_PROG_TYPE_EXT:
3221 		if (prog->expected_attach_type != 0) {
3222 			err = -EINVAL;
3223 			goto out_put_prog;
3224 		}
3225 		break;
3226 	case BPF_PROG_TYPE_LSM:
3227 		if (prog->expected_attach_type != BPF_LSM_MAC) {
3228 			err = -EINVAL;
3229 			goto out_put_prog;
3230 		}
3231 		break;
3232 	default:
3233 		err = -EINVAL;
3234 		goto out_put_prog;
3235 	}
3236 
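	/* tgt_prog_fd and btf_id must be specified together or not at all */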
3237 	if (!!tgt_prog_fd != !!btf_id) {
3238 		err = -EINVAL;
3239 		goto out_put_prog;
3240 	}
3241 
3242 	if (tgt_prog_fd) {
3243 		/* For now we only allow new targets for BPF_PROG_TYPE_EXT */
3244 		if (prog->type != BPF_PROG_TYPE_EXT) {
3245 			err = -EINVAL;
3246 			goto out_put_prog;
3247 		}
3248 
3249 		tgt_prog = bpf_prog_get(tgt_prog_fd);
3250 		if (IS_ERR(tgt_prog)) {
3251 			err = PTR_ERR(tgt_prog);
3252 			tgt_prog = NULL;
3253 			goto out_put_prog;
3254 		}
3255 
3256 		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3257 	}
3258 
3259 	link = kzalloc(sizeof(*link), GFP_USER);
3260 	if (!link) {
3261 		err = -ENOMEM;
3262 		goto out_put_prog;
3263 	}
3264 	bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3265 		      &bpf_tracing_link_lops, prog);
3266 	link->attach_type = prog->expected_attach_type;
3267 	link->link.cookie = bpf_cookie;
3268 
3269 	mutex_lock(&prog->aux->dst_mutex);
3270 
3271 	/* There are a few possible cases here:
3272 	 *
3273 	 * - if prog->aux->dst_trampoline is set, the program was just loaded
3274 	 *   and not yet attached to anything, so we can use the values stored
3275 	 *   in prog->aux
3276 	 *
3277 	 * - if prog->aux->dst_trampoline is NULL, the program has already been
3278 	 *   attached to a target and its initial target was cleared (below)
3279 	 *
3280 	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
3281 	 *   target_btf_id using the link_create API.
3282 	 *
3283 	 * - if tgt_prog == NULL, this function was called using the old
3284 	 *   raw_tracepoint_open API, and we need a target from prog->aux
3285 	 *
3286 	 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
3287 	 *   was detached and is going for re-attachment.
3288 	 */
3289 	if (!prog->aux->dst_trampoline && !tgt_prog) {
3290 		/*
3291 		 * Allow re-attach for TRACING and LSM programs. If it's
3292 		 * currently linked, bpf_trampoline_link_prog will fail.
3293 		 * EXT programs need to specify tgt_prog_fd, so they
3294 		 * re-attach in a separate code path.
3295 		 */
3296 		if (prog->type != BPF_PROG_TYPE_TRACING &&
3297 		    prog->type != BPF_PROG_TYPE_LSM) {
3298 			err = -EINVAL;
3299 			goto out_unlock;
3300 		}
3301 		btf_id = prog->aux->attach_btf_id;
3302 		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3303 	}
3304 
3305 	if (!prog->aux->dst_trampoline ||
3306 	    (key && key != prog->aux->dst_trampoline->key)) {
3307 		/* If there is no saved target, or the specified target is
3308 		 * different from the destination specified at load time, we
3309 		 * need a new trampoline and a check for compatibility
3310 		 */
3311 		struct bpf_attach_target_info tgt_info = {};
3312 
3313 		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3314 					      &tgt_info);
3315 		if (err)
3316 			goto out_unlock;
3317 
3318 		if (tgt_info.tgt_mod) {
3319 			module_put(prog->aux->mod);
3320 			prog->aux->mod = tgt_info.tgt_mod;
3321 		}
3322 
3323 		tr = bpf_trampoline_get(key, &tgt_info);
3324 		if (!tr) {
3325 			err = -ENOMEM;
3326 			goto out_unlock;
3327 		}
3328 	} else {
3329 		/* The caller didn't specify a target, or the target was the
3330 		 * same as the destination supplied during program load. This
3331 		 * means we can reuse the trampoline and reference from program
3332 		 * load time, and there is no need to allocate a new one. This
3333 		 * can only happen once for any program, as the saved values in
3334 		 * prog->aux are cleared below.
3335 		 */
3336 		tr = prog->aux->dst_trampoline;
3337 		tgt_prog = prog->aux->dst_prog;
3338 	}
3339 
3340 	err = bpf_link_prime(&link->link.link, &link_primer);
3341 	if (err)
3342 		goto out_unlock;
3343 
3344 	err = bpf_trampoline_link_prog(&link->link, tr);
3345 	if (err) {
3346 		bpf_link_cleanup(&link_primer);
3347 		link = NULL;
3348 		goto out_unlock;
3349 	}
3350 
3351 	link->tgt_prog = tgt_prog;
3352 	link->trampoline = tr;
3353 
3354 	/* Always clear the trampoline and target prog from prog->aux to make
3355 	 * sure the original attach destination is not kept alive after a
3356 	 * program is (re-)attached to another target.
3357 	 */
3358 	if (prog->aux->dst_prog &&
3359 	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3360 		/* got extra prog ref from syscall, or attaching to different prog */
3361 		bpf_prog_put(prog->aux->dst_prog);
3362 	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3363 		/* we allocated a new trampoline, so free the old one */
3364 		bpf_trampoline_put(prog->aux->dst_trampoline);
3365 
3366 	prog->aux->dst_prog = NULL;
3367 	prog->aux->dst_trampoline = NULL;
3368 	mutex_unlock(&prog->aux->dst_mutex);
3369 
3370 	return bpf_link_settle(&link_primer);
3371 out_unlock:
3372 	if (tr && tr != prog->aux->dst_trampoline)
3373 		bpf_trampoline_put(tr);
3374 	mutex_unlock(&prog->aux->dst_mutex);
3375 	kfree(link);
3376 out_put_prog:
3377 	if (tgt_prog_fd && tgt_prog)
3378 		bpf_prog_put(tgt_prog);
3379 	return err;
3380 }
3381 
3382 struct bpf_raw_tp_link {
3383 	struct bpf_link link;
3384 	struct bpf_raw_event_map *btp;
3385 };
3386 
3387 static void bpf_raw_tp_link_release(struct bpf_link *link)
3388 {
3389 	struct bpf_raw_tp_link *raw_tp =
3390 		container_of(link, struct bpf_raw_tp_link, link);
3391 
3392 	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
3393 	bpf_put_raw_tracepoint(raw_tp->btp);
3394 }
3395 
3396 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3397 {
3398 	struct bpf_raw_tp_link *raw_tp =
3399 		container_of(link, struct bpf_raw_tp_link, link);
3400 
3401 	kfree(raw_tp);
3402 }
3403 
3404 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3405 					struct seq_file *seq)
3406 {
3407 	struct bpf_raw_tp_link *raw_tp_link =
3408 		container_of(link, struct bpf_raw_tp_link, link);
3409 
3410 	seq_printf(seq,
3411 		   "tp_name:\t%s\n",
3412 		   raw_tp_link->btp->tp->name);
3413 }
3414 
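/* Copy the NUL-terminated string buf (len bytes plus NUL) into the
 * ulen-byte user buffer ubuf. If it does not fit, copy a truncated but
 * still NUL-terminated prefix and return -ENOSPC to signal truncation.
 */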
3415 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3416 			    u32 len)
3417 {
3418 	if (ulen >= len + 1) {
3419 		if (copy_to_user(ubuf, buf, len + 1))
3420 			return -EFAULT;
3421 	} else {
3422 		char zero = '\0';
3423 
3424 		if (copy_to_user(ubuf, buf, ulen - 1))
3425 			return -EFAULT;
3426 		if (put_user(zero, ubuf + ulen - 1))
3427 			return -EFAULT;
3428 		return -ENOSPC;
3429 	}
3430 
3431 	return 0;
3432 }
3433 
3434 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3435 					  struct bpf_link_info *info)
3436 {
3437 	struct bpf_raw_tp_link *raw_tp_link =
3438 		container_of(link, struct bpf_raw_tp_link, link);
3439 	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3440 	const char *tp_name = raw_tp_link->btp->tp->name;
3441 	u32 ulen = info->raw_tracepoint.tp_name_len;
3442 	size_t tp_len = strlen(tp_name);
3443 
3444 	if (!ulen ^ !ubuf)
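	/* the name buffer and its length must be supplied together or not at all */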
3445 		return -EINVAL;
3446 
3447 	info->raw_tracepoint.tp_name_len = tp_len + 1;
3448 
3449 	if (!ubuf)
3450 		return 0;
3451 
3452 	return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3453 }
3454 
3455 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3456 	.release = bpf_raw_tp_link_release,
3457 	.dealloc = bpf_raw_tp_link_dealloc,
3458 	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3459 	.fill_link_info = bpf_raw_tp_link_fill_link_info,
3460 };
3461 
3462 #ifdef CONFIG_PERF_EVENTS
3463 struct bpf_perf_link {
3464 	struct bpf_link link;
3465 	struct file *perf_file;
3466 };
3467 
3468 static void bpf_perf_link_release(struct bpf_link *link)
3469 {
3470 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3471 	struct perf_event *event = perf_link->perf_file->private_data;
3472 
3473 	perf_event_free_bpf_prog(event);
3474 	fput(perf_link->perf_file);
3475 }
3476 
3477 static void bpf_perf_link_dealloc(struct bpf_link *link)
3478 {
3479 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3480 
3481 	kfree(perf_link);
3482 }
3483 
3484 static int bpf_perf_link_fill_common(const struct perf_event *event,
3485 				     char __user *uname, u32 ulen,
3486 				     u64 *probe_offset, u64 *probe_addr,
3487 				     u32 *fd_type, unsigned long *missed)
3488 {
3489 	const char *buf;
3490 	u32 prog_id;
3491 	size_t len;
3492 	int err;
3493 
3494 	if (!ulen ^ !uname)
3495 		return -EINVAL;
3496 
3497 	err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3498 				      probe_offset, probe_addr, missed);
3499 	if (err)
3500 		return err;
3501 	if (!uname)
3502 		return 0;
3503 	if (buf) {
3504 		len = strlen(buf);
3505 		err = bpf_copy_to_user(uname, buf, ulen, len);
3506 		if (err)
3507 			return err;
3508 	} else {
3509 		char zero = '\0';
3510 
3511 		if (put_user(zero, uname))
3512 			return -EFAULT;
3513 	}
3514 	return 0;
3515 }
3516 
3517 #ifdef CONFIG_KPROBE_EVENTS
3518 static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3519 				     struct bpf_link_info *info)
3520 {
3521 	unsigned long missed;
3522 	char __user *uname;
3523 	u64 addr, offset;
3524 	u32 ulen, type;
3525 	int err;
3526 
3527 	uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3528 	ulen = info->perf_event.kprobe.name_len;
3529 	err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3530 					&type, &missed);
3531 	if (err)
3532 		return err;
3533 	if (type == BPF_FD_TYPE_KRETPROBE)
3534 		info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3535 	else
3536 		info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3537 
3538 	info->perf_event.kprobe.offset = offset;
3539 	info->perf_event.kprobe.missed = missed;
3540 	if (!kallsyms_show_value(current_cred()))
3541 		addr = 0;
3542 	info->perf_event.kprobe.addr = addr;
3543 	return 0;
3544 }
3545 #endif
3546 
3547 #ifdef CONFIG_UPROBE_EVENTS
3548 static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3549 				     struct bpf_link_info *info)
3550 {
3551 	char __user *uname;
3552 	u64 addr, offset;
3553 	u32 ulen, type;
3554 	int err;
3555 
3556 	uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3557 	ulen = info->perf_event.uprobe.name_len;
3558 	err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3559 					&type, NULL);
3560 	if (err)
3561 		return err;
3562 
3563 	if (type == BPF_FD_TYPE_URETPROBE)
3564 		info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3565 	else
3566 		info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3567 	info->perf_event.uprobe.offset = offset;
3568 	return 0;
3569 }
3570 #endif
3571 
3572 static int bpf_perf_link_fill_probe(const struct perf_event *event,
3573 				    struct bpf_link_info *info)
3574 {
3575 #ifdef CONFIG_KPROBE_EVENTS
3576 	if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3577 		return bpf_perf_link_fill_kprobe(event, info);
3578 #endif
3579 #ifdef CONFIG_UPROBE_EVENTS
3580 	if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3581 		return bpf_perf_link_fill_uprobe(event, info);
3582 #endif
3583 	return -EOPNOTSUPP;
3584 }
3585 
3586 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3587 					 struct bpf_link_info *info)
3588 {
3589 	char __user *uname;
3590 	u32 ulen;
3591 
3592 	uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3593 	ulen = info->perf_event.tracepoint.name_len;
3594 	info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3595 	return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
3596 }
3597 
3598 static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3599 					 struct bpf_link_info *info)
3600 {
3601 	info->perf_event.event.type = event->attr.type;
3602 	info->perf_event.event.config = event->attr.config;
3603 	info->perf_event.type = BPF_PERF_EVENT_EVENT;
3604 	return 0;
3605 }
3606 
3607 static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3608 					struct bpf_link_info *info)
3609 {
3610 	struct bpf_perf_link *perf_link;
3611 	const struct perf_event *event;
3612 
3613 	perf_link = container_of(link, struct bpf_perf_link, link);
3614 	event = perf_get_event(perf_link->perf_file);
3615 	if (IS_ERR(event))
3616 		return PTR_ERR(event);
3617 
3618 	switch (event->prog->type) {
3619 	case BPF_PROG_TYPE_PERF_EVENT:
3620 		return bpf_perf_link_fill_perf_event(event, info);
3621 	case BPF_PROG_TYPE_TRACEPOINT:
3622 		return bpf_perf_link_fill_tracepoint(event, info);
3623 	case BPF_PROG_TYPE_KPROBE:
3624 		return bpf_perf_link_fill_probe(event, info);
3625 	default:
3626 		return -EOPNOTSUPP;
3627 	}
3628 }
3629 
3630 static const struct bpf_link_ops bpf_perf_link_lops = {
3631 	.release = bpf_perf_link_release,
3632 	.dealloc = bpf_perf_link_dealloc,
3633 	.fill_link_info = bpf_perf_link_fill_link_info,
3634 };
3635 
3636 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3637 {
3638 	struct bpf_link_primer link_primer;
3639 	struct bpf_perf_link *link;
3640 	struct perf_event *event;
3641 	struct file *perf_file;
3642 	int err;
3643 
3644 	if (attr->link_create.flags)
3645 		return -EINVAL;
3646 
3647 	perf_file = perf_event_get(attr->link_create.target_fd);
3648 	if (IS_ERR(perf_file))
3649 		return PTR_ERR(perf_file);
3650 
3651 	link = kzalloc(sizeof(*link), GFP_USER);
3652 	if (!link) {
3653 		err = -ENOMEM;
3654 		goto out_put_file;
3655 	}
3656 	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
3657 	link->perf_file = perf_file;
3658 
3659 	err = bpf_link_prime(&link->link, &link_primer);
3660 	if (err) {
3661 		kfree(link);
3662 		goto out_put_file;
3663 	}
3664 
3665 	event = perf_file->private_data;
3666 	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
3667 	if (err) {
3668 		bpf_link_cleanup(&link_primer);
3669 		goto out_put_file;
3670 	}
3671 	/* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
3672 	bpf_prog_inc(prog);
3673 
3674 	return bpf_link_settle(&link_primer);
3675 
3676 out_put_file:
3677 	fput(perf_file);
3678 	return err;
3679 }
3680 #else
3681 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3682 {
3683 	return -EOPNOTSUPP;
3684 }
3685 #endif /* CONFIG_PERF_EVENTS */
3686 
3687 static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3688 				  const char __user *user_tp_name)
3689 {
3690 	struct bpf_link_primer link_primer;
3691 	struct bpf_raw_tp_link *link;
3692 	struct bpf_raw_event_map *btp;
3693 	const char *tp_name;
3694 	char buf[128];
3695 	int err;
3696 
3697 	switch (prog->type) {
3698 	case BPF_PROG_TYPE_TRACING:
3699 	case BPF_PROG_TYPE_EXT:
3700 	case BPF_PROG_TYPE_LSM:
3701 		if (user_tp_name)
3702 			/* The attach point for this category of programs
3703 			 * should be specified via btf_id during program load.
3704 			 */
3705 			return -EINVAL;
3706 		if (prog->type == BPF_PROG_TYPE_TRACING &&
3707 		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3708 			tp_name = prog->aux->attach_func_name;
3709 			break;
3710 		}
3711 		return bpf_tracing_prog_attach(prog, 0, 0, 0);
3712 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
3713 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3714 		if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3715 			return -EFAULT;
3716 		buf[sizeof(buf) - 1] = 0;
3717 		tp_name = buf;
3718 		break;
3719 	default:
3720 		return -EINVAL;
3721 	}
3722 
3723 	btp = bpf_get_raw_tracepoint(tp_name);
3724 	if (!btp)
3725 		return -ENOENT;
3726 
3727 	link = kzalloc(sizeof(*link), GFP_USER);
3728 	if (!link) {
3729 		err = -ENOMEM;
3730 		goto out_put_btp;
3731 	}
3732 	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3733 		      &bpf_raw_tp_link_lops, prog);
3734 	link->btp = btp;
3735 
3736 	err = bpf_link_prime(&link->link, &link_primer);
3737 	if (err) {
3738 		kfree(link);
3739 		goto out_put_btp;
3740 	}
3741 
3742 	err = bpf_probe_register(link->btp, prog);
3743 	if (err) {
3744 		bpf_link_cleanup(&link_primer);
3745 		goto out_put_btp;
3746 	}
3747 
3748 	return bpf_link_settle(&link_primer);
3749 
3750 out_put_btp:
3751 	bpf_put_raw_tracepoint(btp);
3752 	return err;
3753 }
3754 
3755 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
3756 
3757 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3758 {
3759 	struct bpf_prog *prog;
3760 	int fd;
3761 
3762 	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
3763 		return -EINVAL;
3764 
3765 	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
3766 	if (IS_ERR(prog))
3767 		return PTR_ERR(prog);
3768 
3769 	fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
3770 	if (fd < 0)
3771 		bpf_prog_put(prog);
3772 	return fd;
3773 }
3774 
3775 static enum bpf_prog_type
3776 attach_type_to_prog_type(enum bpf_attach_type attach_type)
3777 {
3778 	switch (attach_type) {
3779 	case BPF_CGROUP_INET_INGRESS:
3780 	case BPF_CGROUP_INET_EGRESS:
3781 		return BPF_PROG_TYPE_CGROUP_SKB;
3782 	case BPF_CGROUP_INET_SOCK_CREATE:
3783 	case BPF_CGROUP_INET_SOCK_RELEASE:
3784 	case BPF_CGROUP_INET4_POST_BIND:
3785 	case BPF_CGROUP_INET6_POST_BIND:
3786 		return BPF_PROG_TYPE_CGROUP_SOCK;
3787 	case BPF_CGROUP_INET4_BIND:
3788 	case BPF_CGROUP_INET6_BIND:
3789 	case BPF_CGROUP_INET4_CONNECT:
3790 	case BPF_CGROUP_INET6_CONNECT:
3791 	case BPF_CGROUP_UNIX_CONNECT:
3792 	case BPF_CGROUP_INET4_GETPEERNAME:
3793 	case BPF_CGROUP_INET6_GETPEERNAME:
3794 	case BPF_CGROUP_UNIX_GETPEERNAME:
3795 	case BPF_CGROUP_INET4_GETSOCKNAME:
3796 	case BPF_CGROUP_INET6_GETSOCKNAME:
3797 	case BPF_CGROUP_UNIX_GETSOCKNAME:
3798 	case BPF_CGROUP_UDP4_SENDMSG:
3799 	case BPF_CGROUP_UDP6_SENDMSG:
3800 	case BPF_CGROUP_UNIX_SENDMSG:
3801 	case BPF_CGROUP_UDP4_RECVMSG:
3802 	case BPF_CGROUP_UDP6_RECVMSG:
3803 	case BPF_CGROUP_UNIX_RECVMSG:
3804 		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3805 	case BPF_CGROUP_SOCK_OPS:
3806 		return BPF_PROG_TYPE_SOCK_OPS;
3807 	case BPF_CGROUP_DEVICE:
3808 		return BPF_PROG_TYPE_CGROUP_DEVICE;
3809 	case BPF_SK_MSG_VERDICT:
3810 		return BPF_PROG_TYPE_SK_MSG;
3811 	case BPF_SK_SKB_STREAM_PARSER:
3812 	case BPF_SK_SKB_STREAM_VERDICT:
3813 	case BPF_SK_SKB_VERDICT:
3814 		return BPF_PROG_TYPE_SK_SKB;
3815 	case BPF_LIRC_MODE2:
3816 		return BPF_PROG_TYPE_LIRC_MODE2;
3817 	case BPF_FLOW_DISSECTOR:
3818 		return BPF_PROG_TYPE_FLOW_DISSECTOR;
3819 	case BPF_CGROUP_SYSCTL:
3820 		return BPF_PROG_TYPE_CGROUP_SYSCTL;
3821 	case BPF_CGROUP_GETSOCKOPT:
3822 	case BPF_CGROUP_SETSOCKOPT:
3823 		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3824 	case BPF_TRACE_ITER:
3825 	case BPF_TRACE_RAW_TP:
3826 	case BPF_TRACE_FENTRY:
3827 	case BPF_TRACE_FEXIT:
3828 	case BPF_MODIFY_RETURN:
3829 		return BPF_PROG_TYPE_TRACING;
3830 	case BPF_LSM_MAC:
3831 		return BPF_PROG_TYPE_LSM;
3832 	case BPF_SK_LOOKUP:
3833 		return BPF_PROG_TYPE_SK_LOOKUP;
3834 	case BPF_XDP:
3835 		return BPF_PROG_TYPE_XDP;
3836 	case BPF_LSM_CGROUP:
3837 		return BPF_PROG_TYPE_LSM;
3838 	case BPF_TCX_INGRESS:
3839 	case BPF_TCX_EGRESS:
3840 	case BPF_NETKIT_PRIMARY:
3841 	case BPF_NETKIT_PEER:
3842 		return BPF_PROG_TYPE_SCHED_CLS;
3843 	default:
3844 		return BPF_PROG_TYPE_UNSPEC;
3845 	}
3846 }
3847 
3848 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
3849 					     enum bpf_attach_type attach_type)
3850 {
3851 	enum bpf_prog_type ptype;
3852 
3853 	switch (prog->type) {
3854 	case BPF_PROG_TYPE_CGROUP_SOCK:
3855 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3856 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3857 	case BPF_PROG_TYPE_SK_LOOKUP:
3858 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
3859 	case BPF_PROG_TYPE_CGROUP_SKB:
3860 		if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
3861 			/* cg-skb progs can be loaded by an unprivileged user,
3862 			 * so check permissions at attach time.
3863 			 */
3864 			return -EPERM;
3865 		return prog->enforce_expected_attach_type &&
3866 			prog->expected_attach_type != attach_type ?
3867 			-EINVAL : 0;
3868 	case BPF_PROG_TYPE_EXT:
3869 		return 0;
3870 	case BPF_PROG_TYPE_NETFILTER:
3871 		if (attach_type != BPF_NETFILTER)
3872 			return -EINVAL;
3873 		return 0;
3874 	case BPF_PROG_TYPE_PERF_EVENT:
3875 	case BPF_PROG_TYPE_TRACEPOINT:
3876 		if (attach_type != BPF_PERF_EVENT)
3877 			return -EINVAL;
3878 		return 0;
3879 	case BPF_PROG_TYPE_KPROBE:
3880 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
3881 		    attach_type != BPF_TRACE_KPROBE_MULTI)
3882 			return -EINVAL;
3883 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
3884 		    attach_type != BPF_TRACE_UPROBE_MULTI)
3885 			return -EINVAL;
3886 		if (attach_type != BPF_PERF_EVENT &&
3887 		    attach_type != BPF_TRACE_KPROBE_MULTI &&
3888 		    attach_type != BPF_TRACE_UPROBE_MULTI)
3889 			return -EINVAL;
3890 		return 0;
3891 	case BPF_PROG_TYPE_SCHED_CLS:
3892 		if (attach_type != BPF_TCX_INGRESS &&
3893 		    attach_type != BPF_TCX_EGRESS &&
3894 		    attach_type != BPF_NETKIT_PRIMARY &&
3895 		    attach_type != BPF_NETKIT_PEER)
3896 			return -EINVAL;
3897 		return 0;
3898 	default:
3899 		ptype = attach_type_to_prog_type(attach_type);
3900 		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
3901 			return -EINVAL;
3902 		return 0;
3903 	}
3904 }
3905 
3906 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision
3907 
3908 #define BPF_F_ATTACH_MASK_BASE	\
3909 	(BPF_F_ALLOW_OVERRIDE |	\
3910 	 BPF_F_ALLOW_MULTI |	\
3911 	 BPF_F_REPLACE)
3912 
3913 #define BPF_F_ATTACH_MASK_MPROG	\
3914 	(BPF_F_REPLACE |	\
3915 	 BPF_F_BEFORE |		\
3916 	 BPF_F_AFTER |		\
3917 	 BPF_F_ID |		\
3918 	 BPF_F_LINK)
3919 
3920 static int bpf_prog_attach(const union bpf_attr *attr)
3921 {
3922 	enum bpf_prog_type ptype;
3923 	struct bpf_prog *prog;
3924 	int ret;
3925 
3926 	if (CHECK_ATTR(BPF_PROG_ATTACH))
3927 		return -EINVAL;
3928 
3929 	ptype = attach_type_to_prog_type(attr->attach_type);
3930 	if (ptype == BPF_PROG_TYPE_UNSPEC)
3931 		return -EINVAL;
3932 	if (bpf_mprog_supported(ptype)) {
3933 		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
3934 			return -EINVAL;
3935 	} else {
3936 		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
3937 			return -EINVAL;
3938 		if (attr->relative_fd ||
3939 		    attr->expected_revision)
3940 			return -EINVAL;
3941 	}
3942 
3943 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3944 	if (IS_ERR(prog))
3945 		return PTR_ERR(prog);
3946 
3947 	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3948 		bpf_prog_put(prog);
3949 		return -EINVAL;
3950 	}
3951 
3952 	switch (ptype) {
3953 	case BPF_PROG_TYPE_SK_SKB:
3954 	case BPF_PROG_TYPE_SK_MSG:
3955 		ret = sock_map_get_from_fd(attr, prog);
3956 		break;
3957 	case BPF_PROG_TYPE_LIRC_MODE2:
3958 		ret = lirc_prog_attach(attr, prog);
3959 		break;
3960 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3961 		ret = netns_bpf_prog_attach(attr, prog);
3962 		break;
3963 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3964 	case BPF_PROG_TYPE_CGROUP_SKB:
3965 	case BPF_PROG_TYPE_CGROUP_SOCK:
3966 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3967 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3968 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3969 	case BPF_PROG_TYPE_SOCK_OPS:
3970 	case BPF_PROG_TYPE_LSM:
3971 		if (ptype == BPF_PROG_TYPE_LSM &&
3972 		    prog->expected_attach_type != BPF_LSM_CGROUP)
3973 			ret = -EINVAL;
3974 		else
3975 			ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3976 		break;
3977 	case BPF_PROG_TYPE_SCHED_CLS:
3978 		if (attr->attach_type == BPF_TCX_INGRESS ||
3979 		    attr->attach_type == BPF_TCX_EGRESS)
3980 			ret = tcx_prog_attach(attr, prog);
3981 		else
3982 			ret = netkit_prog_attach(attr, prog);
3983 		break;
3984 	default:
3985 		ret = -EINVAL;
3986 	}
3987 
3988 	if (ret)
3989 		bpf_prog_put(prog);
3990 	return ret;
3991 }
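
/*
 * Illustrative userspace sketch, not part of this file: attaching a
 * cgroup skb program via BPF_PROG_ATTACH (cg-skb needs CAP_NET_ADMIN at
 * attach time, see above).  cgroup_fd is an open cgroup directory and
 * prog_fd a loaded BPF_PROG_TYPE_CGROUP_SKB program:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	int err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * Failure returns -1 with errno set, e.g. EINVAL for a prog/attach-type
 * mismatch rejected by the checks above.
 */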
3992 
3993 #define BPF_PROG_DETACH_LAST_FIELD expected_revision
3994 
3995 static int bpf_prog_detach(const union bpf_attr *attr)
3996 {
3997 	struct bpf_prog *prog = NULL;
3998 	enum bpf_prog_type ptype;
3999 	int ret;
4000 
4001 	if (CHECK_ATTR(BPF_PROG_DETACH))
4002 		return -EINVAL;
4003 
4004 	ptype = attach_type_to_prog_type(attr->attach_type);
4005 	if (bpf_mprog_supported(ptype)) {
4006 		if (ptype == BPF_PROG_TYPE_UNSPEC)
4007 			return -EINVAL;
4008 		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
4009 			return -EINVAL;
4010 		if (attr->attach_bpf_fd) {
4011 			prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
4012 			if (IS_ERR(prog))
4013 				return PTR_ERR(prog);
4014 		}
4015 	} else if (attr->attach_flags ||
4016 		   attr->relative_fd ||
4017 		   attr->expected_revision) {
4018 		return -EINVAL;
4019 	}
4020 
4021 	switch (ptype) {
4022 	case BPF_PROG_TYPE_SK_MSG:
4023 	case BPF_PROG_TYPE_SK_SKB:
4024 		ret = sock_map_prog_detach(attr, ptype);
4025 		break;
4026 	case BPF_PROG_TYPE_LIRC_MODE2:
4027 		ret = lirc_prog_detach(attr);
4028 		break;
4029 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4030 		ret = netns_bpf_prog_detach(attr, ptype);
4031 		break;
4032 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4033 	case BPF_PROG_TYPE_CGROUP_SKB:
4034 	case BPF_PROG_TYPE_CGROUP_SOCK:
4035 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4036 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4037 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4038 	case BPF_PROG_TYPE_SOCK_OPS:
4039 	case BPF_PROG_TYPE_LSM:
4040 		ret = cgroup_bpf_prog_detach(attr, ptype);
4041 		break;
4042 	case BPF_PROG_TYPE_SCHED_CLS:
4043 		if (attr->attach_type == BPF_TCX_INGRESS ||
4044 		    attr->attach_type == BPF_TCX_EGRESS)
4045 			ret = tcx_prog_detach(attr, prog);
4046 		else
4047 			ret = netkit_prog_detach(attr, prog);
4048 		break;
4049 	default:
4050 		ret = -EINVAL;
4051 	}
4052 
4053 	if (prog)
4054 		bpf_prog_put(prog);
4055 	return ret;
4056 }
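
/*
 * Illustrative detach counterpart, not part of this file; for a cgroup
 * program attached with BPF_F_ALLOW_MULTI, the prog fd identifies which
 * one to remove:
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
 */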
4057 
4058 #define BPF_PROG_QUERY_LAST_FIELD query.revision
4059 
4060 static int bpf_prog_query(const union bpf_attr *attr,
4061 			  union bpf_attr __user *uattr)
4062 {
4063 	if (!bpf_net_capable())
4064 		return -EPERM;
4065 	if (CHECK_ATTR(BPF_PROG_QUERY))
4066 		return -EINVAL;
4067 	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
4068 		return -EINVAL;
4069 
4070 	switch (attr->query.attach_type) {
4071 	case BPF_CGROUP_INET_INGRESS:
4072 	case BPF_CGROUP_INET_EGRESS:
4073 	case BPF_CGROUP_INET_SOCK_CREATE:
4074 	case BPF_CGROUP_INET_SOCK_RELEASE:
4075 	case BPF_CGROUP_INET4_BIND:
4076 	case BPF_CGROUP_INET6_BIND:
4077 	case BPF_CGROUP_INET4_POST_BIND:
4078 	case BPF_CGROUP_INET6_POST_BIND:
4079 	case BPF_CGROUP_INET4_CONNECT:
4080 	case BPF_CGROUP_INET6_CONNECT:
4081 	case BPF_CGROUP_UNIX_CONNECT:
4082 	case BPF_CGROUP_INET4_GETPEERNAME:
4083 	case BPF_CGROUP_INET6_GETPEERNAME:
4084 	case BPF_CGROUP_UNIX_GETPEERNAME:
4085 	case BPF_CGROUP_INET4_GETSOCKNAME:
4086 	case BPF_CGROUP_INET6_GETSOCKNAME:
4087 	case BPF_CGROUP_UNIX_GETSOCKNAME:
4088 	case BPF_CGROUP_UDP4_SENDMSG:
4089 	case BPF_CGROUP_UDP6_SENDMSG:
4090 	case BPF_CGROUP_UNIX_SENDMSG:
4091 	case BPF_CGROUP_UDP4_RECVMSG:
4092 	case BPF_CGROUP_UDP6_RECVMSG:
4093 	case BPF_CGROUP_UNIX_RECVMSG:
4094 	case BPF_CGROUP_SOCK_OPS:
4095 	case BPF_CGROUP_DEVICE:
4096 	case BPF_CGROUP_SYSCTL:
4097 	case BPF_CGROUP_GETSOCKOPT:
4098 	case BPF_CGROUP_SETSOCKOPT:
4099 	case BPF_LSM_CGROUP:
4100 		return cgroup_bpf_prog_query(attr, uattr);
4101 	case BPF_LIRC_MODE2:
4102 		return lirc_prog_query(attr, uattr);
4103 	case BPF_FLOW_DISSECTOR:
4104 	case BPF_SK_LOOKUP:
4105 		return netns_bpf_prog_query(attr, uattr);
4106 	case BPF_SK_SKB_STREAM_PARSER:
4107 	case BPF_SK_SKB_STREAM_VERDICT:
4108 	case BPF_SK_MSG_VERDICT:
4109 	case BPF_SK_SKB_VERDICT:
4110 		return sock_map_bpf_prog_query(attr, uattr);
4111 	case BPF_TCX_INGRESS:
4112 	case BPF_TCX_EGRESS:
4113 		return tcx_prog_query(attr, uattr);
4114 	case BPF_NETKIT_PRIMARY:
4115 	case BPF_NETKIT_PEER:
4116 		return netkit_prog_query(attr, uattr);
4117 	default:
4118 		return -EINVAL;
4119 	}
4120 }
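
/*
 * Illustrative userspace sketch, not part of this file: listing the
 * programs attached to a cgroup via BPF_PROG_QUERY, assuming ids[] is
 * large enough:
 *
 *	__u32 ids[64];
 *	union bpf_attr attr = {};
 *
 *	attr.query.target_fd = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.query.prog_ids = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt = 64;
 *	if (!syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr)))
 *		... attr.query.prog_cnt now holds the number of ids written ...
 */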
4121 
4122 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
4123 
4124 static int bpf_prog_test_run(const union bpf_attr *attr,
4125 			     union bpf_attr __user *uattr)
4126 {
4127 	struct bpf_prog *prog;
4128 	int ret = -ENOTSUPP;
4129 
4130 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
4131 		return -EINVAL;
4132 
4133 	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
4134 	    (!attr->test.ctx_size_in && attr->test.ctx_in))
4135 		return -EINVAL;
4136 
4137 	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
4138 	    (!attr->test.ctx_size_out && attr->test.ctx_out))
4139 		return -EINVAL;
4140 
4141 	prog = bpf_prog_get(attr->test.prog_fd);
4142 	if (IS_ERR(prog))
4143 		return PTR_ERR(prog);
4144 
4145 	if (prog->aux->ops->test_run)
4146 		ret = prog->aux->ops->test_run(prog, attr, uattr);
4147 
4148 	bpf_prog_put(prog);
4149 	return ret;
4150 }
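
/*
 * Illustrative userspace sketch, not part of this file: running a program
 * once over a synthetic packet via BPF_PROG_TEST_RUN.  Only valid for
 * program types whose ops implement ->test_run (e.g. XDP):
 *
 *	char in[64] = {0}, out[256];
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)in;
 *	attr.test.data_size_in = sizeof(in);
 *	attr.test.data_out = (__u64)(unsigned long)out;
 *	attr.test.data_size_out = sizeof(out);
 *	attr.test.repeat = 1;
 *	if (!syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
 *		... attr.test.retval and attr.test.duration hold the results ...
 */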
4151 
4152 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
4153 
4154 static int bpf_obj_get_next_id(const union bpf_attr *attr,
4155 			       union bpf_attr __user *uattr,
4156 			       struct idr *idr,
4157 			       spinlock_t *lock)
4158 {
4159 	u32 next_id = attr->start_id;
4160 	int err = 0;
4161 
4162 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
4163 		return -EINVAL;
4164 
4165 	if (!capable(CAP_SYS_ADMIN))
4166 		return -EPERM;
4167 
4168 	next_id++;
4169 	spin_lock_bh(lock);
4170 	if (!idr_get_next(idr, &next_id))
4171 		err = -ENOENT;
4172 	spin_unlock_bh(lock);
4173 
4174 	if (!err)
4175 		err = put_user(next_id, &uattr->next_id);
4176 
4177 	return err;
4178 }
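
/*
 * Illustrative userspace sketch, not part of this file: walking all loaded
 * program ids (CAP_SYS_ADMIN required; the same pattern works for map, btf
 * and link ids):
 *
 *	union bpf_attr attr = {};
 *
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		... attr.next_id is the id of the next loaded program ...
 *		attr.start_id = attr.next_id;
 *	}
 *
 * The walk ends when the syscall fails with errno == ENOENT.
 */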
4179 
4180 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
4181 {
4182 	struct bpf_map *map;
4183 
4184 	spin_lock_bh(&map_idr_lock);
4185 again:
4186 	map = idr_get_next(&map_idr, id);
4187 	if (map) {
4188 		map = __bpf_map_inc_not_zero(map, false);
4189 		if (IS_ERR(map)) {
4190 			(*id)++;
4191 			goto again;
4192 		}
4193 	}
4194 	spin_unlock_bh(&map_idr_lock);
4195 
4196 	return map;
4197 }
4198 
4199 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
4200 {
4201 	struct bpf_prog *prog;
4202 
4203 	spin_lock_bh(&prog_idr_lock);
4204 again:
4205 	prog = idr_get_next(&prog_idr, id);
4206 	if (prog) {
4207 		prog = bpf_prog_inc_not_zero(prog);
4208 		if (IS_ERR(prog)) {
4209 			(*id)++;
4210 			goto again;
4211 		}
4212 	}
4213 	spin_unlock_bh(&prog_idr_lock);
4214 
4215 	return prog;
4216 }
4217 
4218 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
4219 
4220 struct bpf_prog *bpf_prog_by_id(u32 id)
4221 {
4222 	struct bpf_prog *prog;
4223 
4224 	if (!id)
4225 		return ERR_PTR(-ENOENT);
4226 
4227 	spin_lock_bh(&prog_idr_lock);
4228 	prog = idr_find(&prog_idr, id);
4229 	if (prog)
4230 		prog = bpf_prog_inc_not_zero(prog);
4231 	else
4232 		prog = ERR_PTR(-ENOENT);
4233 	spin_unlock_bh(&prog_idr_lock);
4234 	return prog;
4235 }
4236 
4237 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
4238 {
4239 	struct bpf_prog *prog;
4240 	u32 id = attr->prog_id;
4241 	int fd;
4242 
4243 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
4244 		return -EINVAL;
4245 
4246 	if (!capable(CAP_SYS_ADMIN))
4247 		return -EPERM;
4248 
4249 	prog = bpf_prog_by_id(id);
4250 	if (IS_ERR(prog))
4251 		return PTR_ERR(prog);
4252 
4253 	fd = bpf_prog_new_fd(prog);
4254 	if (fd < 0)
4255 		bpf_prog_put(prog);
4256 
4257 	return fd;
4258 }
4259 
4260 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
4261 
4262 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
4263 {
4264 	struct bpf_map *map;
4265 	u32 id = attr->map_id;
4266 	int f_flags;
4267 	int fd;
4268 
4269 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
4270 	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
4271 		return -EINVAL;
4272 
4273 	if (!capable(CAP_SYS_ADMIN))
4274 		return -EPERM;
4275 
4276 	f_flags = bpf_get_file_flag(attr->open_flags);
4277 	if (f_flags < 0)
4278 		return f_flags;
4279 
4280 	spin_lock_bh(&map_idr_lock);
4281 	map = idr_find(&map_idr, id);
4282 	if (map)
4283 		map = __bpf_map_inc_not_zero(map, true);
4284 	else
4285 		map = ERR_PTR(-ENOENT);
4286 	spin_unlock_bh(&map_idr_lock);
4287 
4288 	if (IS_ERR(map))
4289 		return PTR_ERR(map);
4290 
4291 	fd = bpf_map_new_fd(map, f_flags);
4292 	if (fd < 0)
4293 		bpf_map_put_with_uref(map);
4294 
4295 	return fd;
4296 }
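
/*
 * Illustrative userspace sketch, not part of this file: turning a map id
 * into a read-only fd (CAP_SYS_ADMIN required):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_id = map_id;
 *	attr.open_flags = BPF_F_RDONLY;
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
 */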
4297 
4298 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
4299 					      unsigned long addr, u32 *off,
4300 					      u32 *type)
4301 {
4302 	const struct bpf_map *map;
4303 	int i;
4304 
4305 	mutex_lock(&prog->aux->used_maps_mutex);
4306 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
4307 		map = prog->aux->used_maps[i];
4308 		if (map == (void *)addr) {
4309 			*type = BPF_PSEUDO_MAP_FD;
4310 			goto out;
4311 		}
4312 		if (!map->ops->map_direct_value_meta)
4313 			continue;
4314 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
4315 			*type = BPF_PSEUDO_MAP_VALUE;
4316 			goto out;
4317 		}
4318 	}
4319 	map = NULL;
4320 
4321 out:
4322 	mutex_unlock(&prog->aux->used_maps_mutex);
4323 	return map;
4324 }
4325 
4326 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
4327 					      const struct cred *f_cred)
4328 {
4329 	const struct bpf_map *map;
4330 	struct bpf_insn *insns;
4331 	u32 off, type;
4332 	u64 imm;
4333 	u8 code;
4334 	int i;
4335 
4336 	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
4337 			GFP_USER);
4338 	if (!insns)
4339 		return insns;
4340 
4341 	for (i = 0; i < prog->len; i++) {
4342 		code = insns[i].code;
4343 
4344 		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
4345 			insns[i].code = BPF_JMP | BPF_CALL;
4346 			insns[i].imm = BPF_FUNC_tail_call;
4347 			/* fall-through */
4348 		}
4349 		if (code == (BPF_JMP | BPF_CALL) ||
4350 		    code == (BPF_JMP | BPF_CALL_ARGS)) {
4351 			if (code == (BPF_JMP | BPF_CALL_ARGS))
4352 				insns[i].code = BPF_JMP | BPF_CALL;
4353 			if (!bpf_dump_raw_ok(f_cred))
4354 				insns[i].imm = 0;
4355 			continue;
4356 		}
4357 		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
4358 			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
4359 			continue;
4360 		}
4361 
4362 		if (code != (BPF_LD | BPF_IMM | BPF_DW))
4363 			continue;
4364 
4365 		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
4366 		map = bpf_map_from_imm(prog, imm, &off, &type);
4367 		if (map) {
4368 			insns[i].src_reg = type;
4369 			insns[i].imm = map->id;
4370 			insns[i + 1].imm = off;
4371 			continue;
4372 		}
4373 	}
4374 
4375 	return insns;
4376 }
4377 
4378 static int set_info_rec_size(struct bpf_prog_info *info)
4379 {
4380 	/*
4381 	 * Ensure info.*_rec_size is the same as kernel expected size
4382 	 *
4383 	 * or
4384 	 *
4385 	 * Only allow zero *_rec_size if both _rec_size and _cnt are
4386 	 * zero.  In this case, the kernel will write the expected
4387 	 * _rec_size back into the info struct.
4388 	 */
4389 
4390 	if ((info->nr_func_info || info->func_info_rec_size) &&
4391 	    info->func_info_rec_size != sizeof(struct bpf_func_info))
4392 		return -EINVAL;
4393 
4394 	if ((info->nr_line_info || info->line_info_rec_size) &&
4395 	    info->line_info_rec_size != sizeof(struct bpf_line_info))
4396 		return -EINVAL;
4397 
4398 	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
4399 	    info->jited_line_info_rec_size != sizeof(__u64))
4400 		return -EINVAL;
4401 
4402 	info->func_info_rec_size = sizeof(struct bpf_func_info);
4403 	info->line_info_rec_size = sizeof(struct bpf_line_info);
4404 	info->jited_line_info_rec_size = sizeof(__u64);
4405 
4406 	return 0;
4407 }
4408 
4409 static int bpf_prog_get_info_by_fd(struct file *file,
4410 				   struct bpf_prog *prog,
4411 				   const union bpf_attr *attr,
4412 				   union bpf_attr __user *uattr)
4413 {
4414 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4415 	struct btf *attach_btf = bpf_prog_get_target_btf(prog);
4416 	struct bpf_prog_info info;
4417 	u32 info_len = attr->info.info_len;
4418 	struct bpf_prog_kstats stats;
4419 	char __user *uinsns;
4420 	u32 ulen;
4421 	int err;
4422 
4423 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4424 	if (err)
4425 		return err;
4426 	info_len = min_t(u32, sizeof(info), info_len);
4427 
4428 	memset(&info, 0, sizeof(info));
4429 	if (copy_from_user(&info, uinfo, info_len))
4430 		return -EFAULT;
4431 
4432 	info.type = prog->type;
4433 	info.id = prog->aux->id;
4434 	info.load_time = prog->aux->load_time;
4435 	info.created_by_uid = from_kuid_munged(current_user_ns(),
4436 					       prog->aux->user->uid);
4437 	info.gpl_compatible = prog->gpl_compatible;
4438 
4439 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
4440 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
4441 
4442 	mutex_lock(&prog->aux->used_maps_mutex);
4443 	ulen = info.nr_map_ids;
4444 	info.nr_map_ids = prog->aux->used_map_cnt;
4445 	ulen = min_t(u32, info.nr_map_ids, ulen);
4446 	if (ulen) {
4447 		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
4448 		u32 i;
4449 
4450 		for (i = 0; i < ulen; i++)
4451 			if (put_user(prog->aux->used_maps[i]->id,
4452 				     &user_map_ids[i])) {
4453 				mutex_unlock(&prog->aux->used_maps_mutex);
4454 				return -EFAULT;
4455 			}
4456 	}
4457 	mutex_unlock(&prog->aux->used_maps_mutex);
4458 
4459 	err = set_info_rec_size(&info);
4460 	if (err)
4461 		return err;
4462 
4463 	bpf_prog_get_stats(prog, &stats);
4464 	info.run_time_ns = stats.nsecs;
4465 	info.run_cnt = stats.cnt;
4466 	info.recursion_misses = stats.misses;
4467 
4468 	info.verified_insns = prog->aux->verified_insns;
4469 
4470 	if (!bpf_capable()) {
4471 		info.jited_prog_len = 0;
4472 		info.xlated_prog_len = 0;
4473 		info.nr_jited_ksyms = 0;
4474 		info.nr_jited_func_lens = 0;
4475 		info.nr_func_info = 0;
4476 		info.nr_line_info = 0;
4477 		info.nr_jited_line_info = 0;
4478 		goto done;
4479 	}
4480 
4481 	ulen = info.xlated_prog_len;
4482 	info.xlated_prog_len = bpf_prog_insn_size(prog);
4483 	if (info.xlated_prog_len && ulen) {
4484 		struct bpf_insn *insns_sanitized;
4485 		bool fault;
4486 
4487 		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
4488 			info.xlated_prog_insns = 0;
4489 			goto done;
4490 		}
4491 		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
4492 		if (!insns_sanitized)
4493 			return -ENOMEM;
4494 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
4495 		ulen = min_t(u32, info.xlated_prog_len, ulen);
4496 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
4497 		kfree(insns_sanitized);
4498 		if (fault)
4499 			return -EFAULT;
4500 	}
4501 
4502 	if (bpf_prog_is_offloaded(prog->aux)) {
4503 		err = bpf_prog_offload_info_fill(&info, prog);
4504 		if (err)
4505 			return err;
4506 		goto done;
4507 	}
4508 
4509 	/* NOTE: the following code is supposed to be skipped for offload.
4510 	 * bpf_prog_offload_info_fill() is the place to fill similar fields
4511 	 * for offload.
4512 	 */
4513 	ulen = info.jited_prog_len;
4514 	if (prog->aux->func_cnt) {
4515 		u32 i;
4516 
4517 		info.jited_prog_len = 0;
4518 		for (i = 0; i < prog->aux->func_cnt; i++)
4519 			info.jited_prog_len += prog->aux->func[i]->jited_len;
4520 	} else {
4521 		info.jited_prog_len = prog->jited_len;
4522 	}
4523 
4524 	if (info.jited_prog_len && ulen) {
4525 		if (bpf_dump_raw_ok(file->f_cred)) {
4526 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
4527 			ulen = min_t(u32, info.jited_prog_len, ulen);
4528 
4529 			/* for multi-function programs, copy the JITed
4530 			 * instructions for all the functions
4531 			 */
4532 			if (prog->aux->func_cnt) {
4533 				u32 len, free, i;
4534 				u8 *img;
4535 
4536 				free = ulen;
4537 				for (i = 0; i < prog->aux->func_cnt; i++) {
4538 					len = prog->aux->func[i]->jited_len;
4539 					len = min_t(u32, len, free);
4540 					img = (u8 *) prog->aux->func[i]->bpf_func;
4541 					if (copy_to_user(uinsns, img, len))
4542 						return -EFAULT;
4543 					uinsns += len;
4544 					free -= len;
4545 					if (!free)
4546 						break;
4547 				}
4548 			} else {
4549 				if (copy_to_user(uinsns, prog->bpf_func, ulen))
4550 					return -EFAULT;
4551 			}
4552 		} else {
4553 			info.jited_prog_insns = 0;
4554 		}
4555 	}
4556 
4557 	ulen = info.nr_jited_ksyms;
4558 	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4559 	if (ulen) {
4560 		if (bpf_dump_raw_ok(file->f_cred)) {
4561 			unsigned long ksym_addr;
4562 			u64 __user *user_ksyms;
4563 			u32 i;
4564 
4565 			/* copy the address of the kernel symbol
4566 			 * corresponding to each function
4567 			 */
4568 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
4569 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
4570 			if (prog->aux->func_cnt) {
4571 				for (i = 0; i < ulen; i++) {
4572 					ksym_addr = (unsigned long)
4573 						prog->aux->func[i]->bpf_func;
4574 					if (put_user((u64) ksym_addr,
4575 						     &user_ksyms[i]))
4576 						return -EFAULT;
4577 				}
4578 			} else {
4579 				ksym_addr = (unsigned long) prog->bpf_func;
4580 				if (put_user((u64) ksym_addr, &user_ksyms[0]))
4581 					return -EFAULT;
4582 			}
4583 		} else {
4584 			info.jited_ksyms = 0;
4585 		}
4586 	}
4587 
4588 	ulen = info.nr_jited_func_lens;
4589 	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
4590 	if (ulen) {
4591 		if (bpf_dump_raw_ok(file->f_cred)) {
4592 			u32 __user *user_lens;
4593 			u32 func_len, i;
4594 
4595 			/* copy the JITed image lengths for each function */
4596 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
4597 			user_lens = u64_to_user_ptr(info.jited_func_lens);
4598 			if (prog->aux->func_cnt) {
4599 				for (i = 0; i < ulen; i++) {
4600 					func_len =
4601 						prog->aux->func[i]->jited_len;
4602 					if (put_user(func_len, &user_lens[i]))
4603 						return -EFAULT;
4604 				}
4605 			} else {
4606 				func_len = prog->jited_len;
4607 				if (put_user(func_len, &user_lens[0]))
4608 					return -EFAULT;
4609 			}
4610 		} else {
4611 			info.jited_func_lens = 0;
4612 		}
4613 	}
4614 
4615 	if (prog->aux->btf)
4616 		info.btf_id = btf_obj_id(prog->aux->btf);
4617 	info.attach_btf_id = prog->aux->attach_btf_id;
4618 	if (attach_btf)
4619 		info.attach_btf_obj_id = btf_obj_id(attach_btf);
4620 
4621 	ulen = info.nr_func_info;
4622 	info.nr_func_info = prog->aux->func_info_cnt;
4623 	if (info.nr_func_info && ulen) {
4624 		char __user *user_finfo;
4625 
4626 		user_finfo = u64_to_user_ptr(info.func_info);
4627 		ulen = min_t(u32, info.nr_func_info, ulen);
4628 		if (copy_to_user(user_finfo, prog->aux->func_info,
4629 				 info.func_info_rec_size * ulen))
4630 			return -EFAULT;
4631 	}
4632 
4633 	ulen = info.nr_line_info;
4634 	info.nr_line_info = prog->aux->nr_linfo;
4635 	if (info.nr_line_info && ulen) {
4636 		__u8 __user *user_linfo;
4637 
4638 		user_linfo = u64_to_user_ptr(info.line_info);
4639 		ulen = min_t(u32, info.nr_line_info, ulen);
4640 		if (copy_to_user(user_linfo, prog->aux->linfo,
4641 				 info.line_info_rec_size * ulen))
4642 			return -EFAULT;
4643 	}
4644 
4645 	ulen = info.nr_jited_line_info;
4646 	if (prog->aux->jited_linfo)
4647 		info.nr_jited_line_info = prog->aux->nr_linfo;
4648 	else
4649 		info.nr_jited_line_info = 0;
4650 	if (info.nr_jited_line_info && ulen) {
4651 		if (bpf_dump_raw_ok(file->f_cred)) {
4652 			unsigned long line_addr;
4653 			__u64 __user *user_linfo;
4654 			u32 i;
4655 
4656 			user_linfo = u64_to_user_ptr(info.jited_line_info);
4657 			ulen = min_t(u32, info.nr_jited_line_info, ulen);
4658 			for (i = 0; i < ulen; i++) {
4659 				line_addr = (unsigned long)prog->aux->jited_linfo[i];
4660 				if (put_user((__u64)line_addr, &user_linfo[i]))
4661 					return -EFAULT;
4662 			}
4663 		} else {
4664 			info.jited_line_info = 0;
4665 		}
4666 	}
4667 
4668 	ulen = info.nr_prog_tags;
4669 	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
4670 	if (ulen) {
4671 		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
4672 		u32 i;
4673 
4674 		user_prog_tags = u64_to_user_ptr(info.prog_tags);
4675 		ulen = min_t(u32, info.nr_prog_tags, ulen);
4676 		if (prog->aux->func_cnt) {
4677 			for (i = 0; i < ulen; i++) {
4678 				if (copy_to_user(user_prog_tags[i],
4679 						 prog->aux->func[i]->tag,
4680 						 BPF_TAG_SIZE))
4681 					return -EFAULT;
4682 			}
4683 		} else {
4684 			if (copy_to_user(user_prog_tags[0],
4685 					 prog->tag, BPF_TAG_SIZE))
4686 				return -EFAULT;
4687 		}
4688 	}
4689 
4690 done:
4691 	if (copy_to_user(uinfo, &info, info_len) ||
4692 	    put_user(info_len, &uattr->info.info_len))
4693 		return -EFAULT;
4694 
4695 	return 0;
4696 }
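
/*
 * Illustrative userspace sketch, not part of this file: fetching program
 * metadata via BPF_OBJ_GET_INFO_BY_FD:
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	if (!syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
 *		... info.id, info.name, info.nr_map_ids etc. are now filled ...
 *
 * Array-style fields (map_ids, xlated/jited insns, ...) follow the usual
 * two-call convention: pass buffer pointers and capacities in, then read
 * the actual counts back from the info struct.
 */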
4697 
4698 static int bpf_map_get_info_by_fd(struct file *file,
4699 				  struct bpf_map *map,
4700 				  const union bpf_attr *attr,
4701 				  union bpf_attr __user *uattr)
4702 {
4703 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4704 	struct bpf_map_info info;
4705 	u32 info_len = attr->info.info_len;
4706 	int err;
4707 
4708 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4709 	if (err)
4710 		return err;
4711 	info_len = min_t(u32, sizeof(info), info_len);
4712 
4713 	memset(&info, 0, sizeof(info));
4714 	info.type = map->map_type;
4715 	info.id = map->id;
4716 	info.key_size = map->key_size;
4717 	info.value_size = map->value_size;
4718 	info.max_entries = map->max_entries;
4719 	info.map_flags = map->map_flags;
4720 	info.map_extra = map->map_extra;
4721 	memcpy(info.name, map->name, sizeof(map->name));
4722 
4723 	if (map->btf) {
4724 		info.btf_id = btf_obj_id(map->btf);
4725 		info.btf_key_type_id = map->btf_key_type_id;
4726 		info.btf_value_type_id = map->btf_value_type_id;
4727 	}
4728 	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4729 
4730 	if (bpf_map_is_offloaded(map)) {
4731 		err = bpf_map_offload_info_fill(&info, map);
4732 		if (err)
4733 			return err;
4734 	}
4735 
4736 	if (copy_to_user(uinfo, &info, info_len) ||
4737 	    put_user(info_len, &uattr->info.info_len))
4738 		return -EFAULT;
4739 
4740 	return 0;
4741 }
4742 
4743 static int bpf_btf_get_info_by_fd(struct file *file,
4744 				  struct btf *btf,
4745 				  const union bpf_attr *attr,
4746 				  union bpf_attr __user *uattr)
4747 {
4748 	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4749 	u32 info_len = attr->info.info_len;
4750 	int err;
4751 
4752 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
4753 	if (err)
4754 		return err;
4755 
4756 	return btf_get_info_by_fd(btf, attr, uattr);
4757 }
4758 
4759 static int bpf_link_get_info_by_fd(struct file *file,
4760 				  struct bpf_link *link,
4761 				  const union bpf_attr *attr,
4762 				  union bpf_attr __user *uattr)
4763 {
4764 	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4765 	struct bpf_link_info info;
4766 	u32 info_len = attr->info.info_len;
4767 	int err;
4768 
4769 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4770 	if (err)
4771 		return err;
4772 	info_len = min_t(u32, sizeof(info), info_len);
4773 
4774 	memset(&info, 0, sizeof(info));
4775 	if (copy_from_user(&info, uinfo, info_len))
4776 		return -EFAULT;
4777 
4778 	info.type = link->type;
4779 	info.id = link->id;
4780 	if (link->prog)
4781 		info.prog_id = link->prog->aux->id;
4782 
4783 	if (link->ops->fill_link_info) {
4784 		err = link->ops->fill_link_info(link, &info);
4785 		if (err)
4786 			return err;
4787 	}
4788 
4789 	if (copy_to_user(uinfo, &info, info_len) ||
4790 	    put_user(info_len, &uattr->info.info_len))
4791 		return -EFAULT;
4792 
4793 	return 0;
4794 }
4795 
4797 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
4798 
4799 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
4800 				  union bpf_attr __user *uattr)
4801 {
4802 	int ufd = attr->info.bpf_fd;
4803 	struct fd f;
4804 	int err;
4805 
4806 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
4807 		return -EINVAL;
4808 
4809 	f = fdget(ufd);
4810 	if (!f.file)
4811 		return -EBADFD;
4812 
4813 	if (f.file->f_op == &bpf_prog_fops)
4814 		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
4815 					      uattr);
4816 	else if (f.file->f_op == &bpf_map_fops)
4817 		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
4818 					     uattr);
4819 	else if (f.file->f_op == &btf_fops)
4820 		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
4821 	else if (f.file->f_op == &bpf_link_fops)
4822 		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
4823 					      attr, uattr);
4824 	else
4825 		err = -EINVAL;
4826 
4827 	fdput(f);
4828 	return err;
4829 }
4830 
4831 #define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
4832 
4833 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
4834 {
4835 	struct bpf_token *token = NULL;
4836 
4837 	if (CHECK_ATTR(BPF_BTF_LOAD))
4838 		return -EINVAL;
4839 
4840 	if (attr->btf_token_fd) {
4841 		token = bpf_token_get_from_fd(attr->btf_token_fd);
4842 		if (IS_ERR(token))
4843 			return PTR_ERR(token);
4844 		if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
4845 			bpf_token_put(token);
4846 			token = NULL;
4847 		}
4848 	}
4849 
4850 	if (!bpf_token_capable(token, CAP_BPF)) {
4851 		bpf_token_put(token);
4852 		return -EPERM;
4853 	}
4854 
4855 	bpf_token_put(token);
4856 
4857 	return btf_new_fd(attr, uattr, uattr_size);
4858 }
4859 
4860 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
4861 
4862 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
4863 {
4864 	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
4865 		return -EINVAL;
4866 
4867 	if (!capable(CAP_SYS_ADMIN))
4868 		return -EPERM;
4869 
4870 	return btf_get_fd_by_id(attr->btf_id);
4871 }
4872 
4873 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
4874 				    union bpf_attr __user *uattr,
4875 				    u32 prog_id, u32 fd_type,
4876 				    const char *buf, u64 probe_offset,
4877 				    u64 probe_addr)
4878 {
4879 	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
4880 	u32 len = buf ? strlen(buf) : 0, input_len;
4881 	int err = 0;
4882 
4883 	if (put_user(len, &uattr->task_fd_query.buf_len))
4884 		return -EFAULT;
4885 	input_len = attr->task_fd_query.buf_len;
4886 	if (input_len && ubuf) {
4887 		if (!len) {
4888 			/* nothing to copy, just make ubuf NULL terminated */
4889 			char zero = '\0';
4890 
4891 			if (put_user(zero, ubuf))
4892 				return -EFAULT;
4893 		} else if (input_len >= len + 1) {
4894 			/* ubuf can hold the string with NULL terminator */
4895 			if (copy_to_user(ubuf, buf, len + 1))
4896 				return -EFAULT;
4897 		} else {
4898 			/* ubuf cannot hold the string with NULL terminator,
4899 			 * do a partial copy with NULL terminator.
4900 			 */
4901 			char zero = '\0';
4902 
4903 			err = -ENOSPC;
4904 			if (copy_to_user(ubuf, buf, input_len - 1))
4905 				return -EFAULT;
4906 			if (put_user(zero, ubuf + input_len - 1))
4907 				return -EFAULT;
4908 		}
4909 	}
4910 
4911 	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
4912 	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
4913 	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
4914 	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
4915 		return -EFAULT;
4916 
4917 	return err;
4918 }
4919 
4920 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
4921 
4922 static int bpf_task_fd_query(const union bpf_attr *attr,
4923 			     union bpf_attr __user *uattr)
4924 {
4925 	pid_t pid = attr->task_fd_query.pid;
4926 	u32 fd = attr->task_fd_query.fd;
4927 	const struct perf_event *event;
4928 	struct task_struct *task;
4929 	struct file *file;
4930 	int err;
4931 
4932 	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
4933 		return -EINVAL;
4934 
4935 	if (!capable(CAP_SYS_ADMIN))
4936 		return -EPERM;
4937 
4938 	if (attr->task_fd_query.flags != 0)
4939 		return -EINVAL;
4940 
4941 	rcu_read_lock();
4942 	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
4943 	rcu_read_unlock();
4944 	if (!task)
4945 		return -ENOENT;
4946 
4947 	err = 0;
4948 	file = fget_task(task, fd);
4949 	put_task_struct(task);
4950 	if (!file)
4951 		return -EBADF;
4952 
4953 	if (file->f_op == &bpf_link_fops) {
4954 		struct bpf_link *link = file->private_data;
4955 
4956 		if (link->ops == &bpf_raw_tp_link_lops) {
4957 			struct bpf_raw_tp_link *raw_tp =
4958 				container_of(link, struct bpf_raw_tp_link, link);
4959 			struct bpf_raw_event_map *btp = raw_tp->btp;
4960 
4961 			err = bpf_task_fd_query_copy(attr, uattr,
4962 						     raw_tp->link.prog->aux->id,
4963 						     BPF_FD_TYPE_RAW_TRACEPOINT,
4964 						     btp->tp->name, 0, 0);
4965 			goto put_file;
4966 		}
4967 		goto out_not_supp;
4968 	}
4969 
4970 	event = perf_get_event(file);
4971 	if (!IS_ERR(event)) {
4972 		u64 probe_offset, probe_addr;
4973 		u32 prog_id, fd_type;
4974 		const char *buf;
4975 
4976 		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
4977 					      &buf, &probe_offset,
4978 					      &probe_addr, NULL);
4979 		if (!err)
4980 			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
4981 						     fd_type, buf,
4982 						     probe_offset,
4983 						     probe_addr);
4984 		goto put_file;
4985 	}
4986 
4987 out_not_supp:
4988 	err = -ENOTSUPP;
4989 put_file:
4990 	fput(file);
4991 	return err;
4992 }
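
/*
 * Illustrative userspace sketch, not part of this file: asking what a
 * perf_event or raw_tp fd of another task is attached to (CAP_SYS_ADMIN
 * required):
 *
 *	char buf[256];
 *	union bpf_attr attr = {};
 *
 *	attr.task_fd_query.pid = pid;
 *	attr.task_fd_query.fd = target_fd;
 *	attr.task_fd_query.buf = (__u64)(unsigned long)buf;
 *	attr.task_fd_query.buf_len = sizeof(buf);
 *	if (!syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr)))
 *		... prog_id, fd_type, probe_offset and probe_addr are written
 *		    back into uattr and buf holds the probe/tracepoint name ...
 */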
4993 
4994 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
4995 
4996 #define BPF_DO_BATCH(fn, ...)			\
4997 	do {					\
4998 		if (!fn) {			\
4999 			err = -ENOTSUPP;	\
5000 			goto err_put;		\
5001 		}				\
5002 		err = fn(__VA_ARGS__);		\
5003 	} while (0)
5004 
5005 static int bpf_map_do_batch(const union bpf_attr *attr,
5006 			    union bpf_attr __user *uattr,
5007 			    int cmd)
5008 {
5009 	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
5010 			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
5011 	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
5012 	struct bpf_map *map;
5013 	int err, ufd;
5014 	struct fd f;
5015 
5016 	if (CHECK_ATTR(BPF_MAP_BATCH))
5017 		return -EINVAL;
5018 
5019 	ufd = attr->batch.map_fd;
5020 	f = fdget(ufd);
5021 	map = __bpf_map_get(f);
5022 	if (IS_ERR(map))
5023 		return PTR_ERR(map);
5024 	if (has_write)
5025 		bpf_map_write_active_inc(map);
5026 	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
5027 		err = -EPERM;
5028 		goto err_put;
5029 	}
5030 	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
5031 		err = -EPERM;
5032 		goto err_put;
5033 	}
5034 
5035 	if (cmd == BPF_MAP_LOOKUP_BATCH)
5036 		BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
5037 	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
5038 		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
5039 	else if (cmd == BPF_MAP_UPDATE_BATCH)
5040 		BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
5041 	else
5042 		BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
5043 err_put:
5044 	if (has_write) {
5045 		maybe_wait_bpf_programs(map);
5046 		bpf_map_write_active_dec(map);
5047 	}
5048 	fdput(f);
5049 	return err;
5050 }
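
/*
 * Illustrative userspace sketch, not part of this file: reading a map in
 * bulk with BPF_MAP_LOOKUP_BATCH, assuming 4-byte keys and 8-byte values:
 *
 *	__u32 keys[128];
 *	__u64 values[128], out = 0;
 *	union bpf_attr attr = {};
 *
 *	attr.batch.map_fd = map_fd;
 *	attr.batch.out_batch = (__u64)(unsigned long)&out;
 *	attr.batch.keys = (__u64)(unsigned long)keys;
 *	attr.batch.values = (__u64)(unsigned long)values;
 *	attr.batch.count = 128;
 *	int err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *
 * Leave in_batch zero on the first call; on later calls point
 * attr.batch.in_batch at the previous out cookie.  count is updated with
 * the number of elements returned and errno == ENOENT marks the end.
 */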
5051 
5052 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
5053 static int link_create(union bpf_attr *attr, bpfptr_t uattr)
5054 {
5055 	struct bpf_prog *prog;
5056 	int ret;
5057 
5058 	if (CHECK_ATTR(BPF_LINK_CREATE))
5059 		return -EINVAL;
5060 
5061 	if (attr->link_create.attach_type == BPF_STRUCT_OPS)
5062 		return bpf_struct_ops_link_create(attr);
5063 
5064 	prog = bpf_prog_get(attr->link_create.prog_fd);
5065 	if (IS_ERR(prog))
5066 		return PTR_ERR(prog);
5067 
5068 	ret = bpf_prog_attach_check_attach_type(prog,
5069 						attr->link_create.attach_type);
5070 	if (ret)
5071 		goto out;
5072 
5073 	switch (prog->type) {
5074 	case BPF_PROG_TYPE_CGROUP_SKB:
5075 	case BPF_PROG_TYPE_CGROUP_SOCK:
5076 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
5077 	case BPF_PROG_TYPE_SOCK_OPS:
5078 	case BPF_PROG_TYPE_CGROUP_DEVICE:
5079 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
5080 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5081 		ret = cgroup_bpf_link_attach(attr, prog);
5082 		break;
5083 	case BPF_PROG_TYPE_EXT:
5084 		ret = bpf_tracing_prog_attach(prog,
5085 					      attr->link_create.target_fd,
5086 					      attr->link_create.target_btf_id,
5087 					      attr->link_create.tracing.cookie);
5088 		break;
5089 	case BPF_PROG_TYPE_LSM:
5090 	case BPF_PROG_TYPE_TRACING:
5091 		if (attr->link_create.attach_type != prog->expected_attach_type) {
5092 			ret = -EINVAL;
5093 			goto out;
5094 		}
5095 		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
5096 			ret = bpf_raw_tp_link_attach(prog, NULL);
5097 		else if (prog->expected_attach_type == BPF_TRACE_ITER)
5098 			ret = bpf_iter_link_attach(attr, uattr, prog);
5099 		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
5100 			ret = cgroup_bpf_link_attach(attr, prog);
5101 		else
5102 			ret = bpf_tracing_prog_attach(prog,
5103 						      attr->link_create.target_fd,
5104 						      attr->link_create.target_btf_id,
5105 						      attr->link_create.tracing.cookie);
5106 		break;
5107 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
5108 	case BPF_PROG_TYPE_SK_LOOKUP:
5109 		ret = netns_bpf_link_create(attr, prog);
5110 		break;
5111 #ifdef CONFIG_NET
5112 	case BPF_PROG_TYPE_XDP:
5113 		ret = bpf_xdp_link_attach(attr, prog);
5114 		break;
5115 	case BPF_PROG_TYPE_SCHED_CLS:
5116 		if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
5117 		    attr->link_create.attach_type == BPF_TCX_EGRESS)
5118 			ret = tcx_link_attach(attr, prog);
5119 		else
5120 			ret = netkit_link_attach(attr, prog);
5121 		break;
5122 	case BPF_PROG_TYPE_NETFILTER:
5123 		ret = bpf_nf_link_attach(attr, prog);
5124 		break;
5125 #endif
5126 	case BPF_PROG_TYPE_PERF_EVENT:
5127 	case BPF_PROG_TYPE_TRACEPOINT:
5128 		ret = bpf_perf_link_attach(attr, prog);
5129 		break;
5130 	case BPF_PROG_TYPE_KPROBE:
5131 		if (attr->link_create.attach_type == BPF_PERF_EVENT)
5132 			ret = bpf_perf_link_attach(attr, prog);
5133 		else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI)
5134 			ret = bpf_kprobe_multi_link_attach(attr, prog);
5135 		else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
5136 			ret = bpf_uprobe_multi_link_attach(attr, prog);
5137 		break;
5138 	default:
5139 		ret = -EINVAL;
5140 	}
5141 
5142 out:
5143 	if (ret < 0)
5144 		bpf_prog_put(prog);
5145 	return ret;
5146 }
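
/*
 * Illustrative userspace sketch, not part of this file: creating a cgroup
 * link via BPF_LINK_CREATE instead of the legacy BPF_PROG_ATTACH:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.target_fd = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_EGRESS;
 *	int link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */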
5147 
5148 static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
5149 {
5150 	struct bpf_map *new_map, *old_map = NULL;
5151 	int ret;
5152 
5153 	new_map = bpf_map_get(attr->link_update.new_map_fd);
5154 	if (IS_ERR(new_map))
5155 		return PTR_ERR(new_map);
5156 
5157 	if (attr->link_update.flags & BPF_F_REPLACE) {
5158 		old_map = bpf_map_get(attr->link_update.old_map_fd);
5159 		if (IS_ERR(old_map)) {
5160 			ret = PTR_ERR(old_map);
5161 			goto out_put;
5162 		}
5163 	} else if (attr->link_update.old_map_fd) {
5164 		ret = -EINVAL;
5165 		goto out_put;
5166 	}
5167 
5168 	ret = link->ops->update_map(link, new_map, old_map);
5169 
5170 	if (old_map)
5171 		bpf_map_put(old_map);
5172 out_put:
5173 	bpf_map_put(new_map);
5174 	return ret;
5175 }
5176 
5177 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
5178 
5179 static int link_update(union bpf_attr *attr)
5180 {
5181 	struct bpf_prog *old_prog = NULL, *new_prog;
5182 	struct bpf_link *link;
5183 	u32 flags;
5184 	int ret;
5185 
5186 	if (CHECK_ATTR(BPF_LINK_UPDATE))
5187 		return -EINVAL;
5188 
5189 	flags = attr->link_update.flags;
5190 	if (flags & ~BPF_F_REPLACE)
5191 		return -EINVAL;
5192 
5193 	link = bpf_link_get_from_fd(attr->link_update.link_fd);
5194 	if (IS_ERR(link))
5195 		return PTR_ERR(link);
5196 
5197 	if (link->ops->update_map) {
5198 		ret = link_update_map(link, attr);
5199 		goto out_put_link;
5200 	}
5201 
5202 	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
5203 	if (IS_ERR(new_prog)) {
5204 		ret = PTR_ERR(new_prog);
5205 		goto out_put_link;
5206 	}
5207 
5208 	if (flags & BPF_F_REPLACE) {
5209 		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
5210 		if (IS_ERR(old_prog)) {
5211 			ret = PTR_ERR(old_prog);
5212 			old_prog = NULL;
5213 			goto out_put_progs;
5214 		}
5215 	} else if (attr->link_update.old_prog_fd) {
5216 		ret = -EINVAL;
5217 		goto out_put_progs;
5218 	}
5219 
5220 	if (link->ops->update_prog)
5221 		ret = link->ops->update_prog(link, new_prog, old_prog);
5222 	else
5223 		ret = -EINVAL;
5224 
5225 out_put_progs:
5226 	if (old_prog)
5227 		bpf_prog_put(old_prog);
5228 	if (ret)
5229 		bpf_prog_put(new_prog);
5230 out_put_link:
5231 	bpf_link_put_direct(link);
5232 	return ret;
5233 }
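
/*
 * Illustrative userspace sketch, not part of this file: atomically swapping
 * the program behind a link, failing unless old_prog_fd still matches:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_update.link_fd = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.flags = BPF_F_REPLACE;
 *	attr.link_update.old_prog_fd = old_prog_fd;
 *	int err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 */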
5234 
5235 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
5236 
5237 static int link_detach(union bpf_attr *attr)
5238 {
5239 	struct bpf_link *link;
5240 	int ret;
5241 
5242 	if (CHECK_ATTR(BPF_LINK_DETACH))
5243 		return -EINVAL;
5244 
5245 	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
5246 	if (IS_ERR(link))
5247 		return PTR_ERR(link);
5248 
5249 	if (link->ops->detach)
5250 		ret = link->ops->detach(link);
5251 	else
5252 		ret = -EOPNOTSUPP;
5253 
5254 	bpf_link_put_direct(link);
5255 	return ret;
5256 }
5257 
5258 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
5259 {
5260 	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
5261 }
5262 
5263 struct bpf_link *bpf_link_by_id(u32 id)
5264 {
5265 	struct bpf_link *link;
5266 
5267 	if (!id)
5268 		return ERR_PTR(-ENOENT);
5269 
5270 	spin_lock_bh(&link_idr_lock);
5271 	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
5272 	link = idr_find(&link_idr, id);
5273 	if (link) {
5274 		if (link->id)
5275 			link = bpf_link_inc_not_zero(link);
5276 		else
5277 			link = ERR_PTR(-EAGAIN);
5278 	} else {
5279 		link = ERR_PTR(-ENOENT);
5280 	}
5281 	spin_unlock_bh(&link_idr_lock);
5282 	return link;
5283 }
5284 
5285 struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
5286 {
5287 	struct bpf_link *link;
5288 
5289 	spin_lock_bh(&link_idr_lock);
5290 again:
5291 	link = idr_get_next(&link_idr, id);
5292 	if (link) {
5293 		link = bpf_link_inc_not_zero(link);
5294 		if (IS_ERR(link)) {
5295 			(*id)++;
5296 			goto again;
5297 		}
5298 	}
5299 	spin_unlock_bh(&link_idr_lock);
5300 
5301 	return link;
5302 }
5303 
5304 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
5305 
5306 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
5307 {
5308 	struct bpf_link *link;
5309 	u32 id = attr->link_id;
5310 	int fd;
5311 
5312 	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
5313 		return -EINVAL;
5314 
5315 	if (!capable(CAP_SYS_ADMIN))
5316 		return -EPERM;
5317 
5318 	link = bpf_link_by_id(id);
5319 	if (IS_ERR(link))
5320 		return PTR_ERR(link);
5321 
5322 	fd = bpf_link_new_fd(link);
5323 	if (fd < 0)
5324 		bpf_link_put_direct(link);
5325 
5326 	return fd;
5327 }
5328 
5329 DEFINE_MUTEX(bpf_stats_enabled_mutex);
5330 
5331 static int bpf_stats_release(struct inode *inode, struct file *file)
5332 {
5333 	mutex_lock(&bpf_stats_enabled_mutex);
5334 	static_key_slow_dec(&bpf_stats_enabled_key.key);
5335 	mutex_unlock(&bpf_stats_enabled_mutex);
5336 	return 0;
5337 }
5338 
5339 static const struct file_operations bpf_stats_fops = {
5340 	.release = bpf_stats_release,
5341 };
5342 
5343 static int bpf_enable_runtime_stats(void)
5344 {
5345 	int fd;
5346 
5347 	mutex_lock(&bpf_stats_enabled_mutex);
5348 
5349 	/* Set a very high limit to avoid overflow */
5350 	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
5351 		mutex_unlock(&bpf_stats_enabled_mutex);
5352 		return -EBUSY;
5353 	}
5354 
5355 	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
5356 	if (fd >= 0)
5357 		static_key_slow_inc(&bpf_stats_enabled_key.key);
5358 
5359 	mutex_unlock(&bpf_stats_enabled_mutex);
5360 	return fd;
5361 }
5362 
5363 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
5364 
5365 static int bpf_enable_stats(union bpf_attr *attr)
5366 {
5368 	if (CHECK_ATTR(BPF_ENABLE_STATS))
5369 		return -EINVAL;
5370 
5371 	if (!capable(CAP_SYS_ADMIN))
5372 		return -EPERM;
5373 
5374 	switch (attr->enable_stats.type) {
5375 	case BPF_STATS_RUN_TIME:
5376 		return bpf_enable_runtime_stats();
5377 	default:
5378 		break;
5379 	}
5380 	return -EINVAL;
5381 }
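
/*
 * Illustrative userspace sketch, not part of this file: run-time stats
 * stay enabled for as long as the returned fd is kept open:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *	int stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 */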
5382 
5383 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
5384 
5385 static int bpf_iter_create(union bpf_attr *attr)
5386 {
5387 	struct bpf_link *link;
5388 	int err;
5389 
5390 	if (CHECK_ATTR(BPF_ITER_CREATE))
5391 		return -EINVAL;
5392 
5393 	if (attr->iter_create.flags)
5394 		return -EINVAL;
5395 
5396 	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
5397 	if (IS_ERR(link))
5398 		return PTR_ERR(link);
5399 
5400 	err = bpf_iter_new_fd(link);
5401 	bpf_link_put_direct(link);
5402 
5403 	return err;
5404 }
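
/*
 * Illustrative userspace sketch, not part of this file: instantiating an
 * iterator from an iter link (itself from BPF_LINK_CREATE) and reading
 * its output:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.iter_create.link_fd = iter_link_fd;
 *	int it_fd = syscall(__NR_bpf, BPF_ITER_CREATE, &attr, sizeof(attr));
 *	... read(it_fd, buf, sizeof(buf)) then streams the iterator output ...
 */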
5405 
5406 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
5407 
5408 static int bpf_prog_bind_map(union bpf_attr *attr)
5409 {
5410 	struct bpf_prog *prog;
5411 	struct bpf_map *map;
5412 	struct bpf_map **used_maps_old, **used_maps_new;
5413 	int i, ret = 0;
5414 
5415 	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
5416 		return -EINVAL;
5417 
5418 	if (attr->prog_bind_map.flags)
5419 		return -EINVAL;
5420 
5421 	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
5422 	if (IS_ERR(prog))
5423 		return PTR_ERR(prog);
5424 
5425 	map = bpf_map_get(attr->prog_bind_map.map_fd);
5426 	if (IS_ERR(map)) {
5427 		ret = PTR_ERR(map);
5428 		goto out_prog_put;
5429 	}
5430 
5431 	mutex_lock(&prog->aux->used_maps_mutex);
5432 
5433 	used_maps_old = prog->aux->used_maps;
5434 
5435 	for (i = 0; i < prog->aux->used_map_cnt; i++)
5436 		if (used_maps_old[i] == map) {
5437 			bpf_map_put(map);
5438 			goto out_unlock;
5439 		}
5440 
5441 	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
5442 				      sizeof(used_maps_new[0]),
5443 				      GFP_KERNEL);
5444 	if (!used_maps_new) {
5445 		ret = -ENOMEM;
5446 		goto out_unlock;
5447 	}
5448 
5449 	/* The bpf program will not access the bpf map, but for the sake of
5450 	 * simplicity, increase sleepable_refcnt for sleepable programs as well.
5451 	 */
5452 	if (prog->aux->sleepable)
5453 		atomic64_inc(&map->sleepable_refcnt);
5454 	memcpy(used_maps_new, used_maps_old,
5455 	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
5456 	used_maps_new[prog->aux->used_map_cnt] = map;
5457 
5458 	prog->aux->used_map_cnt++;
5459 	prog->aux->used_maps = used_maps_new;
5460 
5461 	kfree(used_maps_old);
5462 
5463 out_unlock:
5464 	mutex_unlock(&prog->aux->used_maps_mutex);
5465 
5466 	if (ret)
5467 		bpf_map_put(map);
5468 out_prog_put:
5469 	bpf_prog_put(prog);
5470 	return ret;
5471 }
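
/*
 * Illustrative userspace sketch, not part of this file: pinning a map's
 * lifetime to a program that does not reference it in its instructions:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_bind_map.prog_fd = prog_fd;
 *	attr.prog_bind_map.map_fd = map_fd;
 *	int err = syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 */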
5472 
5473 #define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd
5474 
5475 static int token_create(union bpf_attr *attr)
5476 {
5477 	if (CHECK_ATTR(BPF_TOKEN_CREATE))
5478 		return -EINVAL;
5479 
5480 	/* no flags are supported yet */
5481 	if (attr->token_create.flags)
5482 		return -EINVAL;
5483 
5484 	return bpf_token_create(attr);
5485 }
5486 
5487 static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
5488 {
5489 	union bpf_attr attr;
5490 	int err;
5491 
5492 	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
5493 	if (err)
5494 		return err;
5495 	size = min_t(u32, size, sizeof(attr));
5496 
5497 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
5498 	memset(&attr, 0, sizeof(attr));
5499 	if (copy_from_bpfptr(&attr, uattr, size) != 0)
5500 		return -EFAULT;
5501 
5502 	err = security_bpf(cmd, &attr, size);
5503 	if (err < 0)
5504 		return err;
5505 
5506 	switch (cmd) {
5507 	case BPF_MAP_CREATE:
5508 		err = map_create(&attr);
5509 		break;
5510 	case BPF_MAP_LOOKUP_ELEM:
5511 		err = map_lookup_elem(&attr);
5512 		break;
5513 	case BPF_MAP_UPDATE_ELEM:
5514 		err = map_update_elem(&attr, uattr);
5515 		break;
5516 	case BPF_MAP_DELETE_ELEM:
5517 		err = map_delete_elem(&attr, uattr);
5518 		break;
5519 	case BPF_MAP_GET_NEXT_KEY:
5520 		err = map_get_next_key(&attr);
5521 		break;
5522 	case BPF_MAP_FREEZE:
5523 		err = map_freeze(&attr);
5524 		break;
5525 	case BPF_PROG_LOAD:
5526 		err = bpf_prog_load(&attr, uattr, size);
5527 		break;
5528 	case BPF_OBJ_PIN:
5529 		err = bpf_obj_pin(&attr);
5530 		break;
5531 	case BPF_OBJ_GET:
5532 		err = bpf_obj_get(&attr);
5533 		break;
5534 	case BPF_PROG_ATTACH:
5535 		err = bpf_prog_attach(&attr);
5536 		break;
5537 	case BPF_PROG_DETACH:
5538 		err = bpf_prog_detach(&attr);
5539 		break;
5540 	case BPF_PROG_QUERY:
5541 		err = bpf_prog_query(&attr, uattr.user);
5542 		break;
5543 	case BPF_PROG_TEST_RUN:
5544 		err = bpf_prog_test_run(&attr, uattr.user);
5545 		break;
5546 	case BPF_PROG_GET_NEXT_ID:
5547 		err = bpf_obj_get_next_id(&attr, uattr.user,
5548 					  &prog_idr, &prog_idr_lock);
5549 		break;
5550 	case BPF_MAP_GET_NEXT_ID:
5551 		err = bpf_obj_get_next_id(&attr, uattr.user,
5552 					  &map_idr, &map_idr_lock);
5553 		break;
5554 	case BPF_BTF_GET_NEXT_ID:
5555 		err = bpf_obj_get_next_id(&attr, uattr.user,
5556 					  &btf_idr, &btf_idr_lock);
5557 		break;
5558 	case BPF_PROG_GET_FD_BY_ID:
5559 		err = bpf_prog_get_fd_by_id(&attr);
5560 		break;
5561 	case BPF_MAP_GET_FD_BY_ID:
5562 		err = bpf_map_get_fd_by_id(&attr);
5563 		break;
5564 	case BPF_OBJ_GET_INFO_BY_FD:
5565 		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
5566 		break;
5567 	case BPF_RAW_TRACEPOINT_OPEN:
5568 		err = bpf_raw_tracepoint_open(&attr);
5569 		break;
5570 	case BPF_BTF_LOAD:
5571 		err = bpf_btf_load(&attr, uattr, size);
5572 		break;
5573 	case BPF_BTF_GET_FD_BY_ID:
5574 		err = bpf_btf_get_fd_by_id(&attr);
5575 		break;
5576 	case BPF_TASK_FD_QUERY:
5577 		err = bpf_task_fd_query(&attr, uattr.user);
5578 		break;
5579 	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
5580 		err = map_lookup_and_delete_elem(&attr);
5581 		break;
5582 	case BPF_MAP_LOOKUP_BATCH:
5583 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
5584 		break;
5585 	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
5586 		err = bpf_map_do_batch(&attr, uattr.user,
5587 				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
5588 		break;
5589 	case BPF_MAP_UPDATE_BATCH:
5590 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
5591 		break;
5592 	case BPF_MAP_DELETE_BATCH:
5593 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
5594 		break;
5595 	case BPF_LINK_CREATE:
5596 		err = link_create(&attr, uattr);
5597 		break;
5598 	case BPF_LINK_UPDATE:
5599 		err = link_update(&attr);
5600 		break;
5601 	case BPF_LINK_GET_FD_BY_ID:
5602 		err = bpf_link_get_fd_by_id(&attr);
5603 		break;
5604 	case BPF_LINK_GET_NEXT_ID:
5605 		err = bpf_obj_get_next_id(&attr, uattr.user,
5606 					  &link_idr, &link_idr_lock);
5607 		break;
5608 	case BPF_ENABLE_STATS:
5609 		err = bpf_enable_stats(&attr);
5610 		break;
5611 	case BPF_ITER_CREATE:
5612 		err = bpf_iter_create(&attr);
5613 		break;
5614 	case BPF_LINK_DETACH:
5615 		err = link_detach(&attr);
5616 		break;
5617 	case BPF_PROG_BIND_MAP:
5618 		err = bpf_prog_bind_map(&attr);
5619 		break;
5620 	case BPF_TOKEN_CREATE:
5621 		err = token_create(&attr);
5622 		break;
5623 	default:
5624 		err = -EINVAL;
5625 		break;
5626 	}
5627 
5628 	return err;
5629 }
5630 
5631 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
5632 {
5633 	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
5634 }
5635 
5636 static bool syscall_prog_is_valid_access(int off, int size,
5637 					 enum bpf_access_type type,
5638 					 const struct bpf_prog *prog,
5639 					 struct bpf_insn_access_aux *info)
5640 {
5641 	if (off < 0 || off >= U16_MAX)
5642 		return false;
5643 	if (off % size != 0)
5644 		return false;
5645 	return true;
5646 }
5647 
5648 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
5649 {
5650 	switch (cmd) {
5651 	case BPF_MAP_CREATE:
5652 	case BPF_MAP_DELETE_ELEM:
5653 	case BPF_MAP_UPDATE_ELEM:
5654 	case BPF_MAP_FREEZE:
5655 	case BPF_MAP_GET_FD_BY_ID:
5656 	case BPF_PROG_LOAD:
5657 	case BPF_BTF_LOAD:
5658 	case BPF_LINK_CREATE:
5659 	case BPF_RAW_TRACEPOINT_OPEN:
5660 		break;
5661 	default:
5662 		return -EINVAL;
5663 	}
5664 	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
5665 }
5666 
5668 /* To shut up -Wmissing-prototypes.
5669  * This function is used by the kernel light skeleton
5670  * to load bpf programs when modules are loaded or during kernel boot.
5671  * See tools/lib/bpf/skel_internal.h
5672  */
5673 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
5674 
5675 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
5676 {
5677 	struct bpf_prog * __maybe_unused prog;
5678 	struct bpf_tramp_run_ctx __maybe_unused run_ctx;
5679 
5680 	switch (cmd) {
5681 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
5682 	case BPF_PROG_TEST_RUN:
5683 		if (attr->test.data_in || attr->test.data_out ||
5684 		    attr->test.ctx_out || attr->test.duration ||
5685 		    attr->test.repeat || attr->test.flags)
5686 			return -EINVAL;
5687 
5688 		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
5689 		if (IS_ERR(prog))
5690 			return PTR_ERR(prog);
5691 
5692 		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
5693 		    attr->test.ctx_size_in > U16_MAX) {
5694 			bpf_prog_put(prog);
5695 			return -EINVAL;
5696 		}
5697 
5698 		run_ctx.bpf_cookie = 0;
5699 		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
5700 			/* recursion detected */
5701 			__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
5702 			bpf_prog_put(prog);
5703 			return -EBUSY;
5704 		}
5705 		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
5706 		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
5707 						&run_ctx);
5708 		bpf_prog_put(prog);
5709 		return 0;
5710 #endif
5711 	default:
5712 		return ____bpf_sys_bpf(cmd, attr, size);
5713 	}
5714 }
5715 EXPORT_SYMBOL(kern_sys_bpf);
5716 
5717 static const struct bpf_func_proto bpf_sys_bpf_proto = {
5718 	.func		= bpf_sys_bpf,
5719 	.gpl_only	= false,
5720 	.ret_type	= RET_INTEGER,
5721 	.arg1_type	= ARG_ANYTHING,
5722 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
5723 	.arg3_type	= ARG_CONST_SIZE,
5724 };
5725 
5726 const struct bpf_func_proto * __weak
5727 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5728 {
5729 	return bpf_base_func_proto(func_id, prog);
5730 }
5731 
5732 BPF_CALL_1(bpf_sys_close, u32, fd)
5733 {
5734 	/* When a bpf program calls this helper there should not be
5735 	 * an fdget() without a matching completed fdput().
5736 	 * This helper is allowed in the following callchain only:
5737 	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
5738 	 */
5739 	return close_fd(fd);
5740 }
5741 
5742 static const struct bpf_func_proto bpf_sys_close_proto = {
5743 	.func		= bpf_sys_close,
5744 	.gpl_only	= false,
5745 	.ret_type	= RET_INTEGER,
5746 	.arg1_type	= ARG_ANYTHING,
5747 };
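
/* A hedged BPF-side sketch (hypothetical loader fragment): fds returned by
 * bpf_sys_bpf() are installed in the task running prog_test_run, so a loader
 * program can drop one it no longer needs:
 *
 *	int fd = bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 *	if (fd >= 0 && done_with_fd)
 *		bpf_sys_close(fd);
 *
 * where done_with_fd stands in for the caller's own bookkeeping.
 */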
5748 
5749 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
5750 {
5751 	if (flags)
5752 		return -EINVAL;
5753 
5754 	if (name_sz <= 1 || name[name_sz - 1])
5755 		return -EINVAL;
5756 
5757 	if (!bpf_dump_raw_ok(current_cred()))
5758 		return -EPERM;
5759 
5760 	*res = kallsyms_lookup_name(name);
5761 	return *res ? 0 : -ENOENT;
5762 }
5763 
5764 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
5765 	.func		= bpf_kallsyms_lookup_name,
5766 	.gpl_only	= false,
5767 	.ret_type	= RET_INTEGER,
5768 	.arg1_type	= ARG_PTR_TO_MEM,
5769 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
5770 	.arg3_type	= ARG_ANYTHING,
5771 	.arg4_type	= ARG_PTR_TO_LONG,
5772 };
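
/* A hedged BPF-side usage sketch (hypothetical): name_sz must count the NUL
 * terminator, matching the name[name_sz - 1] check above, and the address is
 * returned through the u64 pointer:
 *
 *	const char sym[] = "bpf_prog_active";
 *	__u64 addr = 0;
 *
 *	if (!bpf_kallsyms_lookup_name(sym, sizeof(sym), 0, &addr))
 *		bpf_printk("%s at %llx", sym, addr);
 */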
5773 
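/* Helper availability for BPF_PROG_TYPE_SYSCALL programs. Only bpf_sys_bpf()
 * carries an extra gate here: the nested bpf() access requires CAP_PERFMON,
 * which may also be satisfied by a BPF token attached to the program at load
 * time.
 */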
5774 static const struct bpf_func_proto *
5775 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5776 {
5777 	switch (func_id) {
5778 	case BPF_FUNC_sys_bpf:
5779 		return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
5780 		       ? NULL : &bpf_sys_bpf_proto;
5781 	case BPF_FUNC_btf_find_by_name_kind:
5782 		return &bpf_btf_find_by_name_kind_proto;
5783 	case BPF_FUNC_sys_close:
5784 		return &bpf_sys_close_proto;
5785 	case BPF_FUNC_kallsyms_lookup_name:
5786 		return &bpf_kallsyms_lookup_name_proto;
5787 	default:
5788 		return tracing_prog_func_proto(func_id, prog);
5789 	}
5790 }
5791 
5792 const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
5793 	.get_func_proto  = syscall_prog_func_proto,
5794 	.is_valid_access = syscall_prog_is_valid_access,
5795 };
5796 
5797 const struct bpf_prog_ops bpf_syscall_prog_ops = {
5798 	.test_run = bpf_prog_test_run_syscall,
5799 };
5800 
5801 #ifdef CONFIG_SYSCTL
5802 static int bpf_stats_handler(struct ctl_table *table, int write,
5803 			     void *buffer, size_t *lenp, loff_t *ppos)
5804 {
5805 	struct static_key *key = (struct static_key *)table->data;
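	/* last value written; avoids unbalanced inc/dec of the static key */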
5806 	static int saved_val;
5807 	int val, ret;
5808 	struct ctl_table tmp = {
5809 		.data   = &val,
5810 		.maxlen = sizeof(val),
5811 		.mode   = table->mode,
5812 		.extra1 = SYSCTL_ZERO,
5813 		.extra2 = SYSCTL_ONE,
5814 	};
5815 
5816 	if (write && !capable(CAP_SYS_ADMIN))
5817 		return -EPERM;
5818 
5819 	mutex_lock(&bpf_stats_enabled_mutex);
5820 	val = saved_val;
5821 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5822 	if (write && !ret && val != saved_val) {
5823 		if (val)
5824 			static_key_slow_inc(key);
5825 		else
5826 			static_key_slow_dec(key);
5827 		saved_val = val;
5828 	}
5829 	mutex_unlock(&bpf_stats_enabled_mutex);
5830 	return ret;
5831 }
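
/* A hedged user-space sketch (illustrative) of flipping the knob this
 * handler backs; the static key keeps the stats-off fast path effectively
 * free:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int bpf_stats_enable(int on)
 *	{
 *		int fd = open("/proc/sys/kernel/bpf_stats_enabled", O_WRONLY);
 *		int ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = write(fd, on ? "1" : "0", 1) == 1 ? 0 : -1;
 *		close(fd);
 *		return ret;
 *	}
 */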
5832 
5833 void __weak unpriv_ebpf_notify(int new_state)
5834 {
5835 }
5836 
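/* kernel.unprivileged_bpf_disabled semantics: 0 permits unprivileged bpf(),
 * 1 disables it and locks that choice in until reboot, and 2 disables it but
 * still lets CAP_SYS_ADMIN switch to 0 or 1. The locked_state check below
 * implements the one-way latch for value 1.
 */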
5837 static int bpf_unpriv_handler(struct ctl_table *table, int write,
5838 			      void *buffer, size_t *lenp, loff_t *ppos)
5839 {
5840 	int ret, unpriv_enable = *(int *)table->data;
5841 	bool locked_state = unpriv_enable == 1;
5842 	struct ctl_table tmp = *table;
5843 
5844 	if (write && !capable(CAP_SYS_ADMIN))
5845 		return -EPERM;
5846 
5847 	tmp.data = &unpriv_enable;
5848 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5849 	if (write && !ret) {
5850 		if (locked_state && unpriv_enable != 1)
5851 			return -EPERM;
5852 		*(int *)table->data = unpriv_enable;
5853 	}
5854 
5855 	if (write)
5856 		unpriv_ebpf_notify(unpriv_enable);
5857 
5858 	return ret;
5859 }
5860 
5861 static struct ctl_table bpf_syscall_table[] = {
5862 	{
5863 		.procname	= "unprivileged_bpf_disabled",
5864 		.data		= &sysctl_unprivileged_bpf_disabled,
5865 		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
5866 		.mode		= 0644,
5867 		.proc_handler	= bpf_unpriv_handler,
5868 		.extra1		= SYSCTL_ZERO,
5869 		.extra2		= SYSCTL_TWO,
5870 	},
5871 	{
5872 		.procname	= "bpf_stats_enabled",
5873 		.data		= &bpf_stats_enabled_key.key,
5874 		.mode		= 0644,
5875 		.proc_handler	= bpf_stats_handler,
5876 	},
5877 	{ }
5878 };
5879 
5880 static int __init bpf_syscall_sysctl_init(void)
5881 {
5882 	register_sysctl_init("kernel", bpf_syscall_table);
5883 	return 0;
5884 }
5885 late_initcall(bpf_syscall_sysctl_init);
5886 #endif /* CONFIG_SYSCTL */
5887