xref: /linux/kernel/bpf/syscall.c (revision a7d22ca2a483d6c69c0791954447464297315ffa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/bpf_trace.h>
6 #include <linux/bpf_lirc.h>
7 #include <linux/btf.h>
8 #include <linux/syscalls.h>
9 #include <linux/slab.h>
10 #include <linux/sched/signal.h>
11 #include <linux/vmalloc.h>
12 #include <linux/mmzone.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/fdtable.h>
15 #include <linux/file.h>
16 #include <linux/fs.h>
17 #include <linux/license.h>
18 #include <linux/filter.h>
19 #include <linux/version.h>
20 #include <linux/kernel.h>
21 #include <linux/idr.h>
22 #include <linux/cred.h>
23 #include <linux/timekeeping.h>
24 #include <linux/ctype.h>
25 #include <linux/nospec.h>
26 #include <linux/audit.h>
27 #include <uapi/linux/btf.h>
28 
29 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
30 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
31 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
32 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
33 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
34 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
35 			IS_FD_HASH(map))
36 
37 #define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
38 
39 DEFINE_PER_CPU(int, bpf_prog_active);
40 static DEFINE_IDR(prog_idr);
41 static DEFINE_SPINLOCK(prog_idr_lock);
42 static DEFINE_IDR(map_idr);
43 static DEFINE_SPINLOCK(map_idr_lock);
44 
45 int sysctl_unprivileged_bpf_disabled __read_mostly;
46 
47 static const struct bpf_map_ops * const bpf_map_types[] = {
48 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
49 #define BPF_MAP_TYPE(_id, _ops) \
50 	[_id] = &_ops,
51 #include <linux/bpf_types.h>
52 #undef BPF_PROG_TYPE
53 #undef BPF_MAP_TYPE
54 };
55 
56 /*
57  * If we're handed a bigger struct than we know of, ensure all the unknown bits
58  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
59  * we don't know about yet.
60  *
61  * There is a ToCToU (time-of-check to time-of-use) race between this check
62  * and the following copy_from_user() call. However, this is not a concern
63  * since this function is only meant to future-proof against unknown fields.
64  */
65 int bpf_check_uarg_tail_zero(void __user *uaddr,
66 			     size_t expected_size,
67 			     size_t actual_size)
68 {
69 	unsigned char __user *addr;
70 	unsigned char __user *end;
71 	unsigned char val;
72 	int err;
73 
74 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
75 		return -E2BIG;
76 
77 	if (unlikely(!access_ok(uaddr, actual_size)))
78 		return -EFAULT;
79 
80 	if (actual_size <= expected_size)
81 		return 0;
82 
83 	addr = uaddr + expected_size;
84 	end  = uaddr + actual_size;
85 
86 	for (; addr < end; addr++) {
87 		err = get_user(val, addr);
88 		if (err)
89 			return err;
90 		if (val)
91 			return -E2BIG;
92 	}
93 
94 	return 0;
95 }
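/* Illustrative caller pattern (a sketch, not code from this section; uinfo,
 * info and info_len are hypothetical caller variables): callers validate the
 * user-supplied length against the struct size they know, then copy only
 * that much:
 *
 *	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
 *	if (err)
 *		return err;
 *	info_len = min_t(u32, sizeof(info), info_len);
 *	memset(&info, 0, sizeof(info));
 *	if (copy_from_user(&info, uinfo, info_len))
 *		return -EFAULT;
 */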
96 
97 const struct bpf_map_ops bpf_map_offload_ops = {
98 	.map_alloc = bpf_map_offload_map_alloc,
99 	.map_free = bpf_map_offload_map_free,
100 	.map_check_btf = map_check_no_btf,
101 };
102 
103 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
104 {
105 	const struct bpf_map_ops *ops;
106 	u32 type = attr->map_type;
107 	struct bpf_map *map;
108 	int err;
109 
110 	if (type >= ARRAY_SIZE(bpf_map_types))
111 		return ERR_PTR(-EINVAL);
112 	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
113 	ops = bpf_map_types[type];
114 	if (!ops)
115 		return ERR_PTR(-EINVAL);
116 
117 	if (ops->map_alloc_check) {
118 		err = ops->map_alloc_check(attr);
119 		if (err)
120 			return ERR_PTR(err);
121 	}
122 	if (attr->map_ifindex)
123 		ops = &bpf_map_offload_ops;
124 	map = ops->map_alloc(attr);
125 	if (IS_ERR(map))
126 		return map;
127 	map->ops = ops;
128 	map->map_type = type;
129 	return map;
130 }
131 
132 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
133 {
134 	/* We really just want to fail instead of triggering the OOM killer
135 	 * under memory pressure, therefore we pass __GFP_NORETRY to kmalloc(),
136 	 * which handles the lower-order allocation requests.
137 	 *
138 	 * It has been observed that higher-order allocation requests done by
139 	 * vmalloc with __GFP_NORETRY set might fail because it does not try
140 	 * to reclaim memory from the page cache, thus we use
141 	 * __GFP_RETRY_MAYFAIL there to avoid such failures.
142 	 */
143 
144 	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
145 	void *area;
146 
147 	if (size >= SIZE_MAX)
148 		return NULL;
149 
150 	/* kmalloc()'ed memory can't be mmap()'ed */
151 	if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
152 		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
153 				    numa_node);
154 		if (area != NULL)
155 			return area;
156 	}
157 	if (mmapable) {
158 		BUG_ON(!PAGE_ALIGNED(size));
159 		return vmalloc_user_node_flags(size, numa_node, GFP_KERNEL |
160 					       __GFP_RETRY_MAYFAIL | flags);
161 	}
162 	return __vmalloc_node_flags_caller(size, numa_node,
163 					   GFP_KERNEL | __GFP_RETRY_MAYFAIL |
164 					   flags, __builtin_return_address(0));
165 }
166 
167 void *bpf_map_area_alloc(u64 size, int numa_node)
168 {
169 	return __bpf_map_area_alloc(size, numa_node, false);
170 }
171 
172 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
173 {
174 	return __bpf_map_area_alloc(size, numa_node, true);
175 }
176 
177 void bpf_map_area_free(void *area)
178 {
179 	kvfree(area);
180 }
181 
182 static u32 bpf_map_flags_retain_permanent(u32 flags)
183 {
184 	/* Some map creation flags are not tied to the map object but
185 	 * rather to the map fd instead, so they have no meaning upon
186 	 * map object inspection since multiple file descriptors with
187 	 * different (access) properties can exist here. Thus, given
188 	 * they have zero meaning for the map itself, let's clear them
189 	 * here.
190 	 */
191 	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
192 }
193 
194 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
195 {
196 	map->map_type = attr->map_type;
197 	map->key_size = attr->key_size;
198 	map->value_size = attr->value_size;
199 	map->max_entries = attr->max_entries;
200 	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
201 	map->numa_node = bpf_map_attr_numa_node(attr);
202 }
203 
204 static int bpf_charge_memlock(struct user_struct *user, u32 pages)
205 {
206 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
207 
208 	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
209 		atomic_long_sub(pages, &user->locked_vm);
210 		return -EPERM;
211 	}
212 	return 0;
213 }
214 
215 static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
216 {
217 	if (user)
218 		atomic_long_sub(pages, &user->locked_vm);
219 }
220 
221 int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
222 {
223 	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
224 	struct user_struct *user;
225 	int ret;
226 
227 	if (size >= U32_MAX - PAGE_SIZE)
228 		return -E2BIG;
229 
230 	user = get_current_user();
231 	ret = bpf_charge_memlock(user, pages);
232 	if (ret) {
233 		free_uid(user);
234 		return ret;
235 	}
236 
237 	mem->pages = pages;
238 	mem->user = user;
239 
240 	return 0;
241 }
242 
243 void bpf_map_charge_finish(struct bpf_map_memory *mem)
244 {
245 	bpf_uncharge_memlock(mem->user, mem->pages);
246 	free_uid(mem->user);
247 }
248 
249 void bpf_map_charge_move(struct bpf_map_memory *dst,
250 			 struct bpf_map_memory *src)
251 {
252 	*dst = *src;
253 
254 	/* Make sure src will not be used for the redundant uncharging. */
255 	memset(src, 0, sizeof(struct bpf_map_memory));
256 }
257 
258 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
259 {
260 	int ret;
261 
262 	ret = bpf_charge_memlock(map->memory.user, pages);
263 	if (ret)
264 		return ret;
265 	map->memory.pages += pages;
266 	return ret;
267 }
268 
269 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
270 {
271 	bpf_uncharge_memlock(map->memory.user, pages);
272 	map->memory.pages -= pages;
273 }
274 
275 static int bpf_map_alloc_id(struct bpf_map *map)
276 {
277 	int id;
278 
279 	idr_preload(GFP_KERNEL);
280 	spin_lock_bh(&map_idr_lock);
281 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
282 	if (id > 0)
283 		map->id = id;
284 	spin_unlock_bh(&map_idr_lock);
285 	idr_preload_end();
286 
287 	if (WARN_ON_ONCE(!id))
288 		return -ENOSPC;
289 
290 	return id > 0 ? 0 : id;
291 }
292 
293 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
294 {
295 	unsigned long flags;
296 
297 	/* Offloaded maps are removed from the IDR store when their device
298 	 * disappears - even if someone holds an fd to them, they are unusable:
299 	 * the memory is gone and all ops will fail; they are simply waiting for
300 	 * the refcnt to drop so they can be freed.
301 	 */
302 	if (!map->id)
303 		return;
304 
305 	if (do_idr_lock)
306 		spin_lock_irqsave(&map_idr_lock, flags);
307 	else
308 		__acquire(&map_idr_lock);
309 
310 	idr_remove(&map_idr, map->id);
311 	map->id = 0;
312 
313 	if (do_idr_lock)
314 		spin_unlock_irqrestore(&map_idr_lock, flags);
315 	else
316 		__release(&map_idr_lock);
317 }
318 
319 /* called from workqueue */
320 static void bpf_map_free_deferred(struct work_struct *work)
321 {
322 	struct bpf_map *map = container_of(work, struct bpf_map, work);
323 	struct bpf_map_memory mem;
324 
325 	bpf_map_charge_move(&mem, &map->memory);
326 	security_bpf_map_free(map);
327 	/* implementation-dependent freeing */
328 	map->ops->map_free(map);
329 	bpf_map_charge_finish(&mem);
330 }
331 
332 static void bpf_map_put_uref(struct bpf_map *map)
333 {
334 	if (atomic64_dec_and_test(&map->usercnt)) {
335 		if (map->ops->map_release_uref)
336 			map->ops->map_release_uref(map);
337 	}
338 }
339 
340 /* decrement map refcnt and schedule it for freeing via workqueue
341  * (underlying map implementation ops->map_free() might sleep)
342  */
343 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
344 {
345 	if (atomic64_dec_and_test(&map->refcnt)) {
346 		/* bpf_map_free_id() must be called first */
347 		bpf_map_free_id(map, do_idr_lock);
348 		btf_put(map->btf);
349 		INIT_WORK(&map->work, bpf_map_free_deferred);
350 		schedule_work(&map->work);
351 	}
352 }
353 
354 void bpf_map_put(struct bpf_map *map)
355 {
356 	__bpf_map_put(map, true);
357 }
358 EXPORT_SYMBOL_GPL(bpf_map_put);
359 
360 void bpf_map_put_with_uref(struct bpf_map *map)
361 {
362 	bpf_map_put_uref(map);
363 	bpf_map_put(map);
364 }
365 
366 static int bpf_map_release(struct inode *inode, struct file *filp)
367 {
368 	struct bpf_map *map = filp->private_data;
369 
370 	if (map->ops->map_release)
371 		map->ops->map_release(map, filp);
372 
373 	bpf_map_put_with_uref(map);
374 	return 0;
375 }
376 
377 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
378 {
379 	fmode_t mode = f.file->f_mode;
380 
381 	/* Our file permissions may have been overridden by the global
382 	 * map permissions on the syscall side.
383 	 */
384 	if (READ_ONCE(map->frozen))
385 		mode &= ~FMODE_CAN_WRITE;
386 	return mode;
387 }
388 
389 #ifdef CONFIG_PROC_FS
390 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
391 {
392 	const struct bpf_map *map = filp->private_data;
393 	const struct bpf_array *array;
394 	u32 type = 0, jited = 0;
395 
396 	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
397 		array = container_of(map, struct bpf_array, map);
398 		type  = array->aux->type;
399 		jited = array->aux->jited;
400 	}
401 
402 	seq_printf(m,
403 		   "map_type:\t%u\n"
404 		   "key_size:\t%u\n"
405 		   "value_size:\t%u\n"
406 		   "max_entries:\t%u\n"
407 		   "map_flags:\t%#x\n"
408 		   "memlock:\t%llu\n"
409 		   "map_id:\t%u\n"
410 		   "frozen:\t%u\n",
411 		   map->map_type,
412 		   map->key_size,
413 		   map->value_size,
414 		   map->max_entries,
415 		   map->map_flags,
416 		   map->memory.pages * 1ULL << PAGE_SHIFT,
417 		   map->id,
418 		   READ_ONCE(map->frozen));
419 	if (type) {
420 		seq_printf(m, "owner_prog_type:\t%u\n", type);
421 		seq_printf(m, "owner_jited:\t%u\n", jited);
422 	}
423 }
424 #endif
425 
426 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
427 			      loff_t *ppos)
428 {
429 	/* We need this handler such that alloc_file() enables
430 	 * f_mode with FMODE_CAN_READ.
431 	 */
432 	return -EINVAL;
433 }
434 
435 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
436 			       size_t siz, loff_t *ppos)
437 {
438 	/* We need this handler such that alloc_file() enables
439 	 * f_mode with FMODE_CAN_WRITE.
440 	 */
441 	return -EINVAL;
442 }
443 
444 /* called for any extra memory-mapped regions (except initial) */
445 static void bpf_map_mmap_open(struct vm_area_struct *vma)
446 {
447 	struct bpf_map *map = vma->vm_file->private_data;
448 
449 	bpf_map_inc_with_uref(map);
450 
451 	if (vma->vm_flags & VM_WRITE) {
452 		mutex_lock(&map->freeze_mutex);
453 		map->writecnt++;
454 		mutex_unlock(&map->freeze_mutex);
455 	}
456 }
457 
458 /* called for all unmapped memory regions (including initial) */
459 static void bpf_map_mmap_close(struct vm_area_struct *vma)
460 {
461 	struct bpf_map *map = vma->vm_file->private_data;
462 
463 	if (vma->vm_flags & VM_WRITE) {
464 		mutex_lock(&map->freeze_mutex);
465 		map->writecnt--;
466 		mutex_unlock(&map->freeze_mutex);
467 	}
468 
469 	bpf_map_put_with_uref(map);
470 }
471 
472 static const struct vm_operations_struct bpf_map_default_vmops = {
473 	.open		= bpf_map_mmap_open,
474 	.close		= bpf_map_mmap_close,
475 };
476 
477 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
478 {
479 	struct bpf_map *map = filp->private_data;
480 	int err;
481 
482 	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
483 		return -ENOTSUPP;
484 
485 	if (!(vma->vm_flags & VM_SHARED))
486 		return -EINVAL;
487 
488 	mutex_lock(&map->freeze_mutex);
489 
490 	if ((vma->vm_flags & VM_WRITE) && map->frozen) {
491 		err = -EPERM;
492 		goto out;
493 	}
494 
495 	/* set default open/close callbacks */
496 	vma->vm_ops = &bpf_map_default_vmops;
497 	vma->vm_private_data = map;
498 
499 	err = map->ops->map_mmap(map, vma);
500 	if (err)
501 		goto out;
502 
503 	bpf_map_inc_with_uref(map);
504 
505 	if (vma->vm_flags & VM_WRITE)
506 		map->writecnt++;
507 out:
508 	mutex_unlock(&map->freeze_mutex);
509 	return err;
510 }
511 
512 const struct file_operations bpf_map_fops = {
513 #ifdef CONFIG_PROC_FS
514 	.show_fdinfo	= bpf_map_show_fdinfo,
515 #endif
516 	.release	= bpf_map_release,
517 	.read		= bpf_dummy_read,
518 	.write		= bpf_dummy_write,
519 	.mmap		= bpf_map_mmap,
520 };
521 
522 int bpf_map_new_fd(struct bpf_map *map, int flags)
523 {
524 	int ret;
525 
526 	ret = security_bpf_map(map, OPEN_FMODE(flags));
527 	if (ret < 0)
528 		return ret;
529 
530 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
531 				flags | O_CLOEXEC);
532 }
533 
534 int bpf_get_file_flag(int flags)
535 {
536 	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
537 		return -EINVAL;
538 	if (flags & BPF_F_RDONLY)
539 		return O_RDONLY;
540 	if (flags & BPF_F_WRONLY)
541 		return O_WRONLY;
542 	return O_RDWR;
543 }
544 
545 /* helper macro to check that unused fields of 'union bpf_attr' are zero */
546 #define CHECK_ATTR(CMD) \
547 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
548 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
549 		   sizeof(*attr) - \
550 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
551 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
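/* For illustration, assuming BPF_MAP_CREATE_LAST_FIELD is btf_value_type_id
 * (as defined for map_create() below): CHECK_ATTR(BPF_MAP_CREATE) runs
 * memchr_inv() over every byte of the union past btf_value_type_id and
 * evaluates to true if any of those bytes is non-zero, i.e. if userspace
 * set a field this kernel does not know about.
 */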
552 
553 /* dst and src must each have at least BPF_OBJ_NAME_LEN bytes.
554  * Return 0 on success and < 0 on error.
555  */
556 static int bpf_obj_name_cpy(char *dst, const char *src)
557 {
558 	const char *end = src + BPF_OBJ_NAME_LEN;
559 
560 	memset(dst, 0, BPF_OBJ_NAME_LEN);
561 	/* Copy all isalnum(), '_' and '.' chars. */
562 	while (src < end && *src) {
563 		if (!isalnum(*src) &&
564 		    *src != '_' && *src != '.')
565 			return -EINVAL;
566 		*dst++ = *src++;
567 	}
568 
569 	/* No '\0' found within BPF_OBJ_NAME_LEN bytes */
570 	if (src == end)
571 		return -EINVAL;
572 
573 	return 0;
574 }
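/* Examples (sketch): "my_prog.v1_2" is accepted; "my prog" and "my-prog"
 * fail with -EINVAL since ' ' and '-' are neither alphanumeric nor '_'/'.';
 * a name filling all BPF_OBJ_NAME_LEN bytes without a terminating '\0'
 * also fails with -EINVAL.
 */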
575 
576 int map_check_no_btf(const struct bpf_map *map,
577 		     const struct btf *btf,
578 		     const struct btf_type *key_type,
579 		     const struct btf_type *value_type)
580 {
581 	return -ENOTSUPP;
582 }
583 
584 static int map_check_btf(struct bpf_map *map, const struct btf *btf,
585 			 u32 btf_key_id, u32 btf_value_id)
586 {
587 	const struct btf_type *key_type, *value_type;
588 	u32 key_size, value_size;
589 	int ret = 0;
590 
591 	/* Some maps allow key to be unspecified. */
592 	if (btf_key_id) {
593 		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
594 		if (!key_type || key_size != map->key_size)
595 			return -EINVAL;
596 	} else {
597 		key_type = btf_type_by_id(btf, 0);
598 		if (!map->ops->map_check_btf)
599 			return -EINVAL;
600 	}
601 
602 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
603 	if (!value_type || value_size != map->value_size)
604 		return -EINVAL;
605 
606 	map->spin_lock_off = btf_find_spin_lock(btf, value_type);
607 
608 	if (map_value_has_spin_lock(map)) {
609 		if (map->map_flags & BPF_F_RDONLY_PROG)
610 			return -EACCES;
611 		if (map->map_type != BPF_MAP_TYPE_HASH &&
612 		    map->map_type != BPF_MAP_TYPE_ARRAY &&
613 		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
614 		    map->map_type != BPF_MAP_TYPE_SK_STORAGE)
615 			return -ENOTSUPP;
616 		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
617 		    map->value_size) {
618 			WARN_ONCE(1,
619 				  "verifier bug spin_lock_off %d value_size %d\n",
620 				  map->spin_lock_off, map->value_size);
621 			return -EFAULT;
622 		}
623 	}
624 
625 	if (map->ops->map_check_btf)
626 		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
627 
628 	return ret;
629 }
630 
631 #define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
632 /* called via syscall */
633 static int map_create(union bpf_attr *attr)
634 {
635 	int numa_node = bpf_map_attr_numa_node(attr);
636 	struct bpf_map_memory mem;
637 	struct bpf_map *map;
638 	int f_flags;
639 	int err;
640 
641 	err = CHECK_ATTR(BPF_MAP_CREATE);
642 	if (err)
643 		return -EINVAL;
644 
645 	f_flags = bpf_get_file_flag(attr->map_flags);
646 	if (f_flags < 0)
647 		return f_flags;
648 
649 	if (numa_node != NUMA_NO_NODE &&
650 	    ((unsigned int)numa_node >= nr_node_ids ||
651 	     !node_online(numa_node)))
652 		return -EINVAL;
653 
654 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
655 	map = find_and_alloc_map(attr);
656 	if (IS_ERR(map))
657 		return PTR_ERR(map);
658 
659 	err = bpf_obj_name_cpy(map->name, attr->map_name);
660 	if (err)
661 		goto free_map;
662 
663 	atomic64_set(&map->refcnt, 1);
664 	atomic64_set(&map->usercnt, 1);
665 	mutex_init(&map->freeze_mutex);
666 
667 	if (attr->btf_key_type_id || attr->btf_value_type_id) {
668 		struct btf *btf;
669 
670 		if (!attr->btf_value_type_id) {
671 			err = -EINVAL;
672 			goto free_map;
673 		}
674 
675 		btf = btf_get_by_fd(attr->btf_fd);
676 		if (IS_ERR(btf)) {
677 			err = PTR_ERR(btf);
678 			goto free_map;
679 		}
680 
681 		err = map_check_btf(map, btf, attr->btf_key_type_id,
682 				    attr->btf_value_type_id);
683 		if (err) {
684 			btf_put(btf);
685 			goto free_map;
686 		}
687 
688 		map->btf = btf;
689 		map->btf_key_type_id = attr->btf_key_type_id;
690 		map->btf_value_type_id = attr->btf_value_type_id;
691 	} else {
692 		map->spin_lock_off = -EINVAL;
693 	}
694 
695 	err = security_bpf_map_alloc(map);
696 	if (err)
697 		goto free_map;
698 
699 	err = bpf_map_alloc_id(map);
700 	if (err)
701 		goto free_map_sec;
702 
703 	err = bpf_map_new_fd(map, f_flags);
704 	if (err < 0) {
705 		/* failed to allocate fd.
706 		 * bpf_map_put_with_uref() is needed because the above
707 		 * bpf_map_alloc_id() has published the map
708 		 * to userspace, and userspace may already
709 		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
710 		 */
711 		bpf_map_put_with_uref(map);
712 		return err;
713 	}
714 
715 	return err;
716 
717 free_map_sec:
718 	security_bpf_map_free(map);
719 free_map:
720 	btf_put(map->btf);
721 	bpf_map_charge_move(&mem, &map->memory);
722 	map->ops->map_free(map);
723 	bpf_map_charge_finish(&mem);
724 	return err;
725 }
726 
727 /* If an error is returned, the fd is released.
728  * On success the caller should complete fd access with a matching fdput().
729  */
730 struct bpf_map *__bpf_map_get(struct fd f)
731 {
732 	if (!f.file)
733 		return ERR_PTR(-EBADF);
734 	if (f.file->f_op != &bpf_map_fops) {
735 		fdput(f);
736 		return ERR_PTR(-EINVAL);
737 	}
738 
739 	return f.file->private_data;
740 }
741 
742 void bpf_map_inc(struct bpf_map *map)
743 {
744 	atomic64_inc(&map->refcnt);
745 }
746 EXPORT_SYMBOL_GPL(bpf_map_inc);
747 
748 void bpf_map_inc_with_uref(struct bpf_map *map)
749 {
750 	atomic64_inc(&map->refcnt);
751 	atomic64_inc(&map->usercnt);
752 }
753 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
754 
755 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
756 {
757 	struct fd f = fdget(ufd);
758 	struct bpf_map *map;
759 
760 	map = __bpf_map_get(f);
761 	if (IS_ERR(map))
762 		return map;
763 
764 	bpf_map_inc_with_uref(map);
765 	fdput(f);
766 
767 	return map;
768 }
769 
770 /* map_idr_lock should have been held */
771 static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
772 {
773 	int refold;
774 
775 	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
776 	if (!refold)
777 		return ERR_PTR(-ENOENT);
778 	if (uref)
779 		atomic64_inc(&map->usercnt);
780 
781 	return map;
782 }
783 
784 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
785 {
786 	spin_lock_bh(&map_idr_lock);
787 	map = __bpf_map_inc_not_zero(map, false);
788 	spin_unlock_bh(&map_idr_lock);
789 
790 	return map;
791 }
792 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
793 
794 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
795 {
796 	return -ENOTSUPP;
797 }
798 
799 static void *__bpf_copy_key(void __user *ukey, u64 key_size)
800 {
801 	if (key_size)
802 		return memdup_user(ukey, key_size);
803 
804 	if (ukey)
805 		return ERR_PTR(-EINVAL);
806 
807 	return NULL;
808 }
809 
810 /* last field in 'union bpf_attr' used by this command */
811 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
812 
813 static int map_lookup_elem(union bpf_attr *attr)
814 {
815 	void __user *ukey = u64_to_user_ptr(attr->key);
816 	void __user *uvalue = u64_to_user_ptr(attr->value);
817 	int ufd = attr->map_fd;
818 	struct bpf_map *map;
819 	void *key, *value, *ptr;
820 	u32 value_size;
821 	struct fd f;
822 	int err;
823 
824 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
825 		return -EINVAL;
826 
827 	if (attr->flags & ~BPF_F_LOCK)
828 		return -EINVAL;
829 
830 	f = fdget(ufd);
831 	map = __bpf_map_get(f);
832 	if (IS_ERR(map))
833 		return PTR_ERR(map);
834 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
835 		err = -EPERM;
836 		goto err_put;
837 	}
838 
839 	if ((attr->flags & BPF_F_LOCK) &&
840 	    !map_value_has_spin_lock(map)) {
841 		err = -EINVAL;
842 		goto err_put;
843 	}
844 
845 	key = __bpf_copy_key(ukey, map->key_size);
846 	if (IS_ERR(key)) {
847 		err = PTR_ERR(key);
848 		goto err_put;
849 	}
850 
851 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
852 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
853 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
854 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
855 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
856 	else if (IS_FD_MAP(map))
857 		value_size = sizeof(u32);
858 	else
859 		value_size = map->value_size;
860 
861 	err = -ENOMEM;
862 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
863 	if (!value)
864 		goto free_key;
865 
866 	if (bpf_map_is_dev_bound(map)) {
867 		err = bpf_map_offload_lookup_elem(map, key, value);
868 		goto done;
869 	}
870 
871 	preempt_disable();
872 	this_cpu_inc(bpf_prog_active);
873 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
874 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
875 		err = bpf_percpu_hash_copy(map, key, value);
876 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
877 		err = bpf_percpu_array_copy(map, key, value);
878 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
879 		err = bpf_percpu_cgroup_storage_copy(map, key, value);
880 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
881 		err = bpf_stackmap_copy(map, key, value);
882 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
883 		err = bpf_fd_array_map_lookup_elem(map, key, value);
884 	} else if (IS_FD_HASH(map)) {
885 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
886 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
887 		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
888 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
889 		   map->map_type == BPF_MAP_TYPE_STACK) {
890 		err = map->ops->map_peek_elem(map, value);
891 	} else {
892 		rcu_read_lock();
893 		if (map->ops->map_lookup_elem_sys_only)
894 			ptr = map->ops->map_lookup_elem_sys_only(map, key);
895 		else
896 			ptr = map->ops->map_lookup_elem(map, key);
897 		if (IS_ERR(ptr)) {
898 			err = PTR_ERR(ptr);
899 		} else if (!ptr) {
900 			err = -ENOENT;
901 		} else {
902 			err = 0;
903 			if (attr->flags & BPF_F_LOCK)
904 				/* lock 'ptr' and copy everything but lock */
905 				copy_map_value_locked(map, value, ptr, true);
906 			else
907 				copy_map_value(map, value, ptr);
908 			/* mask lock, since value wasn't zero-initialized */
909 			check_and_init_map_lock(map, value);
910 		}
911 		rcu_read_unlock();
912 	}
913 	this_cpu_dec(bpf_prog_active);
914 	preempt_enable();
915 
916 done:
917 	if (err)
918 		goto free_value;
919 
920 	err = -EFAULT;
921 	if (copy_to_user(uvalue, value, value_size) != 0)
922 		goto free_value;
923 
924 	err = 0;
925 
926 free_value:
927 	kfree(value);
928 free_key:
929 	kfree(key);
930 err_put:
931 	fdput(f);
932 	return err;
933 }
934 
935 static void maybe_wait_bpf_programs(struct bpf_map *map)
936 {
937 	/* Wait for any running BPF programs to complete so that
938 	 * userspace, when we return to it, knows that all programs
939 	 * that could be running use the new map value.
940 	 */
941 	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
942 	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
943 		synchronize_rcu();
944 }
945 
946 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
947 
948 static int map_update_elem(union bpf_attr *attr)
949 {
950 	void __user *ukey = u64_to_user_ptr(attr->key);
951 	void __user *uvalue = u64_to_user_ptr(attr->value);
952 	int ufd = attr->map_fd;
953 	struct bpf_map *map;
954 	void *key, *value;
955 	u32 value_size;
956 	struct fd f;
957 	int err;
958 
959 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
960 		return -EINVAL;
961 
962 	f = fdget(ufd);
963 	map = __bpf_map_get(f);
964 	if (IS_ERR(map))
965 		return PTR_ERR(map);
966 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
967 		err = -EPERM;
968 		goto err_put;
969 	}
970 
971 	if ((attr->flags & BPF_F_LOCK) &&
972 	    !map_value_has_spin_lock(map)) {
973 		err = -EINVAL;
974 		goto err_put;
975 	}
976 
977 	key = __bpf_copy_key(ukey, map->key_size);
978 	if (IS_ERR(key)) {
979 		err = PTR_ERR(key);
980 		goto err_put;
981 	}
982 
983 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
984 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
985 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
986 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
987 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
988 	else
989 		value_size = map->value_size;
990 
991 	err = -ENOMEM;
992 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
993 	if (!value)
994 		goto free_key;
995 
996 	err = -EFAULT;
997 	if (copy_from_user(value, uvalue, value_size) != 0)
998 		goto free_value;
999 
1000 	/* Need to create a kthread, thus must support schedule */
1001 	if (bpf_map_is_dev_bound(map)) {
1002 		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
1003 		goto out;
1004 	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
1005 		   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
1006 		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
1007 		err = map->ops->map_update_elem(map, key, value, attr->flags);
1008 		goto out;
1009 	} else if (IS_FD_PROG_ARRAY(map)) {
1010 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
1011 						   attr->flags);
1012 		goto out;
1013 	}
1014 
1015 	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
1016 	 * inside a bpf map update or delete; otherwise deadlocks are possible
1017 	 */
1018 	preempt_disable();
1019 	__this_cpu_inc(bpf_prog_active);
1020 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1021 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
1022 		err = bpf_percpu_hash_update(map, key, value, attr->flags);
1023 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
1024 		err = bpf_percpu_array_update(map, key, value, attr->flags);
1025 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
1026 		err = bpf_percpu_cgroup_storage_update(map, key, value,
1027 						       attr->flags);
1028 	} else if (IS_FD_ARRAY(map)) {
1029 		rcu_read_lock();
1030 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
1031 						   attr->flags);
1032 		rcu_read_unlock();
1033 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1034 		rcu_read_lock();
1035 		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
1036 						  attr->flags);
1037 		rcu_read_unlock();
1038 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
1039 		/* rcu_read_lock() is not needed */
1040 		err = bpf_fd_reuseport_array_update_elem(map, key, value,
1041 							 attr->flags);
1042 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1043 		   map->map_type == BPF_MAP_TYPE_STACK) {
1044 		err = map->ops->map_push_elem(map, value, attr->flags);
1045 	} else {
1046 		rcu_read_lock();
1047 		err = map->ops->map_update_elem(map, key, value, attr->flags);
1048 		rcu_read_unlock();
1049 	}
1050 	__this_cpu_dec(bpf_prog_active);
1051 	preempt_enable();
1052 	maybe_wait_bpf_programs(map);
1053 out:
1054 free_value:
1055 	kfree(value);
1056 free_key:
1057 	kfree(key);
1058 err_put:
1059 	fdput(f);
1060 	return err;
1061 }
1062 
1063 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1064 
1065 static int map_delete_elem(union bpf_attr *attr)
1066 {
1067 	void __user *ukey = u64_to_user_ptr(attr->key);
1068 	int ufd = attr->map_fd;
1069 	struct bpf_map *map;
1070 	struct fd f;
1071 	void *key;
1072 	int err;
1073 
1074 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1075 		return -EINVAL;
1076 
1077 	f = fdget(ufd);
1078 	map = __bpf_map_get(f);
1079 	if (IS_ERR(map))
1080 		return PTR_ERR(map);
1081 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1082 		err = -EPERM;
1083 		goto err_put;
1084 	}
1085 
1086 	key = __bpf_copy_key(ukey, map->key_size);
1087 	if (IS_ERR(key)) {
1088 		err = PTR_ERR(key);
1089 		goto err_put;
1090 	}
1091 
1092 	if (bpf_map_is_dev_bound(map)) {
1093 		err = bpf_map_offload_delete_elem(map, key);
1094 		goto out;
1095 	} else if (IS_FD_PROG_ARRAY(map)) {
1096 		err = map->ops->map_delete_elem(map, key);
1097 		goto out;
1098 	}
1099 
1100 	preempt_disable();
1101 	__this_cpu_inc(bpf_prog_active);
1102 	rcu_read_lock();
1103 	err = map->ops->map_delete_elem(map, key);
1104 	rcu_read_unlock();
1105 	__this_cpu_dec(bpf_prog_active);
1106 	preempt_enable();
1107 	maybe_wait_bpf_programs(map);
1108 out:
1109 	kfree(key);
1110 err_put:
1111 	fdput(f);
1112 	return err;
1113 }
1114 
1115 /* last field in 'union bpf_attr' used by this command */
1116 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1117 
1118 static int map_get_next_key(union bpf_attr *attr)
1119 {
1120 	void __user *ukey = u64_to_user_ptr(attr->key);
1121 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1122 	int ufd = attr->map_fd;
1123 	struct bpf_map *map;
1124 	void *key, *next_key;
1125 	struct fd f;
1126 	int err;
1127 
1128 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1129 		return -EINVAL;
1130 
1131 	f = fdget(ufd);
1132 	map = __bpf_map_get(f);
1133 	if (IS_ERR(map))
1134 		return PTR_ERR(map);
1135 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1136 		err = -EPERM;
1137 		goto err_put;
1138 	}
1139 
1140 	if (ukey) {
1141 		key = __bpf_copy_key(ukey, map->key_size);
1142 		if (IS_ERR(key)) {
1143 			err = PTR_ERR(key);
1144 			goto err_put;
1145 		}
1146 	} else {
1147 		key = NULL;
1148 	}
1149 
1150 	err = -ENOMEM;
1151 	next_key = kmalloc(map->key_size, GFP_USER);
1152 	if (!next_key)
1153 		goto free_key;
1154 
1155 	if (bpf_map_is_dev_bound(map)) {
1156 		err = bpf_map_offload_get_next_key(map, key, next_key);
1157 		goto out;
1158 	}
1159 
1160 	rcu_read_lock();
1161 	err = map->ops->map_get_next_key(map, key, next_key);
1162 	rcu_read_unlock();
1163 out:
1164 	if (err)
1165 		goto free_next_key;
1166 
1167 	err = -EFAULT;
1168 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1169 		goto free_next_key;
1170 
1171 	err = 0;
1172 
1173 free_next_key:
1174 	kfree(next_key);
1175 free_key:
1176 	kfree(key);
1177 err_put:
1178 	fdput(f);
1179 	return err;
1180 }
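/* Userspace iteration sketch (using the libbpf-style wrappers
 * bpf_map_get_next_key()/bpf_map_lookup_elem(); buffers and error handling
 * elided): a NULL key yields the first key, and iteration stops once the
 * syscall returns -ENOENT:
 *
 *	void *key = NULL;
 *	while (!bpf_map_get_next_key(map_fd, key, next_key)) {
 *		bpf_map_lookup_elem(map_fd, next_key, value);
 *		key = next_key;
 *	}
 */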
1181 
1182 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
1183 
1184 static int map_lookup_and_delete_elem(union bpf_attr *attr)
1185 {
1186 	void __user *ukey = u64_to_user_ptr(attr->key);
1187 	void __user *uvalue = u64_to_user_ptr(attr->value);
1188 	int ufd = attr->map_fd;
1189 	struct bpf_map *map;
1190 	void *key, *value;
1191 	u32 value_size;
1192 	struct fd f;
1193 	int err;
1194 
1195 	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1196 		return -EINVAL;
1197 
1198 	f = fdget(ufd);
1199 	map = __bpf_map_get(f);
1200 	if (IS_ERR(map))
1201 		return PTR_ERR(map);
1202 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1203 		err = -EPERM;
1204 		goto err_put;
1205 	}
1206 
1207 	key = __bpf_copy_key(ukey, map->key_size);
1208 	if (IS_ERR(key)) {
1209 		err = PTR_ERR(key);
1210 		goto err_put;
1211 	}
1212 
1213 	value_size = map->value_size;
1214 
1215 	err = -ENOMEM;
1216 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1217 	if (!value)
1218 		goto free_key;
1219 
1220 	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1221 	    map->map_type == BPF_MAP_TYPE_STACK) {
1222 		err = map->ops->map_pop_elem(map, value);
1223 	} else {
1224 		err = -ENOTSUPP;
1225 	}
1226 
1227 	if (err)
1228 		goto free_value;
1229 
1230 	if (copy_to_user(uvalue, value, value_size) != 0)
1231 		goto free_value;
1232 
1233 	err = 0;
1234 
1235 free_value:
1236 	kfree(value);
1237 free_key:
1238 	kfree(key);
1239 err_put:
1240 	fdput(f);
1241 	return err;
1242 }
1243 
1244 #define BPF_MAP_FREEZE_LAST_FIELD map_fd
1245 
1246 static int map_freeze(const union bpf_attr *attr)
1247 {
1248 	int err = 0, ufd = attr->map_fd;
1249 	struct bpf_map *map;
1250 	struct fd f;
1251 
1252 	if (CHECK_ATTR(BPF_MAP_FREEZE))
1253 		return -EINVAL;
1254 
1255 	f = fdget(ufd);
1256 	map = __bpf_map_get(f);
1257 	if (IS_ERR(map))
1258 		return PTR_ERR(map);
1259 
1260 	mutex_lock(&map->freeze_mutex);
1261 
1262 	if (map->writecnt) {
1263 		err = -EBUSY;
1264 		goto err_put;
1265 	}
1266 	if (READ_ONCE(map->frozen)) {
1267 		err = -EBUSY;
1268 		goto err_put;
1269 	}
1270 	if (!capable(CAP_SYS_ADMIN)) {
1271 		err = -EPERM;
1272 		goto err_put;
1273 	}
1274 
1275 	WRITE_ONCE(map->frozen, true);
1276 err_put:
1277 	mutex_unlock(&map->freeze_mutex);
1278 	fdput(f);
1279 	return err;
1280 }
1281 
1282 static const struct bpf_prog_ops * const bpf_prog_types[] = {
1283 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1284 	[_id] = & _name ## _prog_ops,
1285 #define BPF_MAP_TYPE(_id, _ops)
1286 #include <linux/bpf_types.h>
1287 #undef BPF_PROG_TYPE
1288 #undef BPF_MAP_TYPE
1289 };
1290 
1291 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1292 {
1293 	const struct bpf_prog_ops *ops;
1294 
1295 	if (type >= ARRAY_SIZE(bpf_prog_types))
1296 		return -EINVAL;
1297 	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1298 	ops = bpf_prog_types[type];
1299 	if (!ops)
1300 		return -EINVAL;
1301 
1302 	if (!bpf_prog_is_dev_bound(prog->aux))
1303 		prog->aux->ops = ops;
1304 	else
1305 		prog->aux->ops = &bpf_offload_prog_ops;
1306 	prog->type = type;
1307 	return 0;
1308 }
1309 
1310 enum bpf_audit {
1311 	BPF_AUDIT_LOAD,
1312 	BPF_AUDIT_UNLOAD,
1313 	BPF_AUDIT_MAX,
1314 };
1315 
1316 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1317 	[BPF_AUDIT_LOAD]   = "LOAD",
1318 	[BPF_AUDIT_UNLOAD] = "UNLOAD",
1319 };
1320 
1321 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1322 {
1323 	struct audit_context *ctx = NULL;
1324 	struct audit_buffer *ab;
1325 
1326 	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1327 		return;
1328 	if (audit_enabled == AUDIT_OFF)
1329 		return;
1330 	if (op == BPF_AUDIT_LOAD)
1331 		ctx = audit_context();
1332 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1333 	if (unlikely(!ab))
1334 		return;
1335 	audit_log_format(ab, "prog-id=%u op=%s",
1336 			 prog->aux->id, bpf_audit_str[op]);
1337 	audit_log_end(ab);
1338 }
1339 
1340 int __bpf_prog_charge(struct user_struct *user, u32 pages)
1341 {
1342 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1343 	unsigned long user_bufs;
1344 
1345 	if (user) {
1346 		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
1347 		if (user_bufs > memlock_limit) {
1348 			atomic_long_sub(pages, &user->locked_vm);
1349 			return -EPERM;
1350 		}
1351 	}
1352 
1353 	return 0;
1354 }
1355 
1356 void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
1357 {
1358 	if (user)
1359 		atomic_long_sub(pages, &user->locked_vm);
1360 }
1361 
1362 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
1363 {
1364 	struct user_struct *user = get_current_user();
1365 	int ret;
1366 
1367 	ret = __bpf_prog_charge(user, prog->pages);
1368 	if (ret) {
1369 		free_uid(user);
1370 		return ret;
1371 	}
1372 
1373 	prog->aux->user = user;
1374 	return 0;
1375 }
1376 
1377 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
1378 {
1379 	struct user_struct *user = prog->aux->user;
1380 
1381 	__bpf_prog_uncharge(user, prog->pages);
1382 	free_uid(user);
1383 }
1384 
1385 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1386 {
1387 	int id;
1388 
1389 	idr_preload(GFP_KERNEL);
1390 	spin_lock_bh(&prog_idr_lock);
1391 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1392 	if (id > 0)
1393 		prog->aux->id = id;
1394 	spin_unlock_bh(&prog_idr_lock);
1395 	idr_preload_end();
1396 
1397 	/* id is in [1, INT_MAX) */
1398 	if (WARN_ON_ONCE(!id))
1399 		return -ENOSPC;
1400 
1401 	return id > 0 ? 0 : id;
1402 }
1403 
1404 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1405 {
1406 	/* cBPF to eBPF migrations are currently not in the idr store.
1407 	 * Offloaded programs are removed from the store when their device
1408 	 * disappears - even if someone grabs an fd to them, they are unusable,
1409 	 * simply waiting for the refcnt to drop so they can be freed.
1410 	 */
1411 	if (!prog->aux->id)
1412 		return;
1413 
1414 	if (do_idr_lock)
1415 		spin_lock_bh(&prog_idr_lock);
1416 	else
1417 		__acquire(&prog_idr_lock);
1418 
1419 	idr_remove(&prog_idr, prog->aux->id);
1420 	prog->aux->id = 0;
1421 
1422 	if (do_idr_lock)
1423 		spin_unlock_bh(&prog_idr_lock);
1424 	else
1425 		__release(&prog_idr_lock);
1426 }
1427 
1428 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1429 {
1430 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1431 
1432 	kvfree(aux->func_info);
1433 	kfree(aux->func_info_aux);
1434 	bpf_prog_uncharge_memlock(aux->prog);
1435 	security_bpf_prog_free(aux);
1436 	bpf_prog_free(aux->prog);
1437 }
1438 
1439 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1440 {
1441 	bpf_prog_kallsyms_del_all(prog);
1442 	btf_put(prog->aux->btf);
1443 	bpf_prog_free_linfo(prog);
1444 
1445 	if (deferred)
1446 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1447 	else
1448 		__bpf_prog_put_rcu(&prog->aux->rcu);
1449 }
1450 
1451 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1452 {
1453 	if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1454 		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1455 		bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1456 		/* bpf_prog_free_id() must be called first */
1457 		bpf_prog_free_id(prog, do_idr_lock);
1458 		__bpf_prog_put_noref(prog, true);
1459 	}
1460 }
1461 
1462 void bpf_prog_put(struct bpf_prog *prog)
1463 {
1464 	__bpf_prog_put(prog, true);
1465 }
1466 EXPORT_SYMBOL_GPL(bpf_prog_put);
1467 
1468 static int bpf_prog_release(struct inode *inode, struct file *filp)
1469 {
1470 	struct bpf_prog *prog = filp->private_data;
1471 
1472 	bpf_prog_put(prog);
1473 	return 0;
1474 }
1475 
1476 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1477 			       struct bpf_prog_stats *stats)
1478 {
1479 	u64 nsecs = 0, cnt = 0;
1480 	int cpu;
1481 
1482 	for_each_possible_cpu(cpu) {
1483 		const struct bpf_prog_stats *st;
1484 		unsigned int start;
1485 		u64 tnsecs, tcnt;
1486 
1487 		st = per_cpu_ptr(prog->aux->stats, cpu);
1488 		do {
1489 			start = u64_stats_fetch_begin_irq(&st->syncp);
1490 			tnsecs = st->nsecs;
1491 			tcnt = st->cnt;
1492 		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
1493 		nsecs += tnsecs;
1494 		cnt += tcnt;
1495 	}
1496 	stats->nsecs = nsecs;
1497 	stats->cnt = cnt;
1498 }
1499 
1500 #ifdef CONFIG_PROC_FS
1501 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1502 {
1503 	const struct bpf_prog *prog = filp->private_data;
1504 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1505 	struct bpf_prog_stats stats;
1506 
1507 	bpf_prog_get_stats(prog, &stats);
1508 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1509 	seq_printf(m,
1510 		   "prog_type:\t%u\n"
1511 		   "prog_jited:\t%u\n"
1512 		   "prog_tag:\t%s\n"
1513 		   "memlock:\t%llu\n"
1514 		   "prog_id:\t%u\n"
1515 		   "run_time_ns:\t%llu\n"
1516 		   "run_cnt:\t%llu\n",
1517 		   prog->type,
1518 		   prog->jited,
1519 		   prog_tag,
1520 		   prog->pages * 1ULL << PAGE_SHIFT,
1521 		   prog->aux->id,
1522 		   stats.nsecs,
1523 		   stats.cnt);
1524 }
1525 #endif
1526 
1527 const struct file_operations bpf_prog_fops = {
1528 #ifdef CONFIG_PROC_FS
1529 	.show_fdinfo	= bpf_prog_show_fdinfo,
1530 #endif
1531 	.release	= bpf_prog_release,
1532 	.read		= bpf_dummy_read,
1533 	.write		= bpf_dummy_write,
1534 };
1535 
1536 int bpf_prog_new_fd(struct bpf_prog *prog)
1537 {
1538 	int ret;
1539 
1540 	ret = security_bpf_prog(prog);
1541 	if (ret < 0)
1542 		return ret;
1543 
1544 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1545 				O_RDWR | O_CLOEXEC);
1546 }
1547 
1548 static struct bpf_prog *____bpf_prog_get(struct fd f)
1549 {
1550 	if (!f.file)
1551 		return ERR_PTR(-EBADF);
1552 	if (f.file->f_op != &bpf_prog_fops) {
1553 		fdput(f);
1554 		return ERR_PTR(-EINVAL);
1555 	}
1556 
1557 	return f.file->private_data;
1558 }
1559 
1560 void bpf_prog_add(struct bpf_prog *prog, int i)
1561 {
1562 	atomic64_add(i, &prog->aux->refcnt);
1563 }
1564 EXPORT_SYMBOL_GPL(bpf_prog_add);
1565 
1566 void bpf_prog_sub(struct bpf_prog *prog, int i)
1567 {
1568 	/* Only to be used for undoing previous bpf_prog_add() in some
1569 	 * error path. We still know that another entity in our call
1570 	 * path holds a reference to the program, thus atomic_sub() can
1571 	 * path holds a reference to the program, thus atomic64_sub() can
1572 	 */
1573 	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1574 }
1575 EXPORT_SYMBOL_GPL(bpf_prog_sub);
1576 
1577 void bpf_prog_inc(struct bpf_prog *prog)
1578 {
1579 	atomic64_inc(&prog->aux->refcnt);
1580 }
1581 EXPORT_SYMBOL_GPL(bpf_prog_inc);
1582 
1583 /* prog_idr_lock should have been held */
1584 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1585 {
1586 	int refold;
1587 
1588 	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1589 
1590 	if (!refold)
1591 		return ERR_PTR(-ENOENT);
1592 
1593 	return prog;
1594 }
1595 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1596 
1597 bool bpf_prog_get_ok(struct bpf_prog *prog,
1598 			    enum bpf_prog_type *attach_type, bool attach_drv)
1599 {
1600 	/* not an attachment, just a refcount inc, always allow */
1601 	if (!attach_type)
1602 		return true;
1603 
1604 	if (prog->type != *attach_type)
1605 		return false;
1606 	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1607 		return false;
1608 
1609 	return true;
1610 }
1611 
1612 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1613 				       bool attach_drv)
1614 {
1615 	struct fd f = fdget(ufd);
1616 	struct bpf_prog *prog;
1617 
1618 	prog = ____bpf_prog_get(f);
1619 	if (IS_ERR(prog))
1620 		return prog;
1621 	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1622 		prog = ERR_PTR(-EINVAL);
1623 		goto out;
1624 	}
1625 
1626 	bpf_prog_inc(prog);
1627 out:
1628 	fdput(f);
1629 	return prog;
1630 }
1631 
1632 struct bpf_prog *bpf_prog_get(u32 ufd)
1633 {
1634 	return __bpf_prog_get(ufd, NULL, false);
1635 }
1636 
1637 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1638 				       bool attach_drv)
1639 {
1640 	return __bpf_prog_get(ufd, &type, attach_drv);
1641 }
1642 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1643 
1644 /* Initially all BPF programs could be loaded w/o specifying
1645  * expected_attach_type. Later for some of them specifying expected_attach_type
1646  * at load time became required so that the program could be validated properly.
1647  * Programs of types that are allowed to be loaded both w/ and w/o (for
1648  * backward compatibility) expected_attach_type should have the default attach
1649  * type assigned to expected_attach_type for the latter case, so that it can be
1650  * validated later at attach time.
1651  *
1652  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1653  * prog type requires it but has some attach types that have to be backward
1654  * compatible.
1655  */
1656 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1657 {
1658 	switch (attr->prog_type) {
1659 	case BPF_PROG_TYPE_CGROUP_SOCK:
1660 		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1661 		 * exist so checking for non-zero is the way to go here.
1662 		 */
1663 		if (!attr->expected_attach_type)
1664 			attr->expected_attach_type =
1665 				BPF_CGROUP_INET_SOCK_CREATE;
1666 		break;
1667 	}
1668 }
1669 
1670 static int
1671 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1672 			   enum bpf_attach_type expected_attach_type,
1673 			   u32 btf_id, u32 prog_fd)
1674 {
1675 	switch (prog_type) {
1676 	case BPF_PROG_TYPE_TRACING:
1677 		if (btf_id > BTF_MAX_TYPE)
1678 			return -EINVAL;
1679 		break;
1680 	default:
1681 		if (btf_id || prog_fd)
1682 			return -EINVAL;
1683 		break;
1684 	}
1685 
1686 	switch (prog_type) {
1687 	case BPF_PROG_TYPE_CGROUP_SOCK:
1688 		switch (expected_attach_type) {
1689 		case BPF_CGROUP_INET_SOCK_CREATE:
1690 		case BPF_CGROUP_INET4_POST_BIND:
1691 		case BPF_CGROUP_INET6_POST_BIND:
1692 			return 0;
1693 		default:
1694 			return -EINVAL;
1695 		}
1696 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1697 		switch (expected_attach_type) {
1698 		case BPF_CGROUP_INET4_BIND:
1699 		case BPF_CGROUP_INET6_BIND:
1700 		case BPF_CGROUP_INET4_CONNECT:
1701 		case BPF_CGROUP_INET6_CONNECT:
1702 		case BPF_CGROUP_UDP4_SENDMSG:
1703 		case BPF_CGROUP_UDP6_SENDMSG:
1704 		case BPF_CGROUP_UDP4_RECVMSG:
1705 		case BPF_CGROUP_UDP6_RECVMSG:
1706 			return 0;
1707 		default:
1708 			return -EINVAL;
1709 		}
1710 	case BPF_PROG_TYPE_CGROUP_SKB:
1711 		switch (expected_attach_type) {
1712 		case BPF_CGROUP_INET_INGRESS:
1713 		case BPF_CGROUP_INET_EGRESS:
1714 			return 0;
1715 		default:
1716 			return -EINVAL;
1717 		}
1718 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
1719 		switch (expected_attach_type) {
1720 		case BPF_CGROUP_SETSOCKOPT:
1721 		case BPF_CGROUP_GETSOCKOPT:
1722 			return 0;
1723 		default:
1724 			return -EINVAL;
1725 		}
1726 	default:
1727 		return 0;
1728 	}
1729 }
1730 
1731 /* last field in 'union bpf_attr' used by this command */
1732 #define	BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
1733 
1734 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
1735 {
1736 	enum bpf_prog_type type = attr->prog_type;
1737 	struct bpf_prog *prog;
1738 	int err;
1739 	char license[128];
1740 	bool is_gpl;
1741 
1742 	if (CHECK_ATTR(BPF_PROG_LOAD))
1743 		return -EINVAL;
1744 
1745 	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
1746 				 BPF_F_ANY_ALIGNMENT |
1747 				 BPF_F_TEST_STATE_FREQ |
1748 				 BPF_F_TEST_RND_HI32))
1749 		return -EINVAL;
1750 
1751 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
1752 	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
1753 	    !capable(CAP_SYS_ADMIN))
1754 		return -EPERM;
1755 
1756 	/* copy eBPF program license from user space */
1757 	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
1758 			      sizeof(license) - 1) < 0)
1759 		return -EFAULT;
1760 	license[sizeof(license) - 1] = 0;
1761 
1762 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
1763 	is_gpl = license_is_gpl_compatible(license);
1764 
1765 	if (attr->insn_cnt == 0 ||
1766 	    attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
1767 		return -E2BIG;
1768 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
1769 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
1770 	    !capable(CAP_SYS_ADMIN))
1771 		return -EPERM;
1772 
1773 	bpf_prog_load_fixup_attach_type(attr);
1774 	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
1775 				       attr->attach_btf_id,
1776 				       attr->attach_prog_fd))
1777 		return -EINVAL;
1778 
1779 	/* plain bpf_prog allocation */
1780 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
1781 	if (!prog)
1782 		return -ENOMEM;
1783 
1784 	prog->expected_attach_type = attr->expected_attach_type;
1785 	prog->aux->attach_btf_id = attr->attach_btf_id;
1786 	if (attr->attach_prog_fd) {
1787 		struct bpf_prog *tgt_prog;
1788 
1789 		tgt_prog = bpf_prog_get(attr->attach_prog_fd);
1790 		if (IS_ERR(tgt_prog)) {
1791 			err = PTR_ERR(tgt_prog);
1792 			goto free_prog_nouncharge;
1793 		}
1794 		prog->aux->linked_prog = tgt_prog;
1795 	}
1796 
1797 	prog->aux->offload_requested = !!attr->prog_ifindex;
1798 
1799 	err = security_bpf_prog_alloc(prog->aux);
1800 	if (err)
1801 		goto free_prog_nouncharge;
1802 
1803 	err = bpf_prog_charge_memlock(prog);
1804 	if (err)
1805 		goto free_prog_sec;
1806 
1807 	prog->len = attr->insn_cnt;
1808 
1809 	err = -EFAULT;
1810 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
1811 			   bpf_prog_insn_size(prog)) != 0)
1812 		goto free_prog;
1813 
1814 	prog->orig_prog = NULL;
1815 	prog->jited = 0;
1816 
1817 	atomic64_set(&prog->aux->refcnt, 1);
1818 	prog->gpl_compatible = is_gpl ? 1 : 0;
1819 
1820 	if (bpf_prog_is_dev_bound(prog->aux)) {
1821 		err = bpf_prog_offload_init(prog, attr);
1822 		if (err)
1823 			goto free_prog;
1824 	}
1825 
1826 	/* find program type: socket_filter vs tracing_filter */
1827 	err = find_prog_type(type, prog);
1828 	if (err < 0)
1829 		goto free_prog;
1830 
1831 	prog->aux->load_time = ktime_get_boottime_ns();
1832 	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
1833 	if (err)
1834 		goto free_prog;
1835 
1836 	/* run eBPF verifier */
1837 	err = bpf_check(&prog, attr, uattr);
1838 	if (err < 0)
1839 		goto free_used_maps;
1840 
1841 	prog = bpf_prog_select_runtime(prog, &err);
1842 	if (err < 0)
1843 		goto free_used_maps;
1844 
1845 	err = bpf_prog_alloc_id(prog);
1846 	if (err)
1847 		goto free_used_maps;
1848 
1849 	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
1850 	 * effectively publicly exposed. However, retrieving via
1851 	 * bpf_prog_get_fd_by_id() will take another reference,
1852 	 * therefore it cannot be gone underneath us.
1853 	 *
1854 	 * Only for the time /after/ successful bpf_prog_new_fd()
1855 	 * and before returning to userspace, we might just hold
1856 	 * one reference and any parallel close on that fd could
1857 	 * rip everything out. Hence, below notifications must
1858 	 * happen before bpf_prog_new_fd().
1859 	 *
1860 	 * Also, any failure handling from this point onwards must
1861 	 * be using bpf_prog_put() given the program is exposed.
1862 	 */
1863 	bpf_prog_kallsyms_add(prog);
1864 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
1865 	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
1866 
1867 	err = bpf_prog_new_fd(prog);
1868 	if (err < 0)
1869 		bpf_prog_put(prog);
1870 	return err;
1871 
1872 free_used_maps:
1873 	/* In case we have subprogs, we need to wait for a grace
1874 	 * period before we can tear down JIT memory since symbols
1875 	 * are already exposed under kallsyms.
1876 	 */
1877 	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
1878 	return err;
1879 free_prog:
1880 	bpf_prog_uncharge_memlock(prog);
1881 free_prog_sec:
1882 	security_bpf_prog_free(prog->aux);
1883 free_prog_nouncharge:
1884 	bpf_prog_free(prog);
1885 	return err;
1886 }
1887 
1888 #define BPF_OBJ_LAST_FIELD file_flags
1889 
1890 static int bpf_obj_pin(const union bpf_attr *attr)
1891 {
1892 	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
1893 		return -EINVAL;
1894 
1895 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
1896 }
1897 
1898 static int bpf_obj_get(const union bpf_attr *attr)
1899 {
1900 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
1901 	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
1902 		return -EINVAL;
1903 
1904 	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
1905 				attr->file_flags);
1906 }
1907 
1908 static int bpf_tracing_prog_release(struct inode *inode, struct file *filp)
1909 {
1910 	struct bpf_prog *prog = filp->private_data;
1911 
1912 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog));
1913 	bpf_prog_put(prog);
1914 	return 0;
1915 }
1916 
1917 static const struct file_operations bpf_tracing_prog_fops = {
1918 	.release	= bpf_tracing_prog_release,
1919 	.read		= bpf_dummy_read,
1920 	.write		= bpf_dummy_write,
1921 };
1922 
1923 static int bpf_tracing_prog_attach(struct bpf_prog *prog)
1924 {
1925 	int tr_fd, err;
1926 
1927 	if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
1928 	    prog->expected_attach_type != BPF_TRACE_FEXIT) {
1929 		err = -EINVAL;
1930 		goto out_put_prog;
1931 	}
1932 
1933 	err = bpf_trampoline_link_prog(prog);
1934 	if (err)
1935 		goto out_put_prog;
1936 
1937 	tr_fd = anon_inode_getfd("bpf-tracing-prog", &bpf_tracing_prog_fops,
1938 				 prog, O_CLOEXEC);
1939 	if (tr_fd < 0) {
1940 		WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog));
1941 		err = tr_fd;
1942 		goto out_put_prog;
1943 	}
1944 	return tr_fd;
1945 
1946 out_put_prog:
1947 	bpf_prog_put(prog);
1948 	return err;
1949 }
1950 
1951 struct bpf_raw_tracepoint {
1952 	struct bpf_raw_event_map *btp;
1953 	struct bpf_prog *prog;
1954 };
1955 
1956 static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
1957 {
1958 	struct bpf_raw_tracepoint *raw_tp = filp->private_data;
1959 
1960 	if (raw_tp->prog) {
1961 		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
1962 		bpf_prog_put(raw_tp->prog);
1963 	}
1964 	bpf_put_raw_tracepoint(raw_tp->btp);
1965 	kfree(raw_tp);
1966 	return 0;
1967 }
1968 
1969 static const struct file_operations bpf_raw_tp_fops = {
1970 	.release	= bpf_raw_tracepoint_release,
1971 	.read		= bpf_dummy_read,
1972 	.write		= bpf_dummy_write,
1973 };
1974 
1975 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
1976 
1977 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
1978 {
1979 	struct bpf_raw_tracepoint *raw_tp;
1980 	struct bpf_raw_event_map *btp;
1981 	struct bpf_prog *prog;
1982 	const char *tp_name;
1983 	char buf[128];
1984 	int tp_fd, err;
1985 
1986 	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
1987 		return -EINVAL;
1988 
1989 	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
1990 	if (IS_ERR(prog))
1991 		return PTR_ERR(prog);
1992 
1993 	if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
1994 	    prog->type != BPF_PROG_TYPE_TRACING &&
1995 	    prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
1996 		err = -EINVAL;
1997 		goto out_put_prog;
1998 	}
1999 
2000 	if (prog->type == BPF_PROG_TYPE_TRACING) {
2001 		if (attr->raw_tracepoint.name) {
2002 			/* The attach point for this category of programs
2003 			 * should be specified via btf_id during program load.
2004 			 */
2005 			err = -EINVAL;
2006 			goto out_put_prog;
2007 		}
2008 		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
2009 			tp_name = prog->aux->attach_func_name;
2010 		else
2011 			return bpf_tracing_prog_attach(prog);
2012 	} else {
2013 		if (strncpy_from_user(buf,
2014 				      u64_to_user_ptr(attr->raw_tracepoint.name),
2015 				      sizeof(buf) - 1) < 0) {
2016 			err = -EFAULT;
2017 			goto out_put_prog;
2018 		}
2019 		buf[sizeof(buf) - 1] = 0;
2020 		tp_name = buf;
2021 	}
2022 
2023 	btp = bpf_get_raw_tracepoint(tp_name);
2024 	if (!btp) {
2025 		err = -ENOENT;
2026 		goto out_put_prog;
2027 	}
2028 
2029 	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
2030 	if (!raw_tp) {
2031 		err = -ENOMEM;
2032 		goto out_put_btp;
2033 	}
2034 	raw_tp->btp = btp;
2035 	raw_tp->prog = prog;
2036 
2037 	err = bpf_probe_register(raw_tp->btp, prog);
2038 	if (err)
2039 		goto out_free_tp;
2040 
2041 	tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
2042 				 O_CLOEXEC);
2043 	if (tp_fd < 0) {
2044 		bpf_probe_unregister(raw_tp->btp, prog);
2045 		err = tp_fd;
2046 		goto out_free_tp;
2047 	}
2048 	return tp_fd;
2049 
2050 out_free_tp:
2051 	kfree(raw_tp);
2052 out_put_btp:
2053 	bpf_put_raw_tracepoint(btp);
2054 out_put_prog:
2055 	bpf_prog_put(prog);
2056 	return err;
2057 }
2058 
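/*
 * For program types whose semantics depend on the attach point, reject an
 * attach_type that differs from the expected_attach_type given at load
 * time.  CGROUP_SKB only enforces this when the verifier required it; all
 * other program types accept any attach_type here.
 */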
2059 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2060 					     enum bpf_attach_type attach_type)
2061 {
2062 	switch (prog->type) {
2063 	case BPF_PROG_TYPE_CGROUP_SOCK:
2064 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2065 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2066 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2067 	case BPF_PROG_TYPE_CGROUP_SKB:
2068 		return prog->enforce_expected_attach_type &&
2069 			prog->expected_attach_type != attach_type ?
2070 			-EINVAL : 0;
2071 	default:
2072 		return 0;
2073 	}
2074 }
2075 
2076 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
2077 
2078 #define BPF_F_ATTACH_MASK \
2079 	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
2080 
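/*
 * BPF_PROG_ATTACH: map the requested attach_type to a program type, take a
 * reference on the program fd and hand it to the cgroup, sockmap, lirc or
 * flow-dissector attach path.  Sketch of a cgroup attach from user space;
 * attr.target_fd and the use of a cgroup directory fd are assumptions from
 * the UAPI, not fields referenced in this function:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */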
2081 static int bpf_prog_attach(const union bpf_attr *attr)
2082 {
2083 	enum bpf_prog_type ptype;
2084 	struct bpf_prog *prog;
2085 	int ret;
2086 
2087 	if (!capable(CAP_NET_ADMIN))
2088 		return -EPERM;
2089 
2090 	if (CHECK_ATTR(BPF_PROG_ATTACH))
2091 		return -EINVAL;
2092 
2093 	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
2094 		return -EINVAL;
2095 
2096 	switch (attr->attach_type) {
2097 	case BPF_CGROUP_INET_INGRESS:
2098 	case BPF_CGROUP_INET_EGRESS:
2099 		ptype = BPF_PROG_TYPE_CGROUP_SKB;
2100 		break;
2101 	case BPF_CGROUP_INET_SOCK_CREATE:
2102 	case BPF_CGROUP_INET4_POST_BIND:
2103 	case BPF_CGROUP_INET6_POST_BIND:
2104 		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
2105 		break;
2106 	case BPF_CGROUP_INET4_BIND:
2107 	case BPF_CGROUP_INET6_BIND:
2108 	case BPF_CGROUP_INET4_CONNECT:
2109 	case BPF_CGROUP_INET6_CONNECT:
2110 	case BPF_CGROUP_UDP4_SENDMSG:
2111 	case BPF_CGROUP_UDP6_SENDMSG:
2112 	case BPF_CGROUP_UDP4_RECVMSG:
2113 	case BPF_CGROUP_UDP6_RECVMSG:
2114 		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2115 		break;
2116 	case BPF_CGROUP_SOCK_OPS:
2117 		ptype = BPF_PROG_TYPE_SOCK_OPS;
2118 		break;
2119 	case BPF_CGROUP_DEVICE:
2120 		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
2121 		break;
2122 	case BPF_SK_MSG_VERDICT:
2123 		ptype = BPF_PROG_TYPE_SK_MSG;
2124 		break;
2125 	case BPF_SK_SKB_STREAM_PARSER:
2126 	case BPF_SK_SKB_STREAM_VERDICT:
2127 		ptype = BPF_PROG_TYPE_SK_SKB;
2128 		break;
2129 	case BPF_LIRC_MODE2:
2130 		ptype = BPF_PROG_TYPE_LIRC_MODE2;
2131 		break;
2132 	case BPF_FLOW_DISSECTOR:
2133 		ptype = BPF_PROG_TYPE_FLOW_DISSECTOR;
2134 		break;
2135 	case BPF_CGROUP_SYSCTL:
2136 		ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
2137 		break;
2138 	case BPF_CGROUP_GETSOCKOPT:
2139 	case BPF_CGROUP_SETSOCKOPT:
2140 		ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
2141 		break;
2142 	default:
2143 		return -EINVAL;
2144 	}
2145 
2146 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
2147 	if (IS_ERR(prog))
2148 		return PTR_ERR(prog);
2149 
2150 	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
2151 		bpf_prog_put(prog);
2152 		return -EINVAL;
2153 	}
2154 
2155 	switch (ptype) {
2156 	case BPF_PROG_TYPE_SK_SKB:
2157 	case BPF_PROG_TYPE_SK_MSG:
2158 		ret = sock_map_get_from_fd(attr, prog);
2159 		break;
2160 	case BPF_PROG_TYPE_LIRC_MODE2:
2161 		ret = lirc_prog_attach(attr, prog);
2162 		break;
2163 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2164 		ret = skb_flow_dissector_bpf_prog_attach(attr, prog);
2165 		break;
2166 	default:
2167 		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
2168 	}
2169 
2170 	if (ret)
2171 		bpf_prog_put(prog);
2172 	return ret;
2173 }
2174 
2175 #define BPF_PROG_DETACH_LAST_FIELD attach_type
2176 
2177 static int bpf_prog_detach(const union bpf_attr *attr)
2178 {
2179 	enum bpf_prog_type ptype;
2180 
2181 	if (!capable(CAP_NET_ADMIN))
2182 		return -EPERM;
2183 
2184 	if (CHECK_ATTR(BPF_PROG_DETACH))
2185 		return -EINVAL;
2186 
2187 	switch (attr->attach_type) {
2188 	case BPF_CGROUP_INET_INGRESS:
2189 	case BPF_CGROUP_INET_EGRESS:
2190 		ptype = BPF_PROG_TYPE_CGROUP_SKB;
2191 		break;
2192 	case BPF_CGROUP_INET_SOCK_CREATE:
2193 	case BPF_CGROUP_INET4_POST_BIND:
2194 	case BPF_CGROUP_INET6_POST_BIND:
2195 		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
2196 		break;
2197 	case BPF_CGROUP_INET4_BIND:
2198 	case BPF_CGROUP_INET6_BIND:
2199 	case BPF_CGROUP_INET4_CONNECT:
2200 	case BPF_CGROUP_INET6_CONNECT:
2201 	case BPF_CGROUP_UDP4_SENDMSG:
2202 	case BPF_CGROUP_UDP6_SENDMSG:
2203 	case BPF_CGROUP_UDP4_RECVMSG:
2204 	case BPF_CGROUP_UDP6_RECVMSG:
2205 		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2206 		break;
2207 	case BPF_CGROUP_SOCK_OPS:
2208 		ptype = BPF_PROG_TYPE_SOCK_OPS;
2209 		break;
2210 	case BPF_CGROUP_DEVICE:
2211 		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
2212 		break;
2213 	case BPF_SK_MSG_VERDICT:
2214 		return sock_map_get_from_fd(attr, NULL);
2215 	case BPF_SK_SKB_STREAM_PARSER:
2216 	case BPF_SK_SKB_STREAM_VERDICT:
2217 		return sock_map_get_from_fd(attr, NULL);
2218 	case BPF_LIRC_MODE2:
2219 		return lirc_prog_detach(attr);
2220 	case BPF_FLOW_DISSECTOR:
2221 		return skb_flow_dissector_bpf_prog_detach(attr);
2222 	case BPF_CGROUP_SYSCTL:
2223 		ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
2224 		break;
2225 	case BPF_CGROUP_GETSOCKOPT:
2226 	case BPF_CGROUP_SETSOCKOPT:
2227 		ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
2228 		break;
2229 	default:
2230 		return -EINVAL;
2231 	}
2232 
2233 	return cgroup_bpf_prog_detach(attr, ptype);
2234 }
2235 
2236 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
2237 
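/*
 * BPF_PROG_QUERY lists the programs attached to a target (cgroup, lirc
 * device or flow dissector).  Only BPF_F_QUERY_EFFECTIVE is accepted in
 * query_flags; the per-target handler writes back query.prog_cnt and, if a
 * buffer was supplied, the attached program ids.
 */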
2238 static int bpf_prog_query(const union bpf_attr *attr,
2239 			  union bpf_attr __user *uattr)
2240 {
2241 	if (!capable(CAP_NET_ADMIN))
2242 		return -EPERM;
2243 	if (CHECK_ATTR(BPF_PROG_QUERY))
2244 		return -EINVAL;
2245 	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
2246 		return -EINVAL;
2247 
2248 	switch (attr->query.attach_type) {
2249 	case BPF_CGROUP_INET_INGRESS:
2250 	case BPF_CGROUP_INET_EGRESS:
2251 	case BPF_CGROUP_INET_SOCK_CREATE:
2252 	case BPF_CGROUP_INET4_BIND:
2253 	case BPF_CGROUP_INET6_BIND:
2254 	case BPF_CGROUP_INET4_POST_BIND:
2255 	case BPF_CGROUP_INET6_POST_BIND:
2256 	case BPF_CGROUP_INET4_CONNECT:
2257 	case BPF_CGROUP_INET6_CONNECT:
2258 	case BPF_CGROUP_UDP4_SENDMSG:
2259 	case BPF_CGROUP_UDP6_SENDMSG:
2260 	case BPF_CGROUP_UDP4_RECVMSG:
2261 	case BPF_CGROUP_UDP6_RECVMSG:
2262 	case BPF_CGROUP_SOCK_OPS:
2263 	case BPF_CGROUP_DEVICE:
2264 	case BPF_CGROUP_SYSCTL:
2265 	case BPF_CGROUP_GETSOCKOPT:
2266 	case BPF_CGROUP_SETSOCKOPT:
2267 		break;
2268 	case BPF_LIRC_MODE2:
2269 		return lirc_prog_query(attr, uattr);
2270 	case BPF_FLOW_DISSECTOR:
2271 		return skb_flow_dissector_prog_query(attr, uattr);
2272 	default:
2273 		return -EINVAL;
2274 	}
2275 
2276 	return cgroup_bpf_prog_query(attr, uattr);
2277 }
2278 
2279 #define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
2280 
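/*
 * BPF_PROG_TEST_RUN executes a program against caller-supplied input via
 * the program type's ->test_run() op; types without one get -ENOTSUPP.
 * ctx_in/ctx_out must be self-consistent: a size without a buffer, or a
 * buffer without a size, is rejected before the program fd is looked up.
 */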
2281 static int bpf_prog_test_run(const union bpf_attr *attr,
2282 			     union bpf_attr __user *uattr)
2283 {
2284 	struct bpf_prog *prog;
2285 	int ret = -ENOTSUPP;
2286 
2287 	if (!capable(CAP_SYS_ADMIN))
2288 		return -EPERM;
2289 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
2290 		return -EINVAL;
2291 
2292 	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
2293 	    (!attr->test.ctx_size_in && attr->test.ctx_in))
2294 		return -EINVAL;
2295 
2296 	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
2297 	    (!attr->test.ctx_size_out && attr->test.ctx_out))
2298 		return -EINVAL;
2299 
2300 	prog = bpf_prog_get(attr->test.prog_fd);
2301 	if (IS_ERR(prog))
2302 		return PTR_ERR(prog);
2303 
2304 	if (prog->aux->ops->test_run)
2305 		ret = prog->aux->ops->test_run(prog, attr, uattr);
2306 
2307 	bpf_prog_put(prog);
2308 	return ret;
2309 }
2310 
2311 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
2312 
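/*
 * Shared GET_NEXT_ID implementation for the prog, map and BTF IDRs: return
 * the smallest id strictly greater than start_id.  User space iterates by
 * feeding next_id back in until -ENOENT, roughly as in this sketch
 * (assuming a raw syscall(__NR_bpf, ...) wrapper):
 *
 *	union bpf_attr attr = {};
 *	__u32 id = 0;
 *
 *	for (;;) {
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *			    sizeof(attr)))
 *			break;
 *		id = attr.next_id;
 *	}
 */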
2313 static int bpf_obj_get_next_id(const union bpf_attr *attr,
2314 			       union bpf_attr __user *uattr,
2315 			       struct idr *idr,
2316 			       spinlock_t *lock)
2317 {
2318 	u32 next_id = attr->start_id;
2319 	int err = 0;
2320 
2321 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
2322 		return -EINVAL;
2323 
2324 	if (!capable(CAP_SYS_ADMIN))
2325 		return -EPERM;
2326 
2327 	next_id++;
2328 	spin_lock_bh(lock);
2329 	if (!idr_get_next(idr, &next_id))
2330 		err = -ENOENT;
2331 	spin_unlock_bh(lock);
2332 
2333 	if (!err)
2334 		err = put_user(next_id, &uattr->next_id);
2335 
2336 	return err;
2337 }
2338 
2339 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
2340 
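/*
 * Translate a program id into a referenced bpf_prog.  Lookup and refcount
 * bump happen under prog_idr_lock, and inc_not_zero ensures a program that
 * is concurrently being freed is not resurrected.
 */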
2341 struct bpf_prog *bpf_prog_by_id(u32 id)
2342 {
2343 	struct bpf_prog *prog;
2344 
2345 	if (!id)
2346 		return ERR_PTR(-ENOENT);
2347 
2348 	spin_lock_bh(&prog_idr_lock);
2349 	prog = idr_find(&prog_idr, id);
2350 	if (prog)
2351 		prog = bpf_prog_inc_not_zero(prog);
2352 	else
2353 		prog = ERR_PTR(-ENOENT);
2354 	spin_unlock_bh(&prog_idr_lock);
2355 	return prog;
2356 }
2357 
2358 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
2359 {
2360 	struct bpf_prog *prog;
2361 	u32 id = attr->prog_id;
2362 	int fd;
2363 
2364 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
2365 		return -EINVAL;
2366 
2367 	if (!capable(CAP_SYS_ADMIN))
2368 		return -EPERM;
2369 
2370 	prog = bpf_prog_by_id(id);
2371 	if (IS_ERR(prog))
2372 		return PTR_ERR(prog);
2373 
2374 	fd = bpf_prog_new_fd(prog);
2375 	if (fd < 0)
2376 		bpf_prog_put(prog);
2377 
2378 	return fd;
2379 }
2380 
2381 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
2382 
2383 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
2384 {
2385 	struct bpf_map *map;
2386 	u32 id = attr->map_id;
2387 	int f_flags;
2388 	int fd;
2389 
2390 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
2391 	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
2392 		return -EINVAL;
2393 
2394 	if (!capable(CAP_SYS_ADMIN))
2395 		return -EPERM;
2396 
2397 	f_flags = bpf_get_file_flag(attr->open_flags);
2398 	if (f_flags < 0)
2399 		return f_flags;
2400 
2401 	spin_lock_bh(&map_idr_lock);
2402 	map = idr_find(&map_idr, id);
2403 	if (map)
2404 		map = __bpf_map_inc_not_zero(map, true);
2405 	else
2406 		map = ERR_PTR(-ENOENT);
2407 	spin_unlock_bh(&map_idr_lock);
2408 
2409 	if (IS_ERR(map))
2410 		return PTR_ERR(map);
2411 
2412 	fd = bpf_map_new_fd(map, f_flags);
2413 	if (fd < 0)
2414 		bpf_map_put_with_uref(map);
2415 
2416 	return fd;
2417 }
2418 
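/*
 * Reverse-map a 64-bit immediate from a loaded program back to the map it
 * refers to: either the map pointer itself (BPF_PSEUDO_MAP_FD) or an
 * address inside a map value (BPF_PSEUDO_MAP_VALUE, with *off set to the
 * offset).  Used when dumping instructions so kernel pointers can be
 * replaced by map ids.
 */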
2419 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
2420 					      unsigned long addr, u32 *off,
2421 					      u32 *type)
2422 {
2423 	const struct bpf_map *map;
2424 	int i;
2425 
2426 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
2427 		map = prog->aux->used_maps[i];
2428 		if (map == (void *)addr) {
2429 			*type = BPF_PSEUDO_MAP_FD;
2430 			return map;
2431 		}
2432 		if (!map->ops->map_direct_value_meta)
2433 			continue;
2434 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
2435 			*type = BPF_PSEUDO_MAP_VALUE;
2436 			return map;
2437 		}
2438 	}
2439 
2440 	return NULL;
2441 }
2442 
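/*
 * Make a copy of the program's instructions that is safe to hand to user
 * space: tail calls are rewritten back to the bpf_tail_call helper form,
 * call immediates are zeroed unless the caller may see raw kernel
 * addresses, and map pointers in BPF_LD_IMM64 are replaced by map id plus
 * value offset.
 */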
2443 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
2444 {
2445 	const struct bpf_map *map;
2446 	struct bpf_insn *insns;
2447 	u32 off, type;
2448 	u64 imm;
2449 	int i;
2450 
2451 	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
2452 			GFP_USER);
2453 	if (!insns)
2454 		return insns;
2455 
2456 	for (i = 0; i < prog->len; i++) {
2457 		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
2458 			insns[i].code = BPF_JMP | BPF_CALL;
2459 			insns[i].imm = BPF_FUNC_tail_call;
2460 			/* fall through to the BPF_CALL handling below */
2461 		}
2462 		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
2463 		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
2464 			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
2465 				insns[i].code = BPF_JMP | BPF_CALL;
2466 			if (!bpf_dump_raw_ok())
2467 				insns[i].imm = 0;
2468 			continue;
2469 		}
2470 
2471 		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
2472 			continue;
2473 
2474 		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
2475 		map = bpf_map_from_imm(prog, imm, &off, &type);
2476 		if (map) {
2477 			insns[i].src_reg = type;
2478 			insns[i].imm = map->id;
2479 			insns[i + 1].imm = off;
2480 			continue;
2481 		}
2482 	}
2483 
2484 	return insns;
2485 }
2486 
2487 static int set_info_rec_size(struct bpf_prog_info *info)
2488 {
2489 	/*
2490 	 * Ensure each info.*_rec_size matches the record size the kernel
2491 	 * expects,
2492 	 * or
2493 	 *
2494 	 * allow a zero *_rec_size only when the matching _cnt is also zero.
2495 	 * In that case the kernel writes the expected _rec_size back into
2496 	 * the info.
2497 	 */
2498 
2499 	if ((info->nr_func_info || info->func_info_rec_size) &&
2500 	    info->func_info_rec_size != sizeof(struct bpf_func_info))
2501 		return -EINVAL;
2502 
2503 	if ((info->nr_line_info || info->line_info_rec_size) &&
2504 	    info->line_info_rec_size != sizeof(struct bpf_line_info))
2505 		return -EINVAL;
2506 
2507 	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
2508 	    info->jited_line_info_rec_size != sizeof(__u64))
2509 		return -EINVAL;
2510 
2511 	info->func_info_rec_size = sizeof(struct bpf_func_info);
2512 	info->line_info_rec_size = sizeof(struct bpf_line_info);
2513 	info->jited_line_info_rec_size = sizeof(__u64);
2514 
2515 	return 0;
2516 }
2517 
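/*
 * Fill a struct bpf_prog_info for BPF_OBJ_GET_INFO_BY_FD.  Array-like
 * fields use a two-call pattern: user space passes its buffer capacity in
 * the nr_* fields, the kernel writes the real counts back and copies at
 * most the smaller of the two.  Sketch of fetching the map ids used by a
 * program (assuming a raw syscall(__NR_bpf, ...) wrapper):
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *	__u32 map_ids[64];
 *
 *	info.nr_map_ids = 64;
 *	info.map_ids    = (__u64)(unsigned long)map_ids;
 *	attr.info.bpf_fd   = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info     = (__u64)(unsigned long)&info;
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *	/* info.nr_map_ids now holds the real count; at most 64 were copied */
 */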
2518 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
2519 				   const union bpf_attr *attr,
2520 				   union bpf_attr __user *uattr)
2521 {
2522 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
2523 	struct bpf_prog_info info = {};
2524 	u32 info_len = attr->info.info_len;
2525 	struct bpf_prog_stats stats;
2526 	char __user *uinsns;
2527 	u32 ulen;
2528 	int err;
2529 
2530 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
2531 	if (err)
2532 		return err;
2533 	info_len = min_t(u32, sizeof(info), info_len);
2534 
2535 	if (copy_from_user(&info, uinfo, info_len))
2536 		return -EFAULT;
2537 
2538 	info.type = prog->type;
2539 	info.id = prog->aux->id;
2540 	info.load_time = prog->aux->load_time;
2541 	info.created_by_uid = from_kuid_munged(current_user_ns(),
2542 					       prog->aux->user->uid);
2543 	info.gpl_compatible = prog->gpl_compatible;
2544 
2545 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
2546 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
2547 
2548 	ulen = info.nr_map_ids;
2549 	info.nr_map_ids = prog->aux->used_map_cnt;
2550 	ulen = min_t(u32, info.nr_map_ids, ulen);
2551 	if (ulen) {
2552 		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
2553 		u32 i;
2554 
2555 		for (i = 0; i < ulen; i++)
2556 			if (put_user(prog->aux->used_maps[i]->id,
2557 				     &user_map_ids[i]))
2558 				return -EFAULT;
2559 	}
2560 
2561 	err = set_info_rec_size(&info);
2562 	if (err)
2563 		return err;
2564 
2565 	bpf_prog_get_stats(prog, &stats);
2566 	info.run_time_ns = stats.nsecs;
2567 	info.run_cnt = stats.cnt;
2568 
2569 	if (!capable(CAP_SYS_ADMIN)) {
2570 		info.jited_prog_len = 0;
2571 		info.xlated_prog_len = 0;
2572 		info.nr_jited_ksyms = 0;
2573 		info.nr_jited_func_lens = 0;
2574 		info.nr_func_info = 0;
2575 		info.nr_line_info = 0;
2576 		info.nr_jited_line_info = 0;
2577 		goto done;
2578 	}
2579 
2580 	ulen = info.xlated_prog_len;
2581 	info.xlated_prog_len = bpf_prog_insn_size(prog);
2582 	if (info.xlated_prog_len && ulen) {
2583 		struct bpf_insn *insns_sanitized;
2584 		bool fault;
2585 
2586 		if (prog->blinded && !bpf_dump_raw_ok()) {
2587 			info.xlated_prog_insns = 0;
2588 			goto done;
2589 		}
2590 		insns_sanitized = bpf_insn_prepare_dump(prog);
2591 		if (!insns_sanitized)
2592 			return -ENOMEM;
2593 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
2594 		ulen = min_t(u32, info.xlated_prog_len, ulen);
2595 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
2596 		kfree(insns_sanitized);
2597 		if (fault)
2598 			return -EFAULT;
2599 	}
2600 
2601 	if (bpf_prog_is_dev_bound(prog->aux)) {
2602 		err = bpf_prog_offload_info_fill(&info, prog);
2603 		if (err)
2604 			return err;
2605 		goto done;
2606 	}
2607 
2608 	/* NOTE: the code below is skipped for device-bound (offloaded)
2609 	 * programs; bpf_prog_offload_info_fill() above fills the
2610 	 * equivalent fields for offload.
2611 	 */
2612 	ulen = info.jited_prog_len;
2613 	if (prog->aux->func_cnt) {
2614 		u32 i;
2615 
2616 		info.jited_prog_len = 0;
2617 		for (i = 0; i < prog->aux->func_cnt; i++)
2618 			info.jited_prog_len += prog->aux->func[i]->jited_len;
2619 	} else {
2620 		info.jited_prog_len = prog->jited_len;
2621 	}
2622 
2623 	if (info.jited_prog_len && ulen) {
2624 		if (bpf_dump_raw_ok()) {
2625 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
2626 			ulen = min_t(u32, info.jited_prog_len, ulen);
2627 
2628 			/* for multi-function programs, copy the JITed
2629 			 * instructions for all the functions
2630 			 */
2631 			if (prog->aux->func_cnt) {
2632 				u32 len, free, i;
2633 				u8 *img;
2634 
2635 				free = ulen;
2636 				for (i = 0; i < prog->aux->func_cnt; i++) {
2637 					len = prog->aux->func[i]->jited_len;
2638 					len = min_t(u32, len, free);
2639 					img = (u8 *) prog->aux->func[i]->bpf_func;
2640 					if (copy_to_user(uinsns, img, len))
2641 						return -EFAULT;
2642 					uinsns += len;
2643 					free -= len;
2644 					if (!free)
2645 						break;
2646 				}
2647 			} else {
2648 				if (copy_to_user(uinsns, prog->bpf_func, ulen))
2649 					return -EFAULT;
2650 			}
2651 		} else {
2652 			info.jited_prog_insns = 0;
2653 		}
2654 	}
2655 
2656 	ulen = info.nr_jited_ksyms;
2657 	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
2658 	if (ulen) {
2659 		if (bpf_dump_raw_ok()) {
2660 			unsigned long ksym_addr;
2661 			u64 __user *user_ksyms;
2662 			u32 i;
2663 
2664 			/* copy the address of the kernel symbol
2665 			 * corresponding to each function
2666 			 */
2667 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
2668 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
2669 			if (prog->aux->func_cnt) {
2670 				for (i = 0; i < ulen; i++) {
2671 					ksym_addr = (unsigned long)
2672 						prog->aux->func[i]->bpf_func;
2673 					if (put_user((u64) ksym_addr,
2674 						     &user_ksyms[i]))
2675 						return -EFAULT;
2676 				}
2677 			} else {
2678 				ksym_addr = (unsigned long) prog->bpf_func;
2679 				if (put_user((u64) ksym_addr, &user_ksyms[0]))
2680 					return -EFAULT;
2681 			}
2682 		} else {
2683 			info.jited_ksyms = 0;
2684 		}
2685 	}
2686 
2687 	ulen = info.nr_jited_func_lens;
2688 	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
2689 	if (ulen) {
2690 		if (bpf_dump_raw_ok()) {
2691 			u32 __user *user_lens;
2692 			u32 func_len, i;
2693 
2694 			/* copy the JITed image lengths for each function */
2695 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
2696 			user_lens = u64_to_user_ptr(info.jited_func_lens);
2697 			if (prog->aux->func_cnt) {
2698 				for (i = 0; i < ulen; i++) {
2699 					func_len =
2700 						prog->aux->func[i]->jited_len;
2701 					if (put_user(func_len, &user_lens[i]))
2702 						return -EFAULT;
2703 				}
2704 			} else {
2705 				func_len = prog->jited_len;
2706 				if (put_user(func_len, &user_lens[0]))
2707 					return -EFAULT;
2708 			}
2709 		} else {
2710 			info.jited_func_lens = 0;
2711 		}
2712 	}
2713 
2714 	if (prog->aux->btf)
2715 		info.btf_id = btf_id(prog->aux->btf);
2716 
2717 	ulen = info.nr_func_info;
2718 	info.nr_func_info = prog->aux->func_info_cnt;
2719 	if (info.nr_func_info && ulen) {
2720 		char __user *user_finfo;
2721 
2722 		user_finfo = u64_to_user_ptr(info.func_info);
2723 		ulen = min_t(u32, info.nr_func_info, ulen);
2724 		if (copy_to_user(user_finfo, prog->aux->func_info,
2725 				 info.func_info_rec_size * ulen))
2726 			return -EFAULT;
2727 	}
2728 
2729 	ulen = info.nr_line_info;
2730 	info.nr_line_info = prog->aux->nr_linfo;
2731 	if (info.nr_line_info && ulen) {
2732 		__u8 __user *user_linfo;
2733 
2734 		user_linfo = u64_to_user_ptr(info.line_info);
2735 		ulen = min_t(u32, info.nr_line_info, ulen);
2736 		if (copy_to_user(user_linfo, prog->aux->linfo,
2737 				 info.line_info_rec_size * ulen))
2738 			return -EFAULT;
2739 	}
2740 
2741 	ulen = info.nr_jited_line_info;
2742 	if (prog->aux->jited_linfo)
2743 		info.nr_jited_line_info = prog->aux->nr_linfo;
2744 	else
2745 		info.nr_jited_line_info = 0;
2746 	if (info.nr_jited_line_info && ulen) {
2747 		if (bpf_dump_raw_ok()) {
2748 			__u64 __user *user_linfo;
2749 			u32 i;
2750 
2751 			user_linfo = u64_to_user_ptr(info.jited_line_info);
2752 			ulen = min_t(u32, info.nr_jited_line_info, ulen);
2753 			for (i = 0; i < ulen; i++) {
2754 				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
2755 					     &user_linfo[i]))
2756 					return -EFAULT;
2757 			}
2758 		} else {
2759 			info.jited_line_info = 0;
2760 		}
2761 	}
2762 
2763 	ulen = info.nr_prog_tags;
2764 	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
2765 	if (ulen) {
2766 		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
2767 		u32 i;
2768 
2769 		user_prog_tags = u64_to_user_ptr(info.prog_tags);
2770 		ulen = min_t(u32, info.nr_prog_tags, ulen);
2771 		if (prog->aux->func_cnt) {
2772 			for (i = 0; i < ulen; i++) {
2773 				if (copy_to_user(user_prog_tags[i],
2774 						 prog->aux->func[i]->tag,
2775 						 BPF_TAG_SIZE))
2776 					return -EFAULT;
2777 			}
2778 		} else {
2779 			if (copy_to_user(user_prog_tags[0],
2780 					 prog->tag, BPF_TAG_SIZE))
2781 				return -EFAULT;
2782 		}
2783 	}
2784 
2785 done:
2786 	if (copy_to_user(uinfo, &info, info_len) ||
2787 	    put_user(info_len, &uattr->info.info_len))
2788 		return -EFAULT;
2789 
2790 	return 0;
2791 }
2792 
2793 static int bpf_map_get_info_by_fd(struct bpf_map *map,
2794 				  const union bpf_attr *attr,
2795 				  union bpf_attr __user *uattr)
2796 {
2797 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
2798 	struct bpf_map_info info = {};
2799 	u32 info_len = attr->info.info_len;
2800 	int err;
2801 
2802 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
2803 	if (err)
2804 		return err;
2805 	info_len = min_t(u32, sizeof(info), info_len);
2806 
2807 	info.type = map->map_type;
2808 	info.id = map->id;
2809 	info.key_size = map->key_size;
2810 	info.value_size = map->value_size;
2811 	info.max_entries = map->max_entries;
2812 	info.map_flags = map->map_flags;
2813 	memcpy(info.name, map->name, sizeof(map->name));
2814 
2815 	if (map->btf) {
2816 		info.btf_id = btf_id(map->btf);
2817 		info.btf_key_type_id = map->btf_key_type_id;
2818 		info.btf_value_type_id = map->btf_value_type_id;
2819 	}
2820 
2821 	if (bpf_map_is_dev_bound(map)) {
2822 		err = bpf_map_offload_info_fill(&info, map);
2823 		if (err)
2824 			return err;
2825 	}
2826 
2827 	if (copy_to_user(uinfo, &info, info_len) ||
2828 	    put_user(info_len, &uattr->info.info_len))
2829 		return -EFAULT;
2830 
2831 	return 0;
2832 }
2833 
2834 static int bpf_btf_get_info_by_fd(struct btf *btf,
2835 				  const union bpf_attr *attr,
2836 				  union bpf_attr __user *uattr)
2837 {
2838 	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
2839 	u32 info_len = attr->info.info_len;
2840 	int err;
2841 
2842 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
2843 	if (err)
2844 		return err;
2845 
2846 	return btf_get_info_by_fd(btf, attr, uattr);
2847 }
2848 
2849 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
2850 
2851 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
2852 				  union bpf_attr __user *uattr)
2853 {
2854 	int ufd = attr->info.bpf_fd;
2855 	struct fd f;
2856 	int err;
2857 
2858 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
2859 		return -EINVAL;
2860 
2861 	f = fdget(ufd);
2862 	if (!f.file)
2863 		return -EBADFD;
2864 
2865 	if (f.file->f_op == &bpf_prog_fops)
2866 		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
2867 					      uattr);
2868 	else if (f.file->f_op == &bpf_map_fops)
2869 		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
2870 					     uattr);
2871 	else if (f.file->f_op == &btf_fops)
2872 		err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
2873 	else
2874 		err = -EINVAL;
2875 
2876 	fdput(f);
2877 	return err;
2878 }
2879 
2880 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
2881 
2882 static int bpf_btf_load(const union bpf_attr *attr)
2883 {
2884 	if (CHECK_ATTR(BPF_BTF_LOAD))
2885 		return -EINVAL;
2886 
2887 	if (!capable(CAP_SYS_ADMIN))
2888 		return -EPERM;
2889 
2890 	return btf_new_fd(attr);
2891 }
2892 
2893 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
2894 
2895 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
2896 {
2897 	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
2898 		return -EINVAL;
2899 
2900 	if (!capable(CAP_SYS_ADMIN))
2901 		return -EPERM;
2902 
2903 	return btf_get_fd_by_id(attr->btf_id);
2904 }
2905 
2906 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
2907 				    union bpf_attr __user *uattr,
2908 				    u32 prog_id, u32 fd_type,
2909 				    const char *buf, u64 probe_offset,
2910 				    u64 probe_addr)
2911 {
2912 	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
2913 	u32 len = buf ? strlen(buf) : 0, input_len;
2914 	int err = 0;
2915 
2916 	if (put_user(len, &uattr->task_fd_query.buf_len))
2917 		return -EFAULT;
2918 	input_len = attr->task_fd_query.buf_len;
2919 	if (input_len && ubuf) {
2920 		if (!len) {
2921 			/* nothing to copy, just make ubuf NULL terminated */
2922 			char zero = '\0';
2923 
2924 			if (put_user(zero, ubuf))
2925 				return -EFAULT;
2926 		} else if (input_len >= len + 1) {
2927 			/* ubuf can hold the string with NULL terminator */
2928 			if (copy_to_user(ubuf, buf, len + 1))
2929 				return -EFAULT;
2930 		} else {
2931 			/* ubuf is too small for the string plus its NUL
2932 			 * terminator; copy what fits and NUL-terminate it.
2933 			 */
2934 			char zero = '\0';
2935 
2936 			err = -ENOSPC;
2937 			if (copy_to_user(ubuf, buf, input_len - 1))
2938 				return -EFAULT;
2939 			if (put_user(zero, ubuf + input_len - 1))
2940 				return -EFAULT;
2941 		}
2942 	}
2943 
2944 	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
2945 	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
2946 	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
2947 	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
2948 		return -EFAULT;
2949 
2950 	return err;
2951 }
2952 
2953 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
2954 
2955 static int bpf_task_fd_query(const union bpf_attr *attr,
2956 			     union bpf_attr __user *uattr)
2957 {
2958 	pid_t pid = attr->task_fd_query.pid;
2959 	u32 fd = attr->task_fd_query.fd;
2960 	const struct perf_event *event;
2961 	struct files_struct *files;
2962 	struct task_struct *task;
2963 	struct file *file;
2964 	int err;
2965 
2966 	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
2967 		return -EINVAL;
2968 
2969 	if (!capable(CAP_SYS_ADMIN))
2970 		return -EPERM;
2971 
2972 	if (attr->task_fd_query.flags != 0)
2973 		return -EINVAL;
2974 
2975 	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
2976 	if (!task)
2977 		return -ENOENT;
2978 
2979 	files = get_files_struct(task);
2980 	put_task_struct(task);
2981 	if (!files)
2982 		return -ENOENT;
2983 
2984 	err = 0;
2985 	spin_lock(&files->file_lock);
2986 	file = fcheck_files(files, fd);
2987 	if (!file)
2988 		err = -EBADF;
2989 	else
2990 		get_file(file);
2991 	spin_unlock(&files->file_lock);
2992 	put_files_struct(files);
2993 
2994 	if (err)
2995 		goto out;
2996 
2997 	if (file->f_op == &bpf_raw_tp_fops) {
2998 		struct bpf_raw_tracepoint *raw_tp = file->private_data;
2999 		struct bpf_raw_event_map *btp = raw_tp->btp;
3000 
3001 		err = bpf_task_fd_query_copy(attr, uattr,
3002 					     raw_tp->prog->aux->id,
3003 					     BPF_FD_TYPE_RAW_TRACEPOINT,
3004 					     btp->tp->name, 0, 0);
3005 		goto put_file;
3006 	}
3007 
3008 	event = perf_get_event(file);
3009 	if (!IS_ERR(event)) {
3010 		u64 probe_offset, probe_addr;
3011 		u32 prog_id, fd_type;
3012 		const char *buf;
3013 
3014 		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
3015 					      &buf, &probe_offset,
3016 					      &probe_addr);
3017 		if (!err)
3018 			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
3019 						     fd_type, buf,
3020 						     probe_offset,
3021 						     probe_addr);
3022 		goto put_file;
3023 	}
3024 
3025 	err = -ENOTSUPP;
3026 put_file:
3027 	fput(file);
3028 out:
3029 	return err;
3030 }
3031 
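/*
 * Single entry point for all BPF commands.  The attr union handed in may be
 * shorter or longer than what this kernel knows about; any trailing bytes
 * it does not understand must be zero.  A minimal user-space call, hedged
 * as an illustration of the calling convention only:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 64;
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */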
3032 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
3033 {
3034 	union bpf_attr attr = {};
3035 	int err;
3036 
3037 	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
3038 		return -EPERM;
3039 
3040 	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
3041 	if (err)
3042 		return err;
3043 	size = min_t(u32, size, sizeof(attr));
3044 
3045 	/* copy attributes from user space; may be shorter than sizeof(bpf_attr) */
3046 	if (copy_from_user(&attr, uattr, size) != 0)
3047 		return -EFAULT;
3048 
3049 	err = security_bpf(cmd, &attr, size);
3050 	if (err < 0)
3051 		return err;
3052 
3053 	switch (cmd) {
3054 	case BPF_MAP_CREATE:
3055 		err = map_create(&attr);
3056 		break;
3057 	case BPF_MAP_LOOKUP_ELEM:
3058 		err = map_lookup_elem(&attr);
3059 		break;
3060 	case BPF_MAP_UPDATE_ELEM:
3061 		err = map_update_elem(&attr);
3062 		break;
3063 	case BPF_MAP_DELETE_ELEM:
3064 		err = map_delete_elem(&attr);
3065 		break;
3066 	case BPF_MAP_GET_NEXT_KEY:
3067 		err = map_get_next_key(&attr);
3068 		break;
3069 	case BPF_MAP_FREEZE:
3070 		err = map_freeze(&attr);
3071 		break;
3072 	case BPF_PROG_LOAD:
3073 		err = bpf_prog_load(&attr, uattr);
3074 		break;
3075 	case BPF_OBJ_PIN:
3076 		err = bpf_obj_pin(&attr);
3077 		break;
3078 	case BPF_OBJ_GET:
3079 		err = bpf_obj_get(&attr);
3080 		break;
3081 	case BPF_PROG_ATTACH:
3082 		err = bpf_prog_attach(&attr);
3083 		break;
3084 	case BPF_PROG_DETACH:
3085 		err = bpf_prog_detach(&attr);
3086 		break;
3087 	case BPF_PROG_QUERY:
3088 		err = bpf_prog_query(&attr, uattr);
3089 		break;
3090 	case BPF_PROG_TEST_RUN:
3091 		err = bpf_prog_test_run(&attr, uattr);
3092 		break;
3093 	case BPF_PROG_GET_NEXT_ID:
3094 		err = bpf_obj_get_next_id(&attr, uattr,
3095 					  &prog_idr, &prog_idr_lock);
3096 		break;
3097 	case BPF_MAP_GET_NEXT_ID:
3098 		err = bpf_obj_get_next_id(&attr, uattr,
3099 					  &map_idr, &map_idr_lock);
3100 		break;
3101 	case BPF_BTF_GET_NEXT_ID:
3102 		err = bpf_obj_get_next_id(&attr, uattr,
3103 					  &btf_idr, &btf_idr_lock);
3104 		break;
3105 	case BPF_PROG_GET_FD_BY_ID:
3106 		err = bpf_prog_get_fd_by_id(&attr);
3107 		break;
3108 	case BPF_MAP_GET_FD_BY_ID:
3109 		err = bpf_map_get_fd_by_id(&attr);
3110 		break;
3111 	case BPF_OBJ_GET_INFO_BY_FD:
3112 		err = bpf_obj_get_info_by_fd(&attr, uattr);
3113 		break;
3114 	case BPF_RAW_TRACEPOINT_OPEN:
3115 		err = bpf_raw_tracepoint_open(&attr);
3116 		break;
3117 	case BPF_BTF_LOAD:
3118 		err = bpf_btf_load(&attr);
3119 		break;
3120 	case BPF_BTF_GET_FD_BY_ID:
3121 		err = bpf_btf_get_fd_by_id(&attr);
3122 		break;
3123 	case BPF_TASK_FD_QUERY:
3124 		err = bpf_task_fd_query(&attr, uattr);
3125 		break;
3126 	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
3127 		err = map_lookup_and_delete_elem(&attr);
3128 		break;
3129 	default:
3130 		err = -EINVAL;
3131 		break;
3132 	}
3133 
3134 	return err;
3135 }
3136