xref: /linux/kernel/bpf/syscall.c (revision c411ed854584a71b0e86ac3019b60e4789d88086)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU (time-of-check to time-of-use) window between this
 * function call and the copy_from_user() call that follows it. That is
 * acceptable here: this function only future-proofs against unknown bits,
 * it is not meant to be a security boundary.
 */
static int check_uarg_tail_zero(void __user *uaddr,
				size_t expected_size,
				size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
		return -EFAULT;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end  = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}
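
/*
 * Practical consequence for user space (hedged, illustrative sketch; not
 * part of the original file): since any bytes beyond the fields a given
 * kernel knows about must be zero, callers should zero-initialize
 * 'union bpf_attr' before filling in the fields they use, e.g.:
 *
 *	union bpf_attr attr = {};	// tail bytes guaranteed zero
 *
 *	attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size    = 4;
 *	attr.value_size  = 8;
 *	attr.max_entries = 16;
 *	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * A newer binary passing a larger struct to an older kernel is only rejected
 * with -E2BIG by the loop above if one of the unknown tail bytes is non-zero.
 */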

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

void *bpf_map_area_alloc(size_t size, int numa_node)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, GFP_USER | flags, numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
					   __builtin_return_address(0));
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}
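
/*
 * Allocation strategy note with a hedged usage sketch (not from the original
 * file): map implementations size their element area and pair these two
 * helpers, e.g.:
 *
 *	void *area = bpf_map_area_alloc(array_size, numa_node);
 *
 *	if (!area)
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	bpf_map_area_free(area);
 *
 * Small requests are first tried with kmalloc_node() (physically contiguous,
 * cheap); larger or failed requests fall back to vmalloc space, which is why
 * freeing must go through kvfree().
 */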

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
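
/*
 * User-space note (hedged sketch, not part of the original file): once the
 * owning user's locked_vm would exceed RLIMIT_MEMLOCK, charging fails with
 * -EPERM above. Loaders that create many or large maps therefore commonly
 * raise the limit first (raising the hard limit needs CAP_SYS_RESOURCE):
 *
 *	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
 *
 *	if (setrlimit(RLIMIT_MEMLOCK, &r))
 *		perror("setrlimit(RLIMIT_MEMLOCK)");
 */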

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	if (do_idr_lock)
		spin_lock_bh(&map_idr_lock);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);

	if (do_idr_lock)
		spin_unlock_bh(&map_idr_lock);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (the underlying map implementation's ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
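
/*
 * For example, CHECK_ATTR(BPF_MAP_CREATE), with BPF_MAP_CREATE_LAST_FIELD
 * defined as numa_node below, expands to:
 *
 *	memchr_inv((void *) &attr->numa_node +
 *		   sizeof(attr->numa_node), 0,
 *		   sizeof(*attr) -
 *		   offsetof(union bpf_attr, numa_node) -
 *		   sizeof(attr->numa_node)) != NULL
 *
 * i.e. it evaluates to true (-> -EINVAL) iff any byte between the end of the
 * command's last field and the end of the union is non-zero.
 */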

#define BPF_MAP_CREATE_LAST_FIELD numa_node
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (numa_node != NUMA_NO_NODE &&
	    (numa_node >= nr_node_ids || !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
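
/*
 * User-space view of BPF_MAP_CREATE (hedged, minimal sketch assuming
 * <linux/bpf.h>, <sys/syscall.h> and <unistd.h>; not from the original file):
 *
 *	union bpf_attr attr = {};
 *	int fd;
 *
 *	attr.map_type    = BPF_MAP_TYPE_HASH;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 1024;
 *	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	// fd < 0: errno carries -EINVAL/-EPERM/-ENOMEM/... from above
 */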

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
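
/*
 * Sizing note for user space (hedged sketch, not from the original file):
 * for the per-cpu map types the lookup above copies
 * round_up(value_size, 8) * num_possible_cpus() bytes, so the caller must
 * supply a buffer that large. Assuming 'ncpus' was derived from
 * /sys/devices/system/cpu/possible and an 8-byte value_size:
 *
 *	__u32 key = 0;
 *	__u64 *values = calloc(ncpus, sizeof(__u64));
 *	union bpf_attr attr = {};
 *
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)values;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	// on success, values[i] holds CPU i's copy of the element
 */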

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
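
/*
 * Iteration pattern (hedged user-space sketch, not from the original file):
 * a NULL key (attr.key == 0) is accepted above and, for map types whose
 * implementation supports it, yields the map's first key, so a full walk of
 * a map with __u32 keys looks like:
 *
 *	union bpf_attr attr = {};
 *	__u32 key, next_key;
 *
 *	attr.map_fd   = map_fd;
 *	attr.key      = 0;	// no key yet: ask for the first one
 *	attr.next_key = (__u64)(unsigned long)&next_key;
 *	while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
 *		key = next_key;
 *		// ... lookup or delete 'key' here ...
 *		attr.key = (__u64)(unsigned long)&key;
 *	}
 *	// the loop ends with -ENOENT once the last key was returned
 */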

static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store. */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define	BPF_PROG_LOAD_LAST_FIELD prog_flags

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
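
/*
 * Minimal load example (hedged user-space sketch, not from the original
 * file): the smallest program the verifier accepts just returns 0. The raw
 * encodings follow <linux/bpf.h> (BPF_ALU64|BPF_MOV|BPF_K is "r0 = imm",
 * BPF_JMP|BPF_EXIT is "return r0"):
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {};
 *	int prog_fd;
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.insn_cnt  = 2;
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */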

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}
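
/*
 * Pinning sketch (hedged, hypothetical path; not from the original file):
 * both commands take a pathname below a mounted bpf filesystem, normally
 * /sys/fs/bpf:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map";
 *	attr.bpf_fd   = map_fd;
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	// later, possibly from another process:
 *	attr.bpf_fd = 0;	// must be zero for BPF_OBJ_GET
 *	new_fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */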

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int sockmap_get_from_fd(const union bpf_attr *attr)
{
	int ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int err;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, BPF_PROG_TYPE_SK_SKB);
	if (IS_ERR(prog)) {
		fdput(f);
		return PTR_ERR(prog);
	}

	err = sock_map_attach_prog(map, prog, attr->attach_type);
	if (err) {
		fdput(f);
		bpf_prog_put(prog);
		return err;
	}

	fdput(f);
	return 0;
}

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr);
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}
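
/*
 * Attach sketch (hedged; the cgroup path and fds are assumptions, not from
 * the original file): attaching a BPF_PROG_TYPE_CGROUP_SKB program to a
 * cgroup's ingress hook, with 'prog_fd' obtained from BPF_PROG_LOAD:
 *
 *	int cg_fd = open("/sys/fs/cgroup/unified/mygroup", O_RDONLY);
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cg_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_OVERRIDE;	// optional
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */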

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_SOCK_OPS:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}
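
/*
 * Enumeration sketch (hedged, CAP_SYS_ADMIN only; not from the original
 * file): walking all loaded program IDs. The same pattern works for maps
 * via BPF_MAP_GET_NEXT_ID:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.start_id = 0;
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		__u32 id = attr.next_id;
 *		// ... e.g. BPF_PROG_GET_FD_BY_ID on 'id', inspect, close ...
 *		attr.start_id = id;
 *	}
 *	// terminates with -ENOENT after the highest allocated ID
 */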

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		goto done;
	}

	ulen = info.jited_prog_len;
	info.jited_prog_len = prog->jited_len;
	if (info.jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.jited_prog_insns);
		ulen = min_t(u32, info.jited_prog_len, ulen);
		if (copy_to_user(uinsns, prog->bpf_func, ulen))
			return -EFAULT;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		if (copy_to_user(uinsns, prog->insnsi, ulen))
			return -EFAULT;
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}
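
/*
 * Info query sketch (hedged, not from the original file): retrieving
 * metadata for a map fd; program fds work the same way with
 * struct bpf_prog_info:
 *
 *	struct bpf_map_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd   = map_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info     = (__u64)(unsigned long)&info;
 *	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *	// on success, info.id/key_size/value_size/max_entries are filled in
 */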

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}