xref: /linux/kernel/bpf/syscall.c (revision 0408c58be5a475c99b271f08d85859f7b59ec767)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  *
3  * This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful, but
8  * WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10  * General Public License for more details.
11  */
12 #include <linux/bpf.h>
13 #include <linux/bpf_trace.h>
14 #include <linux/syscalls.h>
15 #include <linux/slab.h>
16 #include <linux/sched/signal.h>
17 #include <linux/vmalloc.h>
18 #include <linux/mmzone.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/file.h>
21 #include <linux/license.h>
22 #include <linux/filter.h>
23 #include <linux/version.h>
24 #include <linux/kernel.h>
25 #include <linux/idr.h>
26 
27 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
28 			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
29 			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
30 			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
31 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
32 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
33 
34 DEFINE_PER_CPU(int, bpf_prog_active);
35 static DEFINE_IDR(prog_idr);
36 static DEFINE_SPINLOCK(prog_idr_lock);
37 static DEFINE_IDR(map_idr);
38 static DEFINE_SPINLOCK(map_idr_lock);
39 
40 int sysctl_unprivileged_bpf_disabled __read_mostly;
41 
42 static const struct bpf_map_ops * const bpf_map_types[] = {
43 #define BPF_PROG_TYPE(_id, _ops)
44 #define BPF_MAP_TYPE(_id, _ops) \
45 	[_id] = &_ops,
46 #include <linux/bpf_types.h>
47 #undef BPF_PROG_TYPE
48 #undef BPF_MAP_TYPE
49 };
50 
51 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
52 {
53 	struct bpf_map *map;
54 
55 	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
56 	    !bpf_map_types[attr->map_type])
57 		return ERR_PTR(-EINVAL);
58 
59 	map = bpf_map_types[attr->map_type]->map_alloc(attr);
60 	if (IS_ERR(map))
61 		return map;
62 	map->ops = bpf_map_types[attr->map_type];
63 	map->map_type = attr->map_type;
64 	return map;
65 }
66 
67 void *bpf_map_area_alloc(size_t size)
68 {
69 	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
70 	 * trigger under memory pressure as we really just want to
71 	 * fail instead.
72 	 */
73 	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
74 	void *area;
75 
76 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
77 		area = kmalloc(size, GFP_USER | flags);
78 		if (area != NULL)
79 			return area;
80 	}
81 
82 	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
83 }
84 
85 void bpf_map_area_free(void *area)
86 {
87 	kvfree(area);
88 }
89 
90 int bpf_map_precharge_memlock(u32 pages)
91 {
92 	struct user_struct *user = get_current_user();
93 	unsigned long memlock_limit, cur;
94 
95 	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
96 	cur = atomic_long_read(&user->locked_vm);
97 	free_uid(user);
98 	if (cur + pages > memlock_limit)
99 		return -EPERM;
100 	return 0;
101 }
102 
103 static int bpf_map_charge_memlock(struct bpf_map *map)
104 {
105 	struct user_struct *user = get_current_user();
106 	unsigned long memlock_limit;
107 
108 	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
109 
110 	atomic_long_add(map->pages, &user->locked_vm);
111 
112 	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
113 		atomic_long_sub(map->pages, &user->locked_vm);
114 		free_uid(user);
115 		return -EPERM;
116 	}
117 	map->user = user;
118 	return 0;
119 }
120 
121 static void bpf_map_uncharge_memlock(struct bpf_map *map)
122 {
123 	struct user_struct *user = map->user;
124 
125 	atomic_long_sub(map->pages, &user->locked_vm);
126 	free_uid(user);
127 }
128 
129 static int bpf_map_alloc_id(struct bpf_map *map)
130 {
131 	int id;
132 
133 	spin_lock_bh(&map_idr_lock);
134 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
135 	if (id > 0)
136 		map->id = id;
137 	spin_unlock_bh(&map_idr_lock);
138 
139 	if (WARN_ON_ONCE(!id))
140 		return -ENOSPC;
141 
142 	return id > 0 ? 0 : id;
143 }
144 
145 static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
146 {
147 	if (do_idr_lock)
148 		spin_lock_bh(&map_idr_lock);
149 	else
150 		__acquire(&map_idr_lock);
151 
152 	idr_remove(&map_idr, map->id);
153 
154 	if (do_idr_lock)
155 		spin_unlock_bh(&map_idr_lock);
156 	else
157 		__release(&map_idr_lock);
158 }
159 
160 /* called from workqueue */
161 static void bpf_map_free_deferred(struct work_struct *work)
162 {
163 	struct bpf_map *map = container_of(work, struct bpf_map, work);
164 
165 	bpf_map_uncharge_memlock(map);
166 	/* implementation dependent freeing */
167 	map->ops->map_free(map);
168 }
169 
170 static void bpf_map_put_uref(struct bpf_map *map)
171 {
172 	if (atomic_dec_and_test(&map->usercnt)) {
173 		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
174 			bpf_fd_array_map_clear(map);
175 	}
176 }
177 
178 /* decrement map refcnt and schedule it for freeing via workqueue
179  * (underlying map implementation ops->map_free() might sleep)
180  */
181 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
182 {
183 	if (atomic_dec_and_test(&map->refcnt)) {
184 		/* bpf_map_free_id() must be called first */
185 		bpf_map_free_id(map, do_idr_lock);
186 		INIT_WORK(&map->work, bpf_map_free_deferred);
187 		schedule_work(&map->work);
188 	}
189 }
190 
191 void bpf_map_put(struct bpf_map *map)
192 {
193 	__bpf_map_put(map, true);
194 }
195 
196 void bpf_map_put_with_uref(struct bpf_map *map)
197 {
198 	bpf_map_put_uref(map);
199 	bpf_map_put(map);
200 }
201 
202 static int bpf_map_release(struct inode *inode, struct file *filp)
203 {
204 	struct bpf_map *map = filp->private_data;
205 
206 	if (map->ops->map_release)
207 		map->ops->map_release(map, filp);
208 
209 	bpf_map_put_with_uref(map);
210 	return 0;
211 }
212 
213 #ifdef CONFIG_PROC_FS
214 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
215 {
216 	const struct bpf_map *map = filp->private_data;
217 	const struct bpf_array *array;
218 	u32 owner_prog_type = 0;
219 	u32 owner_jited = 0;
220 
221 	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
222 		array = container_of(map, struct bpf_array, map);
223 		owner_prog_type = array->owner_prog_type;
224 		owner_jited = array->owner_jited;
225 	}
226 
227 	seq_printf(m,
228 		   "map_type:\t%u\n"
229 		   "key_size:\t%u\n"
230 		   "value_size:\t%u\n"
231 		   "max_entries:\t%u\n"
232 		   "map_flags:\t%#x\n"
233 		   "memlock:\t%llu\n",
234 		   map->map_type,
235 		   map->key_size,
236 		   map->value_size,
237 		   map->max_entries,
238 		   map->map_flags,
239 		   map->pages * 1ULL << PAGE_SHIFT);
240 
241 	if (owner_prog_type) {
242 		seq_printf(m, "owner_prog_type:\t%u\n",
243 			   owner_prog_type);
244 		seq_printf(m, "owner_jited:\t%u\n",
245 			   owner_jited);
246 	}
247 }
248 #endif
249 
250 static const struct file_operations bpf_map_fops = {
251 #ifdef CONFIG_PROC_FS
252 	.show_fdinfo	= bpf_map_show_fdinfo,
253 #endif
254 	.release	= bpf_map_release,
255 };
256 
257 int bpf_map_new_fd(struct bpf_map *map)
258 {
259 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
260 				O_RDWR | O_CLOEXEC);
261 }
262 
263 /* helper macro to check that unused fields of 'union bpf_attr' are zero */
264 #define CHECK_ATTR(CMD) \
265 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
266 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
267 		   sizeof(*attr) - \
268 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
269 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
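/*
 * In effect, CHECK_ATTR(CMD) runs memchr_inv() over every byte of
 * 'union bpf_attr' that lies past CMD##_LAST_FIELD and evaluates to
 * true ("bad attr") if any of those trailing bytes is non-zero.  For
 * BPF_MAP_CREATE, whose last field is inner_map_fd, the scan starts
 * right after inner_map_fd and covers the rest of the union, so each
 * command rejects attributes that set fields it does not understand.
 */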
270 
271 #define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
272 /* called via syscall */
273 static int map_create(union bpf_attr *attr)
274 {
275 	struct bpf_map *map;
276 	int err;
277 
278 	err = CHECK_ATTR(BPF_MAP_CREATE);
279 	if (err)
280 		return -EINVAL;
281 
282 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
283 	map = find_and_alloc_map(attr);
284 	if (IS_ERR(map))
285 		return PTR_ERR(map);
286 
287 	atomic_set(&map->refcnt, 1);
288 	atomic_set(&map->usercnt, 1);
289 
290 	err = bpf_map_charge_memlock(map);
291 	if (err)
292 		goto free_map_nouncharge;
293 
294 	err = bpf_map_alloc_id(map);
295 	if (err)
296 		goto free_map;
297 
298 	err = bpf_map_new_fd(map);
299 	if (err < 0) {
300 		/* failed to allocate fd.
301 		 * bpf_map_put() is needed because the above
302 		 * bpf_map_alloc_id() has published the map
303 		 * to userspace, and userspace may have
304 		 * refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
305 		 */
306 		bpf_map_put(map);
307 		return err;
308 	}
309 
310 	trace_bpf_map_create(map, err);
311 	return err;
312 
313 free_map:
314 	bpf_map_uncharge_memlock(map);
315 free_map_nouncharge:
316 	map->ops->map_free(map);
317 	return err;
318 }
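/*
 * A minimal userspace sketch of driving the BPF_MAP_CREATE path above
 * through the raw bpf(2) syscall.  The headers, map type and sizes below
 * are illustrative assumptions, not taken from this file:
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int create_array_map(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));	// unused tail must stay zero, see CHECK_ATTR()
 *		attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *		attr.key_size    = sizeof(__u32);
 *		attr.value_size  = sizeof(__u64);
 *		attr.max_entries = 64;
 *
 *		// returns the fd from bpf_map_new_fd() (>= 0) or -1 with errno set
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 *
 * Closing the returned fd ends up in bpf_map_release() and drops the
 * references taken in map_create().
 */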
319 
320 /* if error is returned, fd is released.
321  * On success caller should complete fd access with matching fdput()
322  */
323 struct bpf_map *__bpf_map_get(struct fd f)
324 {
325 	if (!f.file)
326 		return ERR_PTR(-EBADF);
327 	if (f.file->f_op != &bpf_map_fops) {
328 		fdput(f);
329 		return ERR_PTR(-EINVAL);
330 	}
331 
332 	return f.file->private_data;
333 }
334 
335 /* prog's and map's refcnt limit */
336 #define BPF_MAX_REFCNT 32768
337 
338 struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
339 {
340 	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
341 		atomic_dec(&map->refcnt);
342 		return ERR_PTR(-EBUSY);
343 	}
344 	if (uref)
345 		atomic_inc(&map->usercnt);
346 	return map;
347 }
348 
349 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
350 {
351 	struct fd f = fdget(ufd);
352 	struct bpf_map *map;
353 
354 	map = __bpf_map_get(f);
355 	if (IS_ERR(map))
356 		return map;
357 
358 	map = bpf_map_inc(map, true);
359 	fdput(f);
360 
361 	return map;
362 }
363 
364 /* map_idr_lock must be held by the caller */
365 static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
366 					    bool uref)
367 {
368 	int refold;
369 
370 	refold = __atomic_add_unless(&map->refcnt, 1, 0);
371 
372 	if (refold >= BPF_MAX_REFCNT) {
373 		__bpf_map_put(map, false);
374 		return ERR_PTR(-EBUSY);
375 	}
376 
377 	if (!refold)
378 		return ERR_PTR(-ENOENT);
379 
380 	if (uref)
381 		atomic_inc(&map->usercnt);
382 
383 	return map;
384 }
385 
386 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
387 {
388 	return -ENOTSUPP;
389 }
390 
391 /* last field in 'union bpf_attr' used by this command */
392 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
393 
394 static int map_lookup_elem(union bpf_attr *attr)
395 {
396 	void __user *ukey = u64_to_user_ptr(attr->key);
397 	void __user *uvalue = u64_to_user_ptr(attr->value);
398 	int ufd = attr->map_fd;
399 	struct bpf_map *map;
400 	void *key, *value, *ptr;
401 	u32 value_size;
402 	struct fd f;
403 	int err;
404 
405 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
406 		return -EINVAL;
407 
408 	f = fdget(ufd);
409 	map = __bpf_map_get(f);
410 	if (IS_ERR(map))
411 		return PTR_ERR(map);
412 
413 	err = -ENOMEM;
414 	key = kmalloc(map->key_size, GFP_USER);
415 	if (!key)
416 		goto err_put;
417 
418 	err = -EFAULT;
419 	if (copy_from_user(key, ukey, map->key_size) != 0)
420 		goto free_key;
421 
422 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
423 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
424 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
425 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
426 	else if (IS_FD_MAP(map))
427 		value_size = sizeof(u32);
428 	else
429 		value_size = map->value_size;
430 
431 	err = -ENOMEM;
432 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
433 	if (!value)
434 		goto free_key;
435 
436 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
437 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
438 		err = bpf_percpu_hash_copy(map, key, value);
439 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
440 		err = bpf_percpu_array_copy(map, key, value);
441 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
442 		err = bpf_stackmap_copy(map, key, value);
443 	} else if (IS_FD_ARRAY(map)) {
444 		err = bpf_fd_array_map_lookup_elem(map, key, value);
445 	} else if (IS_FD_HASH(map)) {
446 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
447 	} else {
448 		rcu_read_lock();
449 		ptr = map->ops->map_lookup_elem(map, key);
450 		if (ptr)
451 			memcpy(value, ptr, value_size);
452 		rcu_read_unlock();
453 		err = ptr ? 0 : -ENOENT;
454 	}
455 
456 	if (err)
457 		goto free_value;
458 
459 	err = -EFAULT;
460 	if (copy_to_user(uvalue, value, value_size) != 0)
461 		goto free_value;
462 
463 	trace_bpf_map_lookup_elem(map, ufd, key, value);
464 	err = 0;
465 
466 free_value:
467 	kfree(value);
468 free_key:
469 	kfree(key);
470 err_put:
471 	fdput(f);
472 	return err;
473 }
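/*
 * A matching userspace sketch for BPF_MAP_LOOKUP_ELEM, using the same
 * headers as the map_create() sketch above; the map fd, key and value
 * types are assumptions for illustration.  Note the value buffer must
 * follow the sizing rule applied in map_lookup_elem(): per-cpu maps need
 * round_up(value_size, 8) * num_possible_cpus() bytes, fd-based maps a
 * single u32, and everything else map->value_size bytes:
 *
 *	static int lookup_u64(int map_fd, __u32 key, __u64 *value)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = map_fd;
 *		attr.key    = (__u64)(unsigned long)&key;
 *		attr.value  = (__u64)(unsigned long)value;
 *
 *		// 0 on success, -1 with errno == ENOENT if the key is absent
 *		return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	}
 */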
474 
475 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
476 
477 static int map_update_elem(union bpf_attr *attr)
478 {
479 	void __user *ukey = u64_to_user_ptr(attr->key);
480 	void __user *uvalue = u64_to_user_ptr(attr->value);
481 	int ufd = attr->map_fd;
482 	struct bpf_map *map;
483 	void *key, *value;
484 	u32 value_size;
485 	struct fd f;
486 	int err;
487 
488 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
489 		return -EINVAL;
490 
491 	f = fdget(ufd);
492 	map = __bpf_map_get(f);
493 	if (IS_ERR(map))
494 		return PTR_ERR(map);
495 
496 	err = -ENOMEM;
497 	key = kmalloc(map->key_size, GFP_USER);
498 	if (!key)
499 		goto err_put;
500 
501 	err = -EFAULT;
502 	if (copy_from_user(key, ukey, map->key_size) != 0)
503 		goto free_key;
504 
505 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
506 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
507 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
508 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
509 	else
510 		value_size = map->value_size;
511 
512 	err = -ENOMEM;
513 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
514 	if (!value)
515 		goto free_key;
516 
517 	err = -EFAULT;
518 	if (copy_from_user(value, uvalue, value_size) != 0)
519 		goto free_value;
520 
521 	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
522 	 * inside bpf map update or delete, otherwise deadlocks are possible
523 	 */
524 	preempt_disable();
525 	__this_cpu_inc(bpf_prog_active);
526 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
527 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
528 		err = bpf_percpu_hash_update(map, key, value, attr->flags);
529 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
530 		err = bpf_percpu_array_update(map, key, value, attr->flags);
531 	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
532 		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
533 		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
534 		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
535 		rcu_read_lock();
536 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
537 						   attr->flags);
538 		rcu_read_unlock();
539 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
540 		rcu_read_lock();
541 		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
542 						  attr->flags);
543 		rcu_read_unlock();
544 	} else {
545 		rcu_read_lock();
546 		err = map->ops->map_update_elem(map, key, value, attr->flags);
547 		rcu_read_unlock();
548 	}
549 	__this_cpu_dec(bpf_prog_active);
550 	preempt_enable();
551 
552 	if (!err)
553 		trace_bpf_map_update_elem(map, ufd, key, value);
554 free_value:
555 	kfree(value);
556 free_key:
557 	kfree(key);
558 err_put:
559 	fdput(f);
560 	return err;
561 }
562 
563 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
564 
565 static int map_delete_elem(union bpf_attr *attr)
566 {
567 	void __user *ukey = u64_to_user_ptr(attr->key);
568 	int ufd = attr->map_fd;
569 	struct bpf_map *map;
570 	struct fd f;
571 	void *key;
572 	int err;
573 
574 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
575 		return -EINVAL;
576 
577 	f = fdget(ufd);
578 	map = __bpf_map_get(f);
579 	if (IS_ERR(map))
580 		return PTR_ERR(map);
581 
582 	err = -ENOMEM;
583 	key = kmalloc(map->key_size, GFP_USER);
584 	if (!key)
585 		goto err_put;
586 
587 	err = -EFAULT;
588 	if (copy_from_user(key, ukey, map->key_size) != 0)
589 		goto free_key;
590 
591 	preempt_disable();
592 	__this_cpu_inc(bpf_prog_active);
593 	rcu_read_lock();
594 	err = map->ops->map_delete_elem(map, key);
595 	rcu_read_unlock();
596 	__this_cpu_dec(bpf_prog_active);
597 	preempt_enable();
598 
599 	if (!err)
600 		trace_bpf_map_delete_elem(map, ufd, key);
601 free_key:
602 	kfree(key);
603 err_put:
604 	fdput(f);
605 	return err;
606 }
607 
608 /* last field in 'union bpf_attr' used by this command */
609 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
610 
611 static int map_get_next_key(union bpf_attr *attr)
612 {
613 	void __user *ukey = u64_to_user_ptr(attr->key);
614 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
615 	int ufd = attr->map_fd;
616 	struct bpf_map *map;
617 	void *key, *next_key;
618 	struct fd f;
619 	int err;
620 
621 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
622 		return -EINVAL;
623 
624 	f = fdget(ufd);
625 	map = __bpf_map_get(f);
626 	if (IS_ERR(map))
627 		return PTR_ERR(map);
628 
629 	if (ukey) {
630 		err = -ENOMEM;
631 		key = kmalloc(map->key_size, GFP_USER);
632 		if (!key)
633 			goto err_put;
634 
635 		err = -EFAULT;
636 		if (copy_from_user(key, ukey, map->key_size) != 0)
637 			goto free_key;
638 	} else {
639 		key = NULL;
640 	}
641 
642 	err = -ENOMEM;
643 	next_key = kmalloc(map->key_size, GFP_USER);
644 	if (!next_key)
645 		goto free_key;
646 
647 	rcu_read_lock();
648 	err = map->ops->map_get_next_key(map, key, next_key);
649 	rcu_read_unlock();
650 	if (err)
651 		goto free_next_key;
652 
653 	err = -EFAULT;
654 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
655 		goto free_next_key;
656 
657 	trace_bpf_map_next_key(map, ufd, key, next_key);
658 	err = 0;
659 
660 free_next_key:
661 	kfree(next_key);
662 free_key:
663 	kfree(key);
664 err_put:
665 	fdput(f);
666 	return err;
667 }
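/*
 * map_get_next_key() is the building block for iterating a map from
 * userspace: the code above explicitly accepts a NULL key to mean "give
 * me the first key" (how a NULL key is treated beyond that is up to the
 * individual ->map_get_next_key()), and each returned key is fed back in
 * until the call fails with ENOENT.  A hedged sketch, with the key type
 * and helper name chosen for illustration and the same headers as the
 * sketches above:
 *
 *	static void walk_keys(int map_fd)
 *	{
 *		union bpf_attr attr;
 *		__u32 key, next_key;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd   = map_fd;
 *		attr.key      = 0;	// NULL key: ask for the first key
 *		attr.next_key = (__u64)(unsigned long)&next_key;
 *
 *		while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
 *			key = next_key;	// ... use key ...
 *			attr.key = (__u64)(unsigned long)&key;
 *		}
 *		// the loop ends with errno == ENOENT after the last key
 *	}
 */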
668 
669 static const struct bpf_verifier_ops * const bpf_prog_types[] = {
670 #define BPF_PROG_TYPE(_id, _ops) \
671 	[_id] = &_ops,
672 #define BPF_MAP_TYPE(_id, _ops)
673 #include <linux/bpf_types.h>
674 #undef BPF_PROG_TYPE
675 #undef BPF_MAP_TYPE
676 };
677 
678 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
679 {
680 	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
681 		return -EINVAL;
682 
683 	prog->aux->ops = bpf_prog_types[type];
684 	prog->type = type;
685 	return 0;
686 }
687 
688 /* drop refcnt on maps used by eBPF program and free auxiliary data */
689 static void free_used_maps(struct bpf_prog_aux *aux)
690 {
691 	int i;
692 
693 	for (i = 0; i < aux->used_map_cnt; i++)
694 		bpf_map_put(aux->used_maps[i]);
695 
696 	kfree(aux->used_maps);
697 }
698 
699 int __bpf_prog_charge(struct user_struct *user, u32 pages)
700 {
701 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
702 	unsigned long user_bufs;
703 
704 	if (user) {
705 		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
706 		if (user_bufs > memlock_limit) {
707 			atomic_long_sub(pages, &user->locked_vm);
708 			return -EPERM;
709 		}
710 	}
711 
712 	return 0;
713 }
714 
715 void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
716 {
717 	if (user)
718 		atomic_long_sub(pages, &user->locked_vm);
719 }
720 
721 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
722 {
723 	struct user_struct *user = get_current_user();
724 	int ret;
725 
726 	ret = __bpf_prog_charge(user, prog->pages);
727 	if (ret) {
728 		free_uid(user);
729 		return ret;
730 	}
731 
732 	prog->aux->user = user;
733 	return 0;
734 }
735 
736 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
737 {
738 	struct user_struct *user = prog->aux->user;
739 
740 	__bpf_prog_uncharge(user, prog->pages);
741 	free_uid(user);
742 }
743 
744 static int bpf_prog_alloc_id(struct bpf_prog *prog)
745 {
746 	int id;
747 
748 	spin_lock_bh(&prog_idr_lock);
749 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
750 	if (id > 0)
751 		prog->aux->id = id;
752 	spin_unlock_bh(&prog_idr_lock);
753 
754 	/* id is in [1, INT_MAX) */
755 	if (WARN_ON_ONCE(!id))
756 		return -ENOSPC;
757 
758 	return id > 0 ? 0 : id;
759 }
760 
761 static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
762 {
763 	/* cBPF to eBPF migrations are currently not in the idr store. */
764 	if (!prog->aux->id)
765 		return;
766 
767 	if (do_idr_lock)
768 		spin_lock_bh(&prog_idr_lock);
769 	else
770 		__acquire(&prog_idr_lock);
771 
772 	idr_remove(&prog_idr, prog->aux->id);
773 
774 	if (do_idr_lock)
775 		spin_unlock_bh(&prog_idr_lock);
776 	else
777 		__release(&prog_idr_lock);
778 }
779 
780 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
781 {
782 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
783 
784 	free_used_maps(aux);
785 	bpf_prog_uncharge_memlock(aux->prog);
786 	bpf_prog_free(aux->prog);
787 }
788 
789 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
790 {
791 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
792 		trace_bpf_prog_put_rcu(prog);
793 		/* bpf_prog_free_id() must be called first */
794 		bpf_prog_free_id(prog, do_idr_lock);
795 		bpf_prog_kallsyms_del(prog);
796 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
797 	}
798 }
799 
800 void bpf_prog_put(struct bpf_prog *prog)
801 {
802 	__bpf_prog_put(prog, true);
803 }
804 EXPORT_SYMBOL_GPL(bpf_prog_put);
805 
806 static int bpf_prog_release(struct inode *inode, struct file *filp)
807 {
808 	struct bpf_prog *prog = filp->private_data;
809 
810 	bpf_prog_put(prog);
811 	return 0;
812 }
813 
814 #ifdef CONFIG_PROC_FS
815 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
816 {
817 	const struct bpf_prog *prog = filp->private_data;
818 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
819 
820 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
821 	seq_printf(m,
822 		   "prog_type:\t%u\n"
823 		   "prog_jited:\t%u\n"
824 		   "prog_tag:\t%s\n"
825 		   "memlock:\t%llu\n",
826 		   prog->type,
827 		   prog->jited,
828 		   prog_tag,
829 		   prog->pages * 1ULL << PAGE_SHIFT);
830 }
831 #endif
832 
833 static const struct file_operations bpf_prog_fops = {
834 #ifdef CONFIG_PROC_FS
835 	.show_fdinfo	= bpf_prog_show_fdinfo,
836 #endif
837 	.release	= bpf_prog_release,
838 };
839 
840 int bpf_prog_new_fd(struct bpf_prog *prog)
841 {
842 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
843 				O_RDWR | O_CLOEXEC);
844 }
845 
846 static struct bpf_prog *____bpf_prog_get(struct fd f)
847 {
848 	if (!f.file)
849 		return ERR_PTR(-EBADF);
850 	if (f.file->f_op != &bpf_prog_fops) {
851 		fdput(f);
852 		return ERR_PTR(-EINVAL);
853 	}
854 
855 	return f.file->private_data;
856 }
857 
858 struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
859 {
860 	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
861 		atomic_sub(i, &prog->aux->refcnt);
862 		return ERR_PTR(-EBUSY);
863 	}
864 	return prog;
865 }
866 EXPORT_SYMBOL_GPL(bpf_prog_add);
867 
868 void bpf_prog_sub(struct bpf_prog *prog, int i)
869 {
870 	/* Only to be used for undoing previous bpf_prog_add() in some
871 	 * error path. We still know that another entity in our call
872 	 * path holds a reference to the program, thus atomic_sub() can
873 	 * be safely used in such cases!
874 	 */
875 	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
876 }
877 EXPORT_SYMBOL_GPL(bpf_prog_sub);
878 
879 struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
880 {
881 	return bpf_prog_add(prog, 1);
882 }
883 EXPORT_SYMBOL_GPL(bpf_prog_inc);
884 
885 /* prog_idr_lock must be held by the caller */
886 static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
887 {
888 	int refold;
889 
890 	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
891 
892 	if (refold >= BPF_MAX_REFCNT) {
893 		__bpf_prog_put(prog, false);
894 		return ERR_PTR(-EBUSY);
895 	}
896 
897 	if (!refold)
898 		return ERR_PTR(-ENOENT);
899 
900 	return prog;
901 }
902 
903 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
904 {
905 	struct fd f = fdget(ufd);
906 	struct bpf_prog *prog;
907 
908 	prog = ____bpf_prog_get(f);
909 	if (IS_ERR(prog))
910 		return prog;
911 	if (type && prog->type != *type) {
912 		prog = ERR_PTR(-EINVAL);
913 		goto out;
914 	}
915 
916 	prog = bpf_prog_inc(prog);
917 out:
918 	fdput(f);
919 	return prog;
920 }
921 
922 struct bpf_prog *bpf_prog_get(u32 ufd)
923 {
924 	return __bpf_prog_get(ufd, NULL);
925 }
926 
927 struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
928 {
929 	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);
930 
931 	if (!IS_ERR(prog))
932 		trace_bpf_prog_get_type(prog);
933 	return prog;
934 }
935 EXPORT_SYMBOL_GPL(bpf_prog_get_type);
936 
937 /* last field in 'union bpf_attr' used by this command */
938 #define	BPF_PROG_LOAD_LAST_FIELD prog_flags
939 
940 static int bpf_prog_load(union bpf_attr *attr)
941 {
942 	enum bpf_prog_type type = attr->prog_type;
943 	struct bpf_prog *prog;
944 	int err;
945 	char license[128];
946 	bool is_gpl;
947 
948 	if (CHECK_ATTR(BPF_PROG_LOAD))
949 		return -EINVAL;
950 
951 	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
952 		return -EINVAL;
953 
954 	/* copy eBPF program license from user space */
955 	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
956 			      sizeof(license) - 1) < 0)
957 		return -EFAULT;
958 	license[sizeof(license) - 1] = 0;
959 
960 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
961 	is_gpl = license_is_gpl_compatible(license);
962 
963 	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
964 		return -E2BIG;
965 
966 	if (type == BPF_PROG_TYPE_KPROBE &&
967 	    attr->kern_version != LINUX_VERSION_CODE)
968 		return -EINVAL;
969 
970 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
971 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
972 	    !capable(CAP_SYS_ADMIN))
973 		return -EPERM;
974 
975 	/* plain bpf_prog allocation */
976 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
977 	if (!prog)
978 		return -ENOMEM;
979 
980 	err = bpf_prog_charge_memlock(prog);
981 	if (err)
982 		goto free_prog_nouncharge;
983 
984 	prog->len = attr->insn_cnt;
985 
986 	err = -EFAULT;
987 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
988 			   bpf_prog_insn_size(prog)) != 0)
989 		goto free_prog;
990 
991 	prog->orig_prog = NULL;
992 	prog->jited = 0;
993 
994 	atomic_set(&prog->aux->refcnt, 1);
995 	prog->gpl_compatible = is_gpl ? 1 : 0;
996 
997 	/* find program type: socket_filter vs tracing_filter */
998 	err = find_prog_type(type, prog);
999 	if (err < 0)
1000 		goto free_prog;
1001 
1002 	/* run eBPF verifier */
1003 	err = bpf_check(&prog, attr);
1004 	if (err < 0)
1005 		goto free_used_maps;
1006 
1007 	/* eBPF program is ready to be JITed */
1008 	prog = bpf_prog_select_runtime(prog, &err);
1009 	if (err < 0)
1010 		goto free_used_maps;
1011 
1012 	err = bpf_prog_alloc_id(prog);
1013 	if (err)
1014 		goto free_used_maps;
1015 
1016 	err = bpf_prog_new_fd(prog);
1017 	if (err < 0) {
1018 		/* failed to allocate fd.
1019 		 * bpf_prog_put() is needed because the above
1020 		 * bpf_prog_alloc_id() has published the prog
1021 		 * to userspace, and userspace may have
1022 		 * refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
1023 		 */
1024 		bpf_prog_put(prog);
1025 		return err;
1026 	}
1027 
1028 	bpf_prog_kallsyms_add(prog);
1029 	trace_bpf_prog_load(prog, err);
1030 	return err;
1031 
1032 free_used_maps:
1033 	free_used_maps(prog->aux);
1034 free_prog:
1035 	bpf_prog_uncharge_memlock(prog);
1036 free_prog_nouncharge:
1037 	bpf_prog_free(prog);
1038 	return err;
1039 }
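/*
 * A hedged userspace sketch of the BPF_PROG_LOAD path above.  The program
 * is the two-instruction filter "r0 = 0; exit", spelled out as raw
 * struct bpf_insn initializers so only the uapi header is required; the
 * prog type and license are illustrative choices (same headers as the
 * sketches above):
 *
 *	static int load_drop_all_filter(void)
 *	{
 *		struct bpf_insn insns[] = {
 *			{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *			  .dst_reg = BPF_REG_0 },		// r0 = 0
 *			{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *		};
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insns     = (__u64)(unsigned long)insns;
 *		attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
 *		attr.license   = (__u64)(unsigned long)"GPL";
 *
 *		// new prog fd on success; kern_version is only checked for kprobe progs
 *		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *	}
 */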
1040 
1041 #define BPF_OBJ_LAST_FIELD bpf_fd
1042 
1043 static int bpf_obj_pin(const union bpf_attr *attr)
1044 {
1045 	if (CHECK_ATTR(BPF_OBJ))
1046 		return -EINVAL;
1047 
1048 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
1049 }
1050 
1051 static int bpf_obj_get(const union bpf_attr *attr)
1052 {
1053 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
1054 		return -EINVAL;
1055 
1056 	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
1057 }
1058 
1059 #ifdef CONFIG_CGROUP_BPF
1060 
1061 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
1062 
1063 static int bpf_prog_attach(const union bpf_attr *attr)
1064 {
1065 	enum bpf_prog_type ptype;
1066 	struct bpf_prog *prog;
1067 	struct cgroup *cgrp;
1068 	int ret;
1069 
1070 	if (!capable(CAP_NET_ADMIN))
1071 		return -EPERM;
1072 
1073 	if (CHECK_ATTR(BPF_PROG_ATTACH))
1074 		return -EINVAL;
1075 
1076 	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
1077 		return -EINVAL;
1078 
1079 	switch (attr->attach_type) {
1080 	case BPF_CGROUP_INET_INGRESS:
1081 	case BPF_CGROUP_INET_EGRESS:
1082 		ptype = BPF_PROG_TYPE_CGROUP_SKB;
1083 		break;
1084 	case BPF_CGROUP_INET_SOCK_CREATE:
1085 		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
1086 		break;
1087 	case BPF_CGROUP_SOCK_OPS:
1088 		ptype = BPF_PROG_TYPE_SOCK_OPS;
1089 		break;
1090 	default:
1091 		return -EINVAL;
1092 	}
1093 
1094 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
1095 	if (IS_ERR(prog))
1096 		return PTR_ERR(prog);
1097 
1098 	cgrp = cgroup_get_from_fd(attr->target_fd);
1099 	if (IS_ERR(cgrp)) {
1100 		bpf_prog_put(prog);
1101 		return PTR_ERR(cgrp);
1102 	}
1103 
1104 	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
1105 				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
1106 	if (ret)
1107 		bpf_prog_put(prog);
1108 	cgroup_put(cgrp);
1109 
1110 	return ret;
1111 }
1112 
1113 #define BPF_PROG_DETACH_LAST_FIELD attach_type
1114 
1115 static int bpf_prog_detach(const union bpf_attr *attr)
1116 {
1117 	struct cgroup *cgrp;
1118 	int ret;
1119 
1120 	if (!capable(CAP_NET_ADMIN))
1121 		return -EPERM;
1122 
1123 	if (CHECK_ATTR(BPF_PROG_DETACH))
1124 		return -EINVAL;
1125 
1126 	switch (attr->attach_type) {
1127 	case BPF_CGROUP_INET_INGRESS:
1128 	case BPF_CGROUP_INET_EGRESS:
1129 	case BPF_CGROUP_INET_SOCK_CREATE:
1130 	case BPF_CGROUP_SOCK_OPS:
1131 		cgrp = cgroup_get_from_fd(attr->target_fd);
1132 		if (IS_ERR(cgrp))
1133 			return PTR_ERR(cgrp);
1134 
1135 		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
1136 		cgroup_put(cgrp);
1137 		break;
1138 
1139 	default:
1140 		return -EINVAL;
1141 	}
1142 
1143 	return ret;
1144 }
1145 
1146 #endif /* CONFIG_CGROUP_BPF */
1147 
1148 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
1149 
1150 static int bpf_prog_test_run(const union bpf_attr *attr,
1151 			     union bpf_attr __user *uattr)
1152 {
1153 	struct bpf_prog *prog;
1154 	int ret = -ENOTSUPP;
1155 
1156 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
1157 		return -EINVAL;
1158 
1159 	prog = bpf_prog_get(attr->test.prog_fd);
1160 	if (IS_ERR(prog))
1161 		return PTR_ERR(prog);
1162 
1163 	if (prog->aux->ops->test_run)
1164 		ret = prog->aux->ops->test_run(prog, attr, uattr);
1165 
1166 	bpf_prog_put(prog);
1167 	return ret;
1168 }
1169 
1170 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
1171 
1172 static int bpf_obj_get_next_id(const union bpf_attr *attr,
1173 			       union bpf_attr __user *uattr,
1174 			       struct idr *idr,
1175 			       spinlock_t *lock)
1176 {
1177 	u32 next_id = attr->start_id;
1178 	int err = 0;
1179 
1180 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
1181 		return -EINVAL;
1182 
1183 	if (!capable(CAP_SYS_ADMIN))
1184 		return -EPERM;
1185 
1186 	next_id++;
1187 	spin_lock_bh(lock);
1188 	if (!idr_get_next(idr, &next_id))
1189 		err = -ENOENT;
1190 	spin_unlock_bh(lock);
1191 
1192 	if (!err)
1193 		err = put_user(next_id, &uattr->next_id);
1194 
1195 	return err;
1196 }
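/*
 * Together with BPF_PROG_GET_FD_BY_ID / BPF_MAP_GET_FD_BY_ID below, this
 * is how a CAP_SYS_ADMIN tool enumerates everything that is loaded.  A
 * sketch of the prog-id walk (same headers as the sketches above):
 *
 *	static void list_prog_ids(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.start_id = 0;
 *		while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *			// attr.next_id now holds the next allocated prog id
 *			attr.start_id = attr.next_id;
 *		}
 *		// ends with errno == ENOENT once there is no higher id
 *	}
 */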
1197 
1198 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
1199 
1200 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
1201 {
1202 	struct bpf_prog *prog;
1203 	u32 id = attr->prog_id;
1204 	int fd;
1205 
1206 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
1207 		return -EINVAL;
1208 
1209 	if (!capable(CAP_SYS_ADMIN))
1210 		return -EPERM;
1211 
1212 	spin_lock_bh(&prog_idr_lock);
1213 	prog = idr_find(&prog_idr, id);
1214 	if (prog)
1215 		prog = bpf_prog_inc_not_zero(prog);
1216 	else
1217 		prog = ERR_PTR(-ENOENT);
1218 	spin_unlock_bh(&prog_idr_lock);
1219 
1220 	if (IS_ERR(prog))
1221 		return PTR_ERR(prog);
1222 
1223 	fd = bpf_prog_new_fd(prog);
1224 	if (fd < 0)
1225 		bpf_prog_put(prog);
1226 
1227 	return fd;
1228 }
1229 
1230 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id
1231 
1232 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
1233 {
1234 	struct bpf_map *map;
1235 	u32 id = attr->map_id;
1236 	int fd;
1237 
1238 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
1239 		return -EINVAL;
1240 
1241 	if (!capable(CAP_SYS_ADMIN))
1242 		return -EPERM;
1243 
1244 	spin_lock_bh(&map_idr_lock);
1245 	map = idr_find(&map_idr, id);
1246 	if (map)
1247 		map = bpf_map_inc_not_zero(map, true);
1248 	else
1249 		map = ERR_PTR(-ENOENT);
1250 	spin_unlock_bh(&map_idr_lock);
1251 
1252 	if (IS_ERR(map))
1253 		return PTR_ERR(map);
1254 
1255 	fd = bpf_map_new_fd(map);
1256 	if (fd < 0)
1257 		bpf_map_put(map);
1258 
1259 	return fd;
1260 }
1261 
1262 static int check_uarg_tail_zero(void __user *uaddr,
1263 				size_t expected_size,
1264 				size_t actual_size)
1265 {
1266 	unsigned char __user *addr;
1267 	unsigned char __user *end;
1268 	unsigned char val;
1269 	int err;
1270 
1271 	if (actual_size <= expected_size)
1272 		return 0;
1273 
1274 	addr = uaddr + expected_size;
1275 	end  = uaddr + actual_size;
1276 
1277 	for (; addr < end; addr++) {
1278 		err = get_user(val, addr);
1279 		if (err)
1280 			return err;
1281 		if (val)
1282 			return -E2BIG;
1283 	}
1284 
1285 	return 0;
1286 }
1287 
1288 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
1289 				   const union bpf_attr *attr,
1290 				   union bpf_attr __user *uattr)
1291 {
1292 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
1293 	struct bpf_prog_info info = {};
1294 	u32 info_len = attr->info.info_len;
1295 	char __user *uinsns;
1296 	u32 ulen;
1297 	int err;
1298 
1299 	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
1300 	if (err)
1301 		return err;
1302 	info_len = min_t(u32, sizeof(info), info_len);
1303 
1304 	if (copy_from_user(&info, uinfo, info_len))
1305 		return -EFAULT;
1306 
1307 	info.type = prog->type;
1308 	info.id = prog->aux->id;
1309 
1310 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
1311 
1312 	if (!capable(CAP_SYS_ADMIN)) {
1313 		info.jited_prog_len = 0;
1314 		info.xlated_prog_len = 0;
1315 		goto done;
1316 	}
1317 
1318 	ulen = info.jited_prog_len;
1319 	info.jited_prog_len = prog->jited_len;
1320 	if (info.jited_prog_len && ulen) {
1321 		uinsns = u64_to_user_ptr(info.jited_prog_insns);
1322 		ulen = min_t(u32, info.jited_prog_len, ulen);
1323 		if (copy_to_user(uinsns, prog->bpf_func, ulen))
1324 			return -EFAULT;
1325 	}
1326 
1327 	ulen = info.xlated_prog_len;
1328 	info.xlated_prog_len = bpf_prog_size(prog->len);
1329 	if (info.xlated_prog_len && ulen) {
1330 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
1331 		ulen = min_t(u32, info.xlated_prog_len, ulen);
1332 		if (copy_to_user(uinsns, prog->insnsi, ulen))
1333 			return -EFAULT;
1334 	}
1335 
1336 done:
1337 	if (copy_to_user(uinfo, &info, info_len) ||
1338 	    put_user(info_len, &uattr->info.info_len))
1339 		return -EFAULT;
1340 
1341 	return 0;
1342 }
1343 
1344 static int bpf_map_get_info_by_fd(struct bpf_map *map,
1345 				  const union bpf_attr *attr,
1346 				  union bpf_attr __user *uattr)
1347 {
1348 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
1349 	struct bpf_map_info info = {};
1350 	u32 info_len = attr->info.info_len;
1351 	int err;
1352 
1353 	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
1354 	if (err)
1355 		return err;
1356 	info_len = min_t(u32, sizeof(info), info_len);
1357 
1358 	info.type = map->map_type;
1359 	info.id = map->id;
1360 	info.key_size = map->key_size;
1361 	info.value_size = map->value_size;
1362 	info.max_entries = map->max_entries;
1363 	info.map_flags = map->map_flags;
1364 
1365 	if (copy_to_user(uinfo, &info, info_len) ||
1366 	    put_user(info_len, &uattr->info.info_len))
1367 		return -EFAULT;
1368 
1369 	return 0;
1370 }
1371 
1372 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
1373 
1374 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
1375 				  union bpf_attr __user *uattr)
1376 {
1377 	int ufd = attr->info.bpf_fd;
1378 	struct fd f;
1379 	int err;
1380 
1381 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
1382 		return -EINVAL;
1383 
1384 	f = fdget(ufd);
1385 	if (!f.file)
1386 		return -EBADFD;
1387 
1388 	if (f.file->f_op == &bpf_prog_fops)
1389 		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
1390 					      uattr);
1391 	else if (f.file->f_op == &bpf_map_fops)
1392 		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
1393 					     uattr);
1394 	else
1395 		err = -EINVAL;
1396 
1397 	fdput(f);
1398 	return err;
1399 }
1400 
1401 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
1402 {
1403 	union bpf_attr attr = {};
1404 	int err;
1405 
1406 	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
1407 		return -EPERM;
1408 
1409 	if (!access_ok(VERIFY_READ, uattr, 1))
1410 		return -EFAULT;
1411 
1412 	if (size > PAGE_SIZE)	/* silly large */
1413 		return -E2BIG;
1414 
1415 	/* If we're handed a bigger struct than we know of,
1416 	 * ensure all the unknown bits are 0 - i.e. new
1417 	 * user-space does not rely on any kernel feature
1418 	 * extensions we don't know about yet.
1419 	 */
1420 	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
1421 	if (err)
1422 		return err;
1423 	size = min_t(u32, size, sizeof(attr));
1424 
1425 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
1426 	if (copy_from_user(&attr, uattr, size) != 0)
1427 		return -EFAULT;
1428 
1429 	switch (cmd) {
1430 	case BPF_MAP_CREATE:
1431 		err = map_create(&attr);
1432 		break;
1433 	case BPF_MAP_LOOKUP_ELEM:
1434 		err = map_lookup_elem(&attr);
1435 		break;
1436 	case BPF_MAP_UPDATE_ELEM:
1437 		err = map_update_elem(&attr);
1438 		break;
1439 	case BPF_MAP_DELETE_ELEM:
1440 		err = map_delete_elem(&attr);
1441 		break;
1442 	case BPF_MAP_GET_NEXT_KEY:
1443 		err = map_get_next_key(&attr);
1444 		break;
1445 	case BPF_PROG_LOAD:
1446 		err = bpf_prog_load(&attr);
1447 		break;
1448 	case BPF_OBJ_PIN:
1449 		err = bpf_obj_pin(&attr);
1450 		break;
1451 	case BPF_OBJ_GET:
1452 		err = bpf_obj_get(&attr);
1453 		break;
1454 #ifdef CONFIG_CGROUP_BPF
1455 	case BPF_PROG_ATTACH:
1456 		err = bpf_prog_attach(&attr);
1457 		break;
1458 	case BPF_PROG_DETACH:
1459 		err = bpf_prog_detach(&attr);
1460 		break;
1461 #endif
1462 	case BPF_PROG_TEST_RUN:
1463 		err = bpf_prog_test_run(&attr, uattr);
1464 		break;
1465 	case BPF_PROG_GET_NEXT_ID:
1466 		err = bpf_obj_get_next_id(&attr, uattr,
1467 					  &prog_idr, &prog_idr_lock);
1468 		break;
1469 	case BPF_MAP_GET_NEXT_ID:
1470 		err = bpf_obj_get_next_id(&attr, uattr,
1471 					  &map_idr, &map_idr_lock);
1472 		break;
1473 	case BPF_PROG_GET_FD_BY_ID:
1474 		err = bpf_prog_get_fd_by_id(&attr);
1475 		break;
1476 	case BPF_MAP_GET_FD_BY_ID:
1477 		err = bpf_map_get_fd_by_id(&attr);
1478 		break;
1479 	case BPF_OBJ_GET_INFO_BY_FD:
1480 		err = bpf_obj_get_info_by_fd(&attr, uattr);
1481 		break;
1482 	default:
1483 		err = -EINVAL;
1484 		break;
1485 	}
1486 
1487 	return err;
1488 }
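/*
 * The size handling above makes the syscall extensible in both directions:
 * newer userspace may pass a larger 'union bpf_attr' than this kernel
 * knows, as long as every byte past sizeof(attr) is zero (enforced by
 * check_uarg_tail_zero()), and older userspace may pass a smaller one,
 * since 'attr' starts out zeroed and copy_from_user() only fills the
 * first 'size' bytes.
 */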
1489