xref: /linux/kernel/bpf/syscall.c (revision 59ae1d127ac0ae404baf414c434ba2651b793f46)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  *
3  * This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful, but
8  * WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10  * General Public License for more details.
11  */
12 #include <linux/bpf.h>
13 #include <linux/bpf_trace.h>
14 #include <linux/syscalls.h>
15 #include <linux/slab.h>
16 #include <linux/sched/signal.h>
17 #include <linux/vmalloc.h>
18 #include <linux/mmzone.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/file.h>
21 #include <linux/license.h>
22 #include <linux/filter.h>
23 #include <linux/version.h>
24 #include <linux/kernel.h>
25 #include <linux/idr.h>
26 
27 DEFINE_PER_CPU(int, bpf_prog_active);
28 static DEFINE_IDR(prog_idr);
29 static DEFINE_SPINLOCK(prog_idr_lock);
30 static DEFINE_IDR(map_idr);
31 static DEFINE_SPINLOCK(map_idr_lock);
32 
33 int sysctl_unprivileged_bpf_disabled __read_mostly;
34 
35 static const struct bpf_map_ops * const bpf_map_types[] = {
36 #define BPF_PROG_TYPE(_id, _ops)
37 #define BPF_MAP_TYPE(_id, _ops) \
38 	[_id] = &_ops,
39 #include <linux/bpf_types.h>
40 #undef BPF_PROG_TYPE
41 #undef BPF_MAP_TYPE
42 };
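/* Illustrative expansion (the real entries live in <linux/bpf_types.h>):
 * a line there such as
 *
 *	BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
 *
 * becomes, in the table above,
 *
 *	[BPF_MAP_TYPE_ARRAY] = &array_map_ops,
 *
 * while BPF_PROG_TYPE() entries expand to nothing here, so the same
 * header also builds the bpf_prog_types[] table further down in this
 * file.
 */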
43 
44 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
45 {
46 	struct bpf_map *map;
47 
48 	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
49 	    !bpf_map_types[attr->map_type])
50 		return ERR_PTR(-EINVAL);
51 
52 	map = bpf_map_types[attr->map_type]->map_alloc(attr);
53 	if (IS_ERR(map))
54 		return map;
55 	map->ops = bpf_map_types[attr->map_type];
56 	map->map_type = attr->map_type;
57 	return map;
58 }
59 
60 void *bpf_map_area_alloc(size_t size)
61 {
62 	/* We definitely need __GFP_NORETRY, so the OOM killer doesn't
63 	 * trigger under memory pressure; we really just want to
64 	 * fail instead.
65 	 */
66 	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
67 	void *area;
68 
69 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
70 		area = kmalloc(size, GFP_USER | flags);
71 		if (area != NULL)
72 			return area;
73 	}
74 
75 	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
76 }
77 
78 void bpf_map_area_free(void *area)
79 {
80 	kvfree(area);
81 }
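/* Sketch of a typical (hypothetical) caller pairing the two helpers:
 * a map implementation allocates its backing store in ->map_alloc()
 * and releases it in ->map_free(), e.g.
 *
 *	elems = bpf_map_area_alloc(elem_size * attr->max_entries);
 *	if (!elems)
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	bpf_map_area_free(elems);
 *
 * kvfree() above copes with both outcomes of the allocation: memory
 * from kmalloc() as well as from __vmalloc().
 */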
82 
83 int bpf_map_precharge_memlock(u32 pages)
84 {
85 	struct user_struct *user = get_current_user();
86 	unsigned long memlock_limit, cur;
87 
88 	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
89 	cur = atomic_long_read(&user->locked_vm);
90 	free_uid(user);
91 	if (cur + pages > memlock_limit)
92 		return -EPERM;
93 	return 0;
94 }
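/* Note that the precharge above is only an advisory check; nothing is
 * reserved against locked_vm here. The binding charge is the one taken
 * later in bpf_map_charge_memlock().
 */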
95 
96 static int bpf_map_charge_memlock(struct bpf_map *map)
97 {
98 	struct user_struct *user = get_current_user();
99 	unsigned long memlock_limit;
100 
101 	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
102 
103 	atomic_long_add(map->pages, &user->locked_vm);
104 
105 	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
106 		atomic_long_sub(map->pages, &user->locked_vm);
107 		free_uid(user);
108 		return -EPERM;
109 	}
110 	map->user = user;
111 	return 0;
112 }
113 
114 static void bpf_map_uncharge_memlock(struct bpf_map *map)
115 {
116 	struct user_struct *user = map->user;
117 
118 	atomic_long_sub(map->pages, &user->locked_vm);
119 	free_uid(user);
120 }
121 
122 static int bpf_map_alloc_id(struct bpf_map *map)
123 {
124 	int id;
125 
126 	spin_lock_bh(&map_idr_lock);
127 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
128 	if (id > 0)
129 		map->id = id;
130 	spin_unlock_bh(&map_idr_lock);
131 
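	/* id is in [1, INT_MAX) */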
132 	if (WARN_ON_ONCE(!id))
133 		return -ENOSPC;
134 
135 	return id > 0 ? 0 : id;
136 }
137 
138 static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
139 {
140 	if (do_idr_lock)
141 		spin_lock_bh(&map_idr_lock);
142 	else
143 		__acquire(&map_idr_lock);
144 
145 	idr_remove(&map_idr, map->id);
146 
147 	if (do_idr_lock)
148 		spin_unlock_bh(&map_idr_lock);
149 	else
150 		__release(&map_idr_lock);
151 }
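/* In the !do_idr_lock case above the caller already holds map_idr_lock;
 * __acquire()/__release() are no-op sparse annotations that keep static
 * lock-balance checking happy.
 */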
152 
153 /* called from workqueue */
154 static void bpf_map_free_deferred(struct work_struct *work)
155 {
156 	struct bpf_map *map = container_of(work, struct bpf_map, work);
157 
158 	bpf_map_uncharge_memlock(map);
159 	/* implementation dependent freeing */
160 	map->ops->map_free(map);
161 }
162 
163 static void bpf_map_put_uref(struct bpf_map *map)
164 {
165 	if (atomic_dec_and_test(&map->usercnt)) {
166 		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
167 			bpf_fd_array_map_clear(map);
168 	}
169 }
170 
171 /* decrement map refcnt and schedule it for freeing via workqueue
172  * (underlying map implementation ops->map_free() might sleep)
173  */
174 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
175 {
176 	if (atomic_dec_and_test(&map->refcnt)) {
177 		/* bpf_map_free_id() must be called first */
178 		bpf_map_free_id(map, do_idr_lock);
179 		INIT_WORK(&map->work, bpf_map_free_deferred);
180 		schedule_work(&map->work);
181 	}
182 }
183 
184 void bpf_map_put(struct bpf_map *map)
185 {
186 	__bpf_map_put(map, true);
187 }
188 
189 void bpf_map_put_with_uref(struct bpf_map *map)
190 {
191 	bpf_map_put_uref(map);
192 	bpf_map_put(map);
193 }
194 
195 static int bpf_map_release(struct inode *inode, struct file *filp)
196 {
197 	struct bpf_map *map = filp->private_data;
198 
199 	if (map->ops->map_release)
200 		map->ops->map_release(map, filp);
201 
202 	bpf_map_put_with_uref(map);
203 	return 0;
204 }
205 
206 #ifdef CONFIG_PROC_FS
207 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
208 {
209 	const struct bpf_map *map = filp->private_data;
210 	const struct bpf_array *array;
211 	u32 owner_prog_type = 0;
212 
213 	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
214 		array = container_of(map, struct bpf_array, map);
215 		owner_prog_type = array->owner_prog_type;
216 	}
217 
218 	seq_printf(m,
219 		   "map_type:\t%u\n"
220 		   "key_size:\t%u\n"
221 		   "value_size:\t%u\n"
222 		   "max_entries:\t%u\n"
223 		   "map_flags:\t%#x\n"
224 		   "memlock:\t%llu\n",
225 		   map->map_type,
226 		   map->key_size,
227 		   map->value_size,
228 		   map->max_entries,
229 		   map->map_flags,
230 		   map->pages * 1ULL << PAGE_SHIFT);
231 
232 	if (owner_prog_type)
233 		seq_printf(m, "owner_prog_type:\t%u\n",
234 			   owner_prog_type);
235 }
236 #endif
237 
238 static const struct file_operations bpf_map_fops = {
239 #ifdef CONFIG_PROC_FS
240 	.show_fdinfo	= bpf_map_show_fdinfo,
241 #endif
242 	.release	= bpf_map_release,
243 };
244 
245 int bpf_map_new_fd(struct bpf_map *map)
246 {
247 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
248 				O_RDWR | O_CLOEXEC);
249 }
250 
251 /* helper macro to check that unused fields of 'union bpf_attr' are zero */
252 #define CHECK_ATTR(CMD) \
253 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
254 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
255 		   sizeof(*attr) - \
256 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
257 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
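/* Illustrative expansion (not part of the build): with
 * BPF_MAP_CREATE_LAST_FIELD defined as inner_map_fd below,
 * CHECK_ATTR(BPF_MAP_CREATE) becomes
 *
 *	memchr_inv((void *) &attr->inner_map_fd +
 *		   sizeof(attr->inner_map_fd), 0,
 *		   sizeof(*attr) -
 *		   offsetof(union bpf_attr, inner_map_fd) -
 *		   sizeof(attr->inner_map_fd)) != NULL
 *
 * i.e. true (reject with -EINVAL) iff any byte past the command's last
 * known field is non-zero.
 */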
258 
259 #define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
260 /* called via syscall */
261 static int map_create(union bpf_attr *attr)
262 {
263 	struct bpf_map *map;
264 	int err;
265 
266 	err = CHECK_ATTR(BPF_MAP_CREATE);
267 	if (err)
268 		return -EINVAL;
269 
270 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
271 	map = find_and_alloc_map(attr);
272 	if (IS_ERR(map))
273 		return PTR_ERR(map);
274 
275 	atomic_set(&map->refcnt, 1);
276 	atomic_set(&map->usercnt, 1);
277 
278 	err = bpf_map_charge_memlock(map);
279 	if (err)
280 		goto free_map_nouncharge;
281 
282 	err = bpf_map_alloc_id(map);
283 	if (err)
284 		goto free_map;
285 
286 	err = bpf_map_new_fd(map);
287 	if (err < 0) {
288 		/* failed to allocate fd.
289 		 * bpf_map_put() is needed because the
290 		 * bpf_map_alloc_id() above has already published
291 		 * the map to user space, which may have taken a
292 		 * reference to it via BPF_MAP_GET_FD_BY_ID.
293 		 */
294 		bpf_map_put(map);
295 		return err;
296 	}
297 
298 	trace_bpf_map_create(map, err);
299 	return err;
300 
301 free_map:
302 	bpf_map_uncharge_memlock(map);
303 free_map_nouncharge:
304 	map->ops->map_free(map);
305 	return err;
306 }
307 
308 /* If an error is returned, the fd is released.
309  * On success the caller should complete fd access with a matching fdput().
310  */
311 struct bpf_map *__bpf_map_get(struct fd f)
312 {
313 	if (!f.file)
314 		return ERR_PTR(-EBADF);
315 	if (f.file->f_op != &bpf_map_fops) {
316 		fdput(f);
317 		return ERR_PTR(-EINVAL);
318 	}
319 
320 	return f.file->private_data;
321 }
322 
323 /* prog's and map's refcnt limit */
324 #define BPF_MAX_REFCNT 32768
325 
326 struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
327 {
328 	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
329 		atomic_dec(&map->refcnt);
330 		return ERR_PTR(-EBUSY);
331 	}
332 	if (uref)
333 		atomic_inc(&map->usercnt);
334 	return map;
335 }
336 
337 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
338 {
339 	struct fd f = fdget(ufd);
340 	struct bpf_map *map;
341 
342 	map = __bpf_map_get(f);
343 	if (IS_ERR(map))
344 		return map;
345 
346 	map = bpf_map_inc(map, true);
347 	fdput(f);
348 
349 	return map;
350 }
351 
352 /* map_idr_lock must be held by the caller */
353 static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
354 					    bool uref)
355 {
356 	int refold;
357 
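	/* __atomic_add_unless() returns the old refcnt and increments it
	 * only if it was non-zero, so refold == 0 below means the map is
	 * already on its way to being freed.
	 */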
358 	refold = __atomic_add_unless(&map->refcnt, 1, 0);
359 
360 	if (refold >= BPF_MAX_REFCNT) {
361 		__bpf_map_put(map, false);
362 		return ERR_PTR(-EBUSY);
363 	}
364 
365 	if (!refold)
366 		return ERR_PTR(-ENOENT);
367 
368 	if (uref)
369 		atomic_inc(&map->usercnt);
370 
371 	return map;
372 }
373 
374 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
375 {
376 	return -ENOTSUPP;
377 }
378 
379 /* last field in 'union bpf_attr' used by this command */
380 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
381 
382 static int map_lookup_elem(union bpf_attr *attr)
383 {
384 	void __user *ukey = u64_to_user_ptr(attr->key);
385 	void __user *uvalue = u64_to_user_ptr(attr->value);
386 	int ufd = attr->map_fd;
387 	struct bpf_map *map;
388 	void *key, *value, *ptr;
389 	u32 value_size;
390 	struct fd f;
391 	int err;
392 
393 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
394 		return -EINVAL;
395 
396 	f = fdget(ufd);
397 	map = __bpf_map_get(f);
398 	if (IS_ERR(map))
399 		return PTR_ERR(map);
400 
401 	err = -ENOMEM;
402 	key = kmalloc(map->key_size, GFP_USER);
403 	if (!key)
404 		goto err_put;
405 
406 	err = -EFAULT;
407 	if (copy_from_user(key, ukey, map->key_size) != 0)
408 		goto free_key;
409 
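	/* Per-cpu maps return one value per possible CPU; each per-cpu
	 * copy is 8-byte aligned, hence the round_up() below.
	 */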
410 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
411 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
412 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
413 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
414 	else
415 		value_size = map->value_size;
416 
417 	err = -ENOMEM;
418 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
419 	if (!value)
420 		goto free_key;
421 
422 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
423 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
424 		err = bpf_percpu_hash_copy(map, key, value);
425 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
426 		err = bpf_percpu_array_copy(map, key, value);
427 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
428 		err = bpf_stackmap_copy(map, key, value);
429 	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
430 		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
431 		err = -ENOTSUPP;
432 	} else {
433 		rcu_read_lock();
434 		ptr = map->ops->map_lookup_elem(map, key);
435 		if (ptr)
436 			memcpy(value, ptr, value_size);
437 		rcu_read_unlock();
438 		err = ptr ? 0 : -ENOENT;
439 	}
440 
441 	if (err)
442 		goto free_value;
443 
444 	err = -EFAULT;
445 	if (copy_to_user(uvalue, value, value_size) != 0)
446 		goto free_value;
447 
448 	trace_bpf_map_lookup_elem(map, ufd, key, value);
449 	err = 0;
450 
451 free_value:
452 	kfree(value);
453 free_key:
454 	kfree(key);
455 err_put:
456 	fdput(f);
457 	return err;
458 }
459 
460 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
461 
462 static int map_update_elem(union bpf_attr *attr)
463 {
464 	void __user *ukey = u64_to_user_ptr(attr->key);
465 	void __user *uvalue = u64_to_user_ptr(attr->value);
466 	int ufd = attr->map_fd;
467 	struct bpf_map *map;
468 	void *key, *value;
469 	u32 value_size;
470 	struct fd f;
471 	int err;
472 
473 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
474 		return -EINVAL;
475 
476 	f = fdget(ufd);
477 	map = __bpf_map_get(f);
478 	if (IS_ERR(map))
479 		return PTR_ERR(map);
480 
481 	err = -ENOMEM;
482 	key = kmalloc(map->key_size, GFP_USER);
483 	if (!key)
484 		goto err_put;
485 
486 	err = -EFAULT;
487 	if (copy_from_user(key, ukey, map->key_size) != 0)
488 		goto free_key;
489 
490 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
491 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
492 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
493 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
494 	else
495 		value_size = map->value_size;
496 
497 	err = -ENOMEM;
498 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
499 	if (!value)
500 		goto free_key;
501 
502 	err = -EFAULT;
503 	if (copy_from_user(value, uvalue, value_size) != 0)
504 		goto free_value;
505 
506 	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
507 	 * inside a bpf map update or delete; otherwise deadlocks are possible
508 	 */
509 	preempt_disable();
510 	__this_cpu_inc(bpf_prog_active);
511 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
512 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
513 		err = bpf_percpu_hash_update(map, key, value, attr->flags);
514 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
515 		err = bpf_percpu_array_update(map, key, value, attr->flags);
516 	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
517 		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
518 		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
519 		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
520 		rcu_read_lock();
521 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
522 						   attr->flags);
523 		rcu_read_unlock();
524 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
525 		rcu_read_lock();
526 		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
527 						  attr->flags);
528 		rcu_read_unlock();
529 	} else {
530 		rcu_read_lock();
531 		err = map->ops->map_update_elem(map, key, value, attr->flags);
532 		rcu_read_unlock();
533 	}
534 	__this_cpu_dec(bpf_prog_active);
535 	preempt_enable();
536 
537 	if (!err)
538 		trace_bpf_map_update_elem(map, ufd, key, value);
539 free_value:
540 	kfree(value);
541 free_key:
542 	kfree(key);
543 err_put:
544 	fdput(f);
545 	return err;
546 }
547 
548 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
549 
550 static int map_delete_elem(union bpf_attr *attr)
551 {
552 	void __user *ukey = u64_to_user_ptr(attr->key);
553 	int ufd = attr->map_fd;
554 	struct bpf_map *map;
555 	struct fd f;
556 	void *key;
557 	int err;
558 
559 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
560 		return -EINVAL;
561 
562 	f = fdget(ufd);
563 	map = __bpf_map_get(f);
564 	if (IS_ERR(map))
565 		return PTR_ERR(map);
566 
567 	err = -ENOMEM;
568 	key = kmalloc(map->key_size, GFP_USER);
569 	if (!key)
570 		goto err_put;
571 
572 	err = -EFAULT;
573 	if (copy_from_user(key, ukey, map->key_size) != 0)
574 		goto free_key;
575 
576 	preempt_disable();
577 	__this_cpu_inc(bpf_prog_active);
578 	rcu_read_lock();
579 	err = map->ops->map_delete_elem(map, key);
580 	rcu_read_unlock();
581 	__this_cpu_dec(bpf_prog_active);
582 	preempt_enable();
583 
584 	if (!err)
585 		trace_bpf_map_delete_elem(map, ufd, key);
586 free_key:
587 	kfree(key);
588 err_put:
589 	fdput(f);
590 	return err;
591 }
592 
593 /* last field in 'union bpf_attr' used by this command */
594 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
595 
596 static int map_get_next_key(union bpf_attr *attr)
597 {
598 	void __user *ukey = u64_to_user_ptr(attr->key);
599 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
600 	int ufd = attr->map_fd;
601 	struct bpf_map *map;
602 	void *key, *next_key;
603 	struct fd f;
604 	int err;
605 
606 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
607 		return -EINVAL;
608 
609 	f = fdget(ufd);
610 	map = __bpf_map_get(f);
611 	if (IS_ERR(map))
612 		return PTR_ERR(map);
613 
614 	if (ukey) {
615 		err = -ENOMEM;
616 		key = kmalloc(map->key_size, GFP_USER);
617 		if (!key)
618 			goto err_put;
619 
620 		err = -EFAULT;
621 		if (copy_from_user(key, ukey, map->key_size) != 0)
622 			goto free_key;
623 	} else {
624 		key = NULL;
625 	}
626 
627 	err = -ENOMEM;
628 	next_key = kmalloc(map->key_size, GFP_USER);
629 	if (!next_key)
630 		goto free_key;
631 
632 	rcu_read_lock();
633 	err = map->ops->map_get_next_key(map, key, next_key);
634 	rcu_read_unlock();
635 	if (err)
636 		goto free_next_key;
637 
638 	err = -EFAULT;
639 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
640 		goto free_next_key;
641 
642 	trace_bpf_map_next_key(map, ufd, key, next_key);
643 	err = 0;
644 
645 free_next_key:
646 	kfree(next_key);
647 free_key:
648 	kfree(key);
649 err_put:
650 	fdput(f);
651 	return err;
652 }
653 
654 static const struct bpf_verifier_ops * const bpf_prog_types[] = {
655 #define BPF_PROG_TYPE(_id, _ops) \
656 	[_id] = &_ops,
657 #define BPF_MAP_TYPE(_id, _ops)
658 #include <linux/bpf_types.h>
659 #undef BPF_PROG_TYPE
660 #undef BPF_MAP_TYPE
661 };
662 
663 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
664 {
665 	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
666 		return -EINVAL;
667 
668 	prog->aux->ops = bpf_prog_types[type];
669 	prog->type = type;
670 	return 0;
671 }
672 
673 /* drop refcnt on maps used by eBPF program and free auxiliary data */
674 static void free_used_maps(struct bpf_prog_aux *aux)
675 {
676 	int i;
677 
678 	for (i = 0; i < aux->used_map_cnt; i++)
679 		bpf_map_put(aux->used_maps[i]);
680 
681 	kfree(aux->used_maps);
682 }
683 
684 int __bpf_prog_charge(struct user_struct *user, u32 pages)
685 {
686 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
687 	unsigned long user_bufs;
688 
689 	if (user) {
690 		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
691 		if (user_bufs > memlock_limit) {
692 			atomic_long_sub(pages, &user->locked_vm);
693 			return -EPERM;
694 		}
695 	}
696 
697 	return 0;
698 }
699 
700 void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
701 {
702 	if (user)
703 		atomic_long_sub(pages, &user->locked_vm);
704 }
705 
706 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
707 {
708 	struct user_struct *user = get_current_user();
709 	int ret;
710 
711 	ret = __bpf_prog_charge(user, prog->pages);
712 	if (ret) {
713 		free_uid(user);
714 		return ret;
715 	}
716 
717 	prog->aux->user = user;
718 	return 0;
719 }
720 
721 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
722 {
723 	struct user_struct *user = prog->aux->user;
724 
725 	__bpf_prog_uncharge(user, prog->pages);
726 	free_uid(user);
727 }
728 
729 static int bpf_prog_alloc_id(struct bpf_prog *prog)
730 {
731 	int id;
732 
733 	spin_lock_bh(&prog_idr_lock);
734 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
735 	if (id > 0)
736 		prog->aux->id = id;
737 	spin_unlock_bh(&prog_idr_lock);
738 
739 	/* id is in [1, INT_MAX) */
740 	if (WARN_ON_ONCE(!id))
741 		return -ENOSPC;
742 
743 	return id > 0 ? 0 : id;
744 }
745 
746 static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
747 {
748 	/* cBPF to eBPF migrations are currently not in the idr store. */
749 	if (!prog->aux->id)
750 		return;
751 
752 	if (do_idr_lock)
753 		spin_lock_bh(&prog_idr_lock);
754 	else
755 		__acquire(&prog_idr_lock);
756 
757 	idr_remove(&prog_idr, prog->aux->id);
758 
759 	if (do_idr_lock)
760 		spin_unlock_bh(&prog_idr_lock);
761 	else
762 		__release(&prog_idr_lock);
763 }
764 
765 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
766 {
767 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
768 
769 	free_used_maps(aux);
770 	bpf_prog_uncharge_memlock(aux->prog);
771 	bpf_prog_free(aux->prog);
772 }
773 
774 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
775 {
776 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
777 		trace_bpf_prog_put_rcu(prog);
778 		/* bpf_prog_free_id() must be called first */
779 		bpf_prog_free_id(prog, do_idr_lock);
780 		bpf_prog_kallsyms_del(prog);
781 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
782 	}
783 }
784 
785 void bpf_prog_put(struct bpf_prog *prog)
786 {
787 	__bpf_prog_put(prog, true);
788 }
789 EXPORT_SYMBOL_GPL(bpf_prog_put);
790 
791 static int bpf_prog_release(struct inode *inode, struct file *filp)
792 {
793 	struct bpf_prog *prog = filp->private_data;
794 
795 	bpf_prog_put(prog);
796 	return 0;
797 }
798 
799 #ifdef CONFIG_PROC_FS
800 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
801 {
802 	const struct bpf_prog *prog = filp->private_data;
803 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
804 
805 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
806 	seq_printf(m,
807 		   "prog_type:\t%u\n"
808 		   "prog_jited:\t%u\n"
809 		   "prog_tag:\t%s\n"
810 		   "memlock:\t%llu\n",
811 		   prog->type,
812 		   prog->jited,
813 		   prog_tag,
814 		   prog->pages * 1ULL << PAGE_SHIFT);
815 }
816 #endif
817 
818 static const struct file_operations bpf_prog_fops = {
819 #ifdef CONFIG_PROC_FS
820 	.show_fdinfo	= bpf_prog_show_fdinfo,
821 #endif
822 	.release	= bpf_prog_release,
823 };
824 
825 int bpf_prog_new_fd(struct bpf_prog *prog)
826 {
827 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
828 				O_RDWR | O_CLOEXEC);
829 }
830 
831 static struct bpf_prog *____bpf_prog_get(struct fd f)
832 {
833 	if (!f.file)
834 		return ERR_PTR(-EBADF);
835 	if (f.file->f_op != &bpf_prog_fops) {
836 		fdput(f);
837 		return ERR_PTR(-EINVAL);
838 	}
839 
840 	return f.file->private_data;
841 }
842 
843 struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
844 {
845 	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
846 		atomic_sub(i, &prog->aux->refcnt);
847 		return ERR_PTR(-EBUSY);
848 	}
849 	return prog;
850 }
851 EXPORT_SYMBOL_GPL(bpf_prog_add);
852 
853 void bpf_prog_sub(struct bpf_prog *prog, int i)
854 {
855 	/* Only to be used for undoing previous bpf_prog_add() in some
856 	 * error path. We still know that another entity in our call
857 	 * path holds a reference to the program, thus atomic_sub() can
858 	 * be safely used in such cases!
859 	 */
860 	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
861 }
862 EXPORT_SYMBOL_GPL(bpf_prog_sub);
863 
864 struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
865 {
866 	return bpf_prog_add(prog, 1);
867 }
868 EXPORT_SYMBOL_GPL(bpf_prog_inc);
869 
870 /* prog_idr_lock must be held by the caller */
871 static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
872 {
873 	int refold;
874 
875 	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
876 
877 	if (refold >= BPF_MAX_REFCNT) {
878 		__bpf_prog_put(prog, false);
879 		return ERR_PTR(-EBUSY);
880 	}
881 
882 	if (!refold)
883 		return ERR_PTR(-ENOENT);
884 
885 	return prog;
886 }
887 
888 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
889 {
890 	struct fd f = fdget(ufd);
891 	struct bpf_prog *prog;
892 
893 	prog = ____bpf_prog_get(f);
894 	if (IS_ERR(prog))
895 		return prog;
896 	if (type && prog->type != *type) {
897 		prog = ERR_PTR(-EINVAL);
898 		goto out;
899 	}
900 
901 	prog = bpf_prog_inc(prog);
902 out:
903 	fdput(f);
904 	return prog;
905 }
906 
907 struct bpf_prog *bpf_prog_get(u32 ufd)
908 {
909 	return __bpf_prog_get(ufd, NULL);
910 }
911 
912 struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
913 {
914 	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);
915 
916 	if (!IS_ERR(prog))
917 		trace_bpf_prog_get_type(prog);
918 	return prog;
919 }
920 EXPORT_SYMBOL_GPL(bpf_prog_get_type);
921 
922 /* last field in 'union bpf_attr' used by this command */
923 #define	BPF_PROG_LOAD_LAST_FIELD prog_flags
924 
925 static int bpf_prog_load(union bpf_attr *attr)
926 {
927 	enum bpf_prog_type type = attr->prog_type;
928 	struct bpf_prog *prog;
929 	int err;
930 	char license[128];
931 	bool is_gpl;
932 
933 	if (CHECK_ATTR(BPF_PROG_LOAD))
934 		return -EINVAL;
935 
936 	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
937 		return -EINVAL;
938 
939 	/* copy eBPF program license from user space */
940 	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
941 			      sizeof(license) - 1) < 0)
942 		return -EFAULT;
943 	license[sizeof(license) - 1] = 0;
944 
945 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
946 	is_gpl = license_is_gpl_compatible(license);
947 
948 	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
949 		return -E2BIG;
950 
951 	if (type == BPF_PROG_TYPE_KPROBE &&
952 	    attr->kern_version != LINUX_VERSION_CODE)
953 		return -EINVAL;
954 
955 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
956 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
957 	    !capable(CAP_SYS_ADMIN))
958 		return -EPERM;
959 
960 	/* plain bpf_prog allocation */
961 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
962 	if (!prog)
963 		return -ENOMEM;
964 
965 	err = bpf_prog_charge_memlock(prog);
966 	if (err)
967 		goto free_prog_nouncharge;
968 
969 	prog->len = attr->insn_cnt;
970 
971 	err = -EFAULT;
972 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
973 			   bpf_prog_insn_size(prog)) != 0)
974 		goto free_prog;
975 
976 	prog->orig_prog = NULL;
977 	prog->jited = 0;
978 
979 	atomic_set(&prog->aux->refcnt, 1);
980 	prog->gpl_compatible = is_gpl ? 1 : 0;
981 
982 	/* find program type: socket_filter vs tracing_filter */
983 	err = find_prog_type(type, prog);
984 	if (err < 0)
985 		goto free_prog;
986 
987 	/* run eBPF verifier */
988 	err = bpf_check(&prog, attr);
989 	if (err < 0)
990 		goto free_used_maps;
991 
992 	/* eBPF program is ready to be JITed */
993 	prog = bpf_prog_select_runtime(prog, &err);
994 	if (err < 0)
995 		goto free_used_maps;
996 
997 	err = bpf_prog_alloc_id(prog);
998 	if (err)
999 		goto free_used_maps;
1000 
1001 	err = bpf_prog_new_fd(prog);
1002 	if (err < 0) {
1003 		/* failed to allocate fd.
1004 		 * bpf_prog_put() is needed because the
1005 		 * bpf_prog_alloc_id() above has already published
1006 		 * the prog to user space, which may have taken a
1007 		 * reference to it via BPF_PROG_GET_FD_BY_ID.
1008 		 */
1009 		bpf_prog_put(prog);
1010 		return err;
1011 	}
1012 
1013 	bpf_prog_kallsyms_add(prog);
1014 	trace_bpf_prog_load(prog, err);
1015 	return err;
1016 
1017 free_used_maps:
1018 	free_used_maps(prog->aux);
1019 free_prog:
1020 	bpf_prog_uncharge_memlock(prog);
1021 free_prog_nouncharge:
1022 	bpf_prog_free(prog);
1023 	return err;
1024 }
1025 
1026 #define BPF_OBJ_LAST_FIELD bpf_fd
1027 
1028 static int bpf_obj_pin(const union bpf_attr *attr)
1029 {
1030 	if (CHECK_ATTR(BPF_OBJ))
1031 		return -EINVAL;
1032 
1033 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
1034 }
1035 
1036 static int bpf_obj_get(const union bpf_attr *attr)
1037 {
1038 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
1039 		return -EINVAL;
1040 
1041 	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
1042 }
1043 
1044 #ifdef CONFIG_CGROUP_BPF
1045 
1046 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
1047 
1048 static int bpf_prog_attach(const union bpf_attr *attr)
1049 {
1050 	enum bpf_prog_type ptype;
1051 	struct bpf_prog *prog;
1052 	struct cgroup *cgrp;
1053 	int ret;
1054 
1055 	if (!capable(CAP_NET_ADMIN))
1056 		return -EPERM;
1057 
1058 	if (CHECK_ATTR(BPF_PROG_ATTACH))
1059 		return -EINVAL;
1060 
1061 	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
1062 		return -EINVAL;
1063 
1064 	switch (attr->attach_type) {
1065 	case BPF_CGROUP_INET_INGRESS:
1066 	case BPF_CGROUP_INET_EGRESS:
1067 		ptype = BPF_PROG_TYPE_CGROUP_SKB;
1068 		break;
1069 	case BPF_CGROUP_INET_SOCK_CREATE:
1070 		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
1071 		break;
1072 	default:
1073 		return -EINVAL;
1074 	}
1075 
1076 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
1077 	if (IS_ERR(prog))
1078 		return PTR_ERR(prog);
1079 
1080 	cgrp = cgroup_get_from_fd(attr->target_fd);
1081 	if (IS_ERR(cgrp)) {
1082 		bpf_prog_put(prog);
1083 		return PTR_ERR(cgrp);
1084 	}
1085 
1086 	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
1087 				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
1088 	if (ret)
1089 		bpf_prog_put(prog);
1090 	cgroup_put(cgrp);
1091 
1092 	return ret;
1093 }
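/* Illustrative user-space call (hypothetical cgroup path): attaching a
 * loaded BPF_PROG_TYPE_CGROUP_SKB program to a cgroup's ingress hook:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = open("/sys/fs/cgroup/foo", O_RDONLY);
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */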
1094 
1095 #define BPF_PROG_DETACH_LAST_FIELD attach_type
1096 
1097 static int bpf_prog_detach(const union bpf_attr *attr)
1098 {
1099 	struct cgroup *cgrp;
1100 	int ret;
1101 
1102 	if (!capable(CAP_NET_ADMIN))
1103 		return -EPERM;
1104 
1105 	if (CHECK_ATTR(BPF_PROG_DETACH))
1106 		return -EINVAL;
1107 
1108 	switch (attr->attach_type) {
1109 	case BPF_CGROUP_INET_INGRESS:
1110 	case BPF_CGROUP_INET_EGRESS:
1111 	case BPF_CGROUP_INET_SOCK_CREATE:
1112 		cgrp = cgroup_get_from_fd(attr->target_fd);
1113 		if (IS_ERR(cgrp))
1114 			return PTR_ERR(cgrp);
1115 
1116 		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
1117 		cgroup_put(cgrp);
1118 		break;
1119 
1120 	default:
1121 		return -EINVAL;
1122 	}
1123 
1124 	return ret;
1125 }
1126 #endif /* CONFIG_CGROUP_BPF */
1127 
1128 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
1129 
1130 static int bpf_prog_test_run(const union bpf_attr *attr,
1131 			     union bpf_attr __user *uattr)
1132 {
1133 	struct bpf_prog *prog;
1134 	int ret = -ENOTSUPP;
1135 
1136 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
1137 		return -EINVAL;
1138 
1139 	prog = bpf_prog_get(attr->test.prog_fd);
1140 	if (IS_ERR(prog))
1141 		return PTR_ERR(prog);
1142 
1143 	if (prog->aux->ops->test_run)
1144 		ret = prog->aux->ops->test_run(prog, attr, uattr);
1145 
1146 	bpf_prog_put(prog);
1147 	return ret;
1148 }
1149 
1150 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
1151 
1152 static int bpf_obj_get_next_id(const union bpf_attr *attr,
1153 			       union bpf_attr __user *uattr,
1154 			       struct idr *idr,
1155 			       spinlock_t *lock)
1156 {
1157 	u32 next_id = attr->start_id;
1158 	int err = 0;
1159 
1160 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
1161 		return -EINVAL;
1162 
1163 	if (!capable(CAP_SYS_ADMIN))
1164 		return -EPERM;
1165 
1166 	next_id++;
1167 	spin_lock_bh(lock);
1168 	if (!idr_get_next(idr, &next_id))
1169 		err = -ENOENT;
1170 	spin_unlock_bh(lock);
1171 
1172 	if (!err)
1173 		err = put_user(next_id, &uattr->next_id);
1174 
1175 	return err;
1176 }
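/* Illustrative user-space iteration (hypothetical snippet): feeding each
 * next_id back in as start_id walks all loaded objects:
 *
 *	__u32 id = 0;
 *	union bpf_attr attr = {};
 *
 *	for (;;) {
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *			    sizeof(attr)))
 *			break;		// -ENOENT once past the last id
 *		id = attr.next_id;
 *		// ... e.g. BPF_PROG_GET_FD_BY_ID with id ...
 *	}
 */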
1177 
1178 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
1179 
1180 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
1181 {
1182 	struct bpf_prog *prog;
1183 	u32 id = attr->prog_id;
1184 	int fd;
1185 
1186 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
1187 		return -EINVAL;
1188 
1189 	if (!capable(CAP_SYS_ADMIN))
1190 		return -EPERM;
1191 
1192 	spin_lock_bh(&prog_idr_lock);
1193 	prog = idr_find(&prog_idr, id);
1194 	if (prog)
1195 		prog = bpf_prog_inc_not_zero(prog);
1196 	else
1197 		prog = ERR_PTR(-ENOENT);
1198 	spin_unlock_bh(&prog_idr_lock);
1199 
1200 	if (IS_ERR(prog))
1201 		return PTR_ERR(prog);
1202 
1203 	fd = bpf_prog_new_fd(prog);
1204 	if (fd < 0)
1205 		bpf_prog_put(prog);
1206 
1207 	return fd;
1208 }
1209 
1210 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id
1211 
1212 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
1213 {
1214 	struct bpf_map *map;
1215 	u32 id = attr->map_id;
1216 	int fd;
1217 
1218 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
1219 		return -EINVAL;
1220 
1221 	if (!capable(CAP_SYS_ADMIN))
1222 		return -EPERM;
1223 
1224 	spin_lock_bh(&map_idr_lock);
1225 	map = idr_find(&map_idr, id);
1226 	if (map)
1227 		map = bpf_map_inc_not_zero(map, true);
1228 	else
1229 		map = ERR_PTR(-ENOENT);
1230 	spin_unlock_bh(&map_idr_lock);
1231 
1232 	if (IS_ERR(map))
1233 		return PTR_ERR(map);
1234 
1235 	fd = bpf_map_new_fd(map);
1236 	if (fd < 0)
1237 		bpf_map_put(map);
1238 
1239 	return fd;
1240 }
1241 
1242 static int check_uarg_tail_zero(void __user *uaddr,
1243 				size_t expected_size,
1244 				size_t actual_size)
1245 {
1246 	unsigned char __user *addr;
1247 	unsigned char __user *end;
1248 	unsigned char val;
1249 	int err;
1250 
1251 	if (actual_size <= expected_size)
1252 		return 0;
1253 
1254 	addr = uaddr + expected_size;
1255 	end  = uaddr + actual_size;
1256 
1257 	for (; addr < end; addr++) {
1258 		err = get_user(val, addr);
1259 		if (err)
1260 			return err;
1261 		if (val)
1262 			return -E2BIG;
1263 	}
1264 
1265 	return 0;
1266 }
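/* Example of the contract above (illustrative sizes): if the kernel
 * knows a 48-byte bpf_prog_info but user space hands in info_len == 64,
 * the call still succeeds provided bytes 48..63 are all zero; a single
 * non-zero tail byte yields -E2BIG, telling user space it is relying on
 * fields this kernel does not implement.
 */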
1267 
1268 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
1269 				   const union bpf_attr *attr,
1270 				   union bpf_attr __user *uattr)
1271 {
1272 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
1273 	struct bpf_prog_info info = {};
1274 	u32 info_len = attr->info.info_len;
1275 	char __user *uinsns;
1276 	u32 ulen;
1277 	int err;
1278 
1279 	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
1280 	if (err)
1281 		return err;
1282 	info_len = min_t(u32, sizeof(info), info_len);
1283 
1284 	if (copy_from_user(&info, uinfo, info_len))
1285 		return -EFAULT;
1286 
1287 	info.type = prog->type;
1288 	info.id = prog->aux->id;
1289 
1290 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
1291 
1292 	if (!capable(CAP_SYS_ADMIN)) {
1293 		info.jited_prog_len = 0;
1294 		info.xlated_prog_len = 0;
1295 		goto done;
1296 	}
1297 
1298 	ulen = info.jited_prog_len;
1299 	info.jited_prog_len = prog->jited_len;
1300 	if (info.jited_prog_len && ulen) {
1301 		uinsns = u64_to_user_ptr(info.jited_prog_insns);
1302 		ulen = min_t(u32, info.jited_prog_len, ulen);
1303 		if (copy_to_user(uinsns, prog->bpf_func, ulen))
1304 			return -EFAULT;
1305 	}
1306 
1307 	ulen = info.xlated_prog_len;
1308 	info.xlated_prog_len = bpf_prog_size(prog->len);
1309 	if (info.xlated_prog_len && ulen) {
1310 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
1311 		ulen = min_t(u32, info.xlated_prog_len, ulen);
1312 		if (copy_to_user(uinsns, prog->insnsi, ulen))
1313 			return -EFAULT;
1314 	}
1315 
1316 done:
1317 	if (copy_to_user(uinfo, &info, info_len) ||
1318 	    put_user(info_len, &uattr->info.info_len))
1319 		return -EFAULT;
1320 
1321 	return 0;
1322 }
1323 
1324 static int bpf_map_get_info_by_fd(struct bpf_map *map,
1325 				  const union bpf_attr *attr,
1326 				  union bpf_attr __user *uattr)
1327 {
1328 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
1329 	struct bpf_map_info info = {};
1330 	u32 info_len = attr->info.info_len;
1331 	int err;
1332 
1333 	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
1334 	if (err)
1335 		return err;
1336 	info_len = min_t(u32, sizeof(info), info_len);
1337 
1338 	info.type = map->map_type;
1339 	info.id = map->id;
1340 	info.key_size = map->key_size;
1341 	info.value_size = map->value_size;
1342 	info.max_entries = map->max_entries;
1343 	info.map_flags = map->map_flags;
1344 
1345 	if (copy_to_user(uinfo, &info, info_len) ||
1346 	    put_user(info_len, &uattr->info.info_len))
1347 		return -EFAULT;
1348 
1349 	return 0;
1350 }
1351 
1352 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
1353 
1354 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
1355 				  union bpf_attr __user *uattr)
1356 {
1357 	int ufd = attr->info.bpf_fd;
1358 	struct fd f;
1359 	int err;
1360 
1361 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
1362 		return -EINVAL;
1363 
1364 	f = fdget(ufd);
1365 	if (!f.file)
1366 		return -EBADFD;
1367 
1368 	if (f.file->f_op == &bpf_prog_fops)
1369 		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
1370 					      uattr);
1371 	else if (f.file->f_op == &bpf_map_fops)
1372 		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
1373 					     uattr);
1374 	else
1375 		err = -EINVAL;
1376 
1377 	fdput(f);
1378 	return err;
1379 }
1380 
1381 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
1382 {
1383 	union bpf_attr attr = {};
1384 	int err;
1385 
1386 	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
1387 		return -EPERM;
1388 
1389 	if (!access_ok(VERIFY_READ, uattr, 1))
1390 		return -EFAULT;
1391 
1392 	if (size > PAGE_SIZE)	/* silly large */
1393 		return -E2BIG;
1394 
1395 	/* If we're handed a bigger struct than we know of,
1396 	 * ensure all the unknown bits are 0 - i.e. new
1397 	 * user-space does not rely on any kernel feature
1398 	 * extensions we don't know about yet.
1399 	 */
1400 	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
1401 	if (err)
1402 		return err;
1403 	size = min_t(u32, size, sizeof(attr));
1404 
1405 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
1406 	if (copy_from_user(&attr, uattr, size) != 0)
1407 		return -EFAULT;
1408 
1409 	switch (cmd) {
1410 	case BPF_MAP_CREATE:
1411 		err = map_create(&attr);
1412 		break;
1413 	case BPF_MAP_LOOKUP_ELEM:
1414 		err = map_lookup_elem(&attr);
1415 		break;
1416 	case BPF_MAP_UPDATE_ELEM:
1417 		err = map_update_elem(&attr);
1418 		break;
1419 	case BPF_MAP_DELETE_ELEM:
1420 		err = map_delete_elem(&attr);
1421 		break;
1422 	case BPF_MAP_GET_NEXT_KEY:
1423 		err = map_get_next_key(&attr);
1424 		break;
1425 	case BPF_PROG_LOAD:
1426 		err = bpf_prog_load(&attr);
1427 		break;
1428 	case BPF_OBJ_PIN:
1429 		err = bpf_obj_pin(&attr);
1430 		break;
1431 	case BPF_OBJ_GET:
1432 		err = bpf_obj_get(&attr);
1433 		break;
1434 #ifdef CONFIG_CGROUP_BPF
1435 	case BPF_PROG_ATTACH:
1436 		err = bpf_prog_attach(&attr);
1437 		break;
1438 	case BPF_PROG_DETACH:
1439 		err = bpf_prog_detach(&attr);
1440 		break;
1441 #endif
1442 	case BPF_PROG_TEST_RUN:
1443 		err = bpf_prog_test_run(&attr, uattr);
1444 		break;
1445 	case BPF_PROG_GET_NEXT_ID:
1446 		err = bpf_obj_get_next_id(&attr, uattr,
1447 					  &prog_idr, &prog_idr_lock);
1448 		break;
1449 	case BPF_MAP_GET_NEXT_ID:
1450 		err = bpf_obj_get_next_id(&attr, uattr,
1451 					  &map_idr, &map_idr_lock);
1452 		break;
1453 	case BPF_PROG_GET_FD_BY_ID:
1454 		err = bpf_prog_get_fd_by_id(&attr);
1455 		break;
1456 	case BPF_MAP_GET_FD_BY_ID:
1457 		err = bpf_map_get_fd_by_id(&attr);
1458 		break;
1459 	case BPF_OBJ_GET_INFO_BY_FD:
1460 		err = bpf_obj_get_info_by_fd(&attr, uattr);
1461 		break;
1462 	default:
1463 		err = -EINVAL;
1464 		break;
1465 	}
1466 
1467 	return err;
1468 }
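/* Illustrative user-space entry point (not part of this file): bpf(2)
 * has no libc wrapper, so callers go through syscall(2). Creating a
 * minimal array map, for example:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * The fd returned by map_create() above then feeds the
 * BPF_MAP_*_ELEM commands.
 */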
1469