xref: /linux/kernel/bpf/syscall.c (revision 04d8a0a5f3b6887543850d991a5e37c4ec90e250)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

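/* Allocate map memory: try kmalloc() first for sizes up to the
 * costly-order threshold and fall back to __vmalloc() otherwise;
 * bpf_map_area_free() uses kvfree(), which can free either kind
 * of allocation.
 */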
void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

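/* Check up front, without actually charging anything, whether @pages
 * additional locked pages would push the current user over
 * RLIMIT_MEMLOCK.
 */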
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

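/* Charge map->pages against the current user's RLIMIT_MEMLOCK and, on
 * success, remember the user in map->user so the charge can be undone
 * when the map is freed.
 */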
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

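/* Drop a user reference; when the last one is gone, clear the slots of a
 * prog array so that the programs it holds no longer pin the map.
 */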
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
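/* For example, with BPF_MAP_CREATE_LAST_FIELD defined as map_flags below,
 * CHECK_ATTR(BPF_MAP_CREATE) expands to roughly
 *
 *	memchr_inv((void *) &attr->map_flags + sizeof(attr->map_flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
 *		   sizeof(attr->map_flags)) != NULL
 *
 * i.e. it scans every byte after attr->map_flags up to the end of the
 * union and evaluates to true if any of them is non-zero.
 */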

#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

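/* weak stub, overridden by the real implementation when the stack map
 * code (kernel/bpf/stackmap.c) is built in
 */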
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

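	/* per-cpu maps return one value per possible CPU, each rounded up
	 * to 8 bytes; e.g. a 12-byte value on a machine with 4 possible
	 * CPUs is copied out as 4 * 16 = 64 bytes.
	 */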
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete; otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program has bpf_call
			 * instructions and it passed bpf_check(), which means
			 * that ops->get_func_proto must have been supplied;
			 * check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_xdp_adjust_head)
				prog->xdp_adjust_head = 1;
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define	BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_type

static int bpf_prog_attach(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	enum bpf_prog_type ptype;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	cgroup_bpf_update(cgrp, prog, attr->attach_type);
	cgroup_put(cgrp);

	return 0;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		cgroup_bpf_update(cgrp, NULL, attr->attach_type);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
#endif /* CONFIG_CGROUP_BPF */

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;

#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
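
/* Example usage from user space (illustrative only; error handling
 * elided): creating a one-element array map through the raw syscall.
 * Unused attr fields must stay zero, or the CHECK_ATTR() test above
 * rejects the command with -EINVAL.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */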