--- syscall.c (ab387f0af24e661fc1c2f609664ec9ae6618e3f0)
+++ syscall.c (ab3f0063c48c26c927851b6767824e35a716d878)
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of

--- 9 unchanged lines hidden ---

 #include <linux/mmzone.h>
 #include <linux/anon_inodes.h>
 #include <linux/file.h>
 #include <linux/license.h>
 #include <linux/filter.h>
 #include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/idr.h>
+#include <linux/cred.h>
+#include <linux/timekeeping.h>
+#include <linux/ctype.h>
 
 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
 			  (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
 
+#define BPF_OBJ_FLAG_MASK	(BPF_F_RDONLY | BPF_F_WRONLY)
+
 DEFINE_PER_CPU(int, bpf_prog_active);
 static DEFINE_IDR(prog_idr);
 static DEFINE_SPINLOCK(prog_idr_lock);
 static DEFINE_IDR(map_idr);
 static DEFINE_SPINLOCK(map_idr_lock);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;
 
--- 160 unchanged lines hidden ---
 
 }
 
 /* called from workqueue */
 static void bpf_map_free_deferred(struct work_struct *work)
 {
 	struct bpf_map *map = container_of(work, struct bpf_map, work);
 
 	bpf_map_uncharge_memlock(map);
+	security_bpf_map_free(map);
 	/* implementation dependent freeing */
 	map->ops->map_free(map);
 }
 
 static void bpf_map_put_uref(struct bpf_map *map)
 {
 	if (atomic_dec_and_test(&map->usercnt)) {
 		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
 
--- 68 unchanged lines hidden ---
 
 		seq_printf(m, "owner_prog_type:\t%u\n",
 			   owner_prog_type);
 		seq_printf(m, "owner_jited:\t%u\n",
 			   owner_jited);
 	}
 }
 #endif
 
+static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
+			      loff_t *ppos)
+{
+	/* We need this handler such that alloc_file() enables
+	 * f_mode with FMODE_CAN_READ.
+	 */
+	return -EINVAL;
+}
+
+static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
+			       size_t siz, loff_t *ppos)
+{
+	/* We need this handler such that alloc_file() enables
+	 * f_mode with FMODE_CAN_WRITE.
+	 */
+	return -EINVAL;
+}
+
-static const struct file_operations bpf_map_fops = {
+const struct file_operations bpf_map_fops = {
 #ifdef CONFIG_PROC_FS
 	.show_fdinfo	= bpf_map_show_fdinfo,
 #endif
 	.release	= bpf_map_release,
+	.read		= bpf_dummy_read,
+	.write		= bpf_dummy_write,
 };
 
-int bpf_map_new_fd(struct bpf_map *map)
+int bpf_map_new_fd(struct bpf_map *map, int flags)
 {
+	int ret;
+
+	ret = security_bpf_map(map, OPEN_FMODE(flags));
+	if (ret < 0)
+		return ret;
+
 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
-				O_RDWR | O_CLOEXEC);
+				flags | O_CLOEXEC);
 }
 
+int bpf_get_file_flag(int flags)
+{
+	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
+		return -EINVAL;
+	if (flags & BPF_F_RDONLY)
+		return O_RDONLY;
+	if (flags & BPF_F_WRONLY)
+		return O_WRONLY;
+	return O_RDWR;
+}
+
 /* helper macro to check that unused fields 'union bpf_attr' are zero */
 #define CHECK_ATTR(CMD) \
 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
 		   sizeof(*attr) - \
 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 
-#define BPF_MAP_CREATE_LAST_FIELD numa_node
+/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
+ * Return 0 on success and < 0 on error.
+ */
+static int bpf_obj_name_cpy(char *dst, const char *src)
+{
+	const char *end = src + BPF_OBJ_NAME_LEN;
+
+	memset(dst, 0, BPF_OBJ_NAME_LEN);
+
+	/* Copy all isalnum() and '_' char */
+	while (src < end && *src) {
+		if (!isalnum(*src) && *src != '_')
+			return -EINVAL;
+		*dst++ = *src++;
+	}
+
+	/* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
+	if (src == end)
+		return -EINVAL;
+
+	return 0;
+}
+
+#define BPF_MAP_CREATE_LAST_FIELD map_name
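
Aside on the new naming rule: bpf_obj_name_cpy() above is the only check user space must satisfy for the new map_name/prog_name attributes — isalnum() or '_' characters, NUL-terminated within BPF_OBJ_NAME_LEN (16 bytes in this series). A minimal userspace mirror of that check, purely illustrative and not part of the patch (the constant and function name below are made up for the sketch):

/* Hypothetical pre-check mirroring the kernel's bpf_obj_name_cpy(). */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

#define BPF_OBJ_NAME_LEN 16U	/* kernel's limit in this series */

static bool bpf_obj_name_ok(const char *name)
{
	unsigned int i;

	for (i = 0; i < BPF_OBJ_NAME_LEN; i++) {
		if (name[i] == '\0')
			return true;		/* terminated in time */
		if (!isalnum((unsigned char)name[i]) && name[i] != '_')
			return false;		/* kernel returns -EINVAL */
	}
	return false;	/* no NUL within BPF_OBJ_NAME_LEN: -EINVAL */
}

int main(void)
{
	printf("%d\n", bpf_obj_name_ok("tx_counter_v2"));		/* 1 */
	printf("%d\n", bpf_obj_name_ok("bad name"));			/* 0: space */
	printf("%d\n", bpf_obj_name_ok("this_name_is_far_too_long"));	/* 0: no NUL in 16 */
	return 0;
}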
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
 	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_map *map;
+	int f_flags;
 	int err;
 
 	err = CHECK_ATTR(BPF_MAP_CREATE);
 	if (err)
 		return -EINVAL;
 
+	f_flags = bpf_get_file_flag(attr->map_flags);
+	if (f_flags < 0)
+		return f_flags;
+
 	if (numa_node != NUMA_NO_NODE &&
 	    ((unsigned int)numa_node >= nr_node_ids ||
 	     !node_online(numa_node)))
 		return -EINVAL;
 
 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
 	map = find_and_alloc_map(attr);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	err = bpf_obj_name_cpy(map->name, attr->map_name);
+	if (err)
+		goto free_map_nouncharge;
+
 	atomic_set(&map->refcnt, 1);
 	atomic_set(&map->usercnt, 1);
 
-	err = bpf_map_charge_memlock(map);
+	err = security_bpf_map_alloc(map);
 	if (err)
 		goto free_map_nouncharge;
 
+	err = bpf_map_charge_memlock(map);
+	if (err)
+		goto free_map_sec;
+
 	err = bpf_map_alloc_id(map);
 	if (err)
 		goto free_map;
 
-	err = bpf_map_new_fd(map);
+	err = bpf_map_new_fd(map, f_flags);
 	if (err < 0) {
 		/* failed to allocate fd.
 		 * bpf_map_put() is needed because the above
 		 * bpf_map_alloc_id() has published the map
 		 * to the userspace and the userspace may
 		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
 		 */
 		bpf_map_put(map);
 		return err;
 	}
 
 	trace_bpf_map_create(map, err);
 	return err;
 
 free_map:
 	bpf_map_uncharge_memlock(map);
+free_map_sec:
+	security_bpf_map_free(map);
 free_map_nouncharge:
 	map->ops->map_free(map);
 	return err;
 }
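
With the map_create() hunks above, BPF_MAP_CREATE now accepts both a map_name and the new BPF_F_RDONLY/BPF_F_WRONLY bits in map_flags; bpf_get_file_flag() turns the latter into the O_RDONLY/O_WRONLY mode of the returned fd. A minimal userspace sketch of the new attributes — assuming uapi headers from a kernel that carries this series (attr.map_name and BPF_F_RDONLY are new here), not a definitive usage:

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;
	attr.value_size = 8;
	attr.max_entries = 16;
	attr.map_flags = BPF_F_RDONLY;	/* bpf_get_file_flag() -> O_RDONLY fd */
	strncpy(attr.map_name, "ro_example", sizeof(attr.map_name));

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		perror("BPF_MAP_CREATE");
	else
		printf("read-only map fd: %d\n", fd);
	return 0;
}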
 
 /* if error is returned, fd is released.
  * On success caller should complete fd access with matching fdput()
  */
 
--- 82 unchanged lines hidden ---
 
 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
 		return -EINVAL;
 
 	f = fdget(ufd);
 	map = __bpf_map_get(f);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (!(f.file->f_mode & FMODE_CAN_READ)) {
+		err = -EPERM;
+		goto err_put;
+	}
+
 	key = memdup_user(ukey, map->key_size);
 	if (IS_ERR(key)) {
 		err = PTR_ERR(key);
 		goto err_put;
 	}
 
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 
--- 64 unchanged lines hidden ---
 
 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
 		return -EINVAL;
 
 	f = fdget(ufd);
 	map = __bpf_map_get(f);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
+		err = -EPERM;
+		goto err_put;
+	}
+
 	key = memdup_user(ukey, map->key_size);
 	if (IS_ERR(key)) {
 		err = PTR_ERR(key);
 		goto err_put;
 	}
 
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
 
--- 6 unchanged lines hidden ---
 
 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
 	if (!value)
 		goto free_key;
 
 	err = -EFAULT;
 	if (copy_from_user(value, uvalue, value_size) != 0)
 		goto free_value;
 
+	/* Need to create a kthread, thus must support schedule */
+	if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+		err = map->ops->map_update_elem(map, key, value, attr->flags);
+		goto out;
+	}
+
 	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
 	 * inside bpf map update or delete otherwise deadlocks are possible
 	 */
 	preempt_disable();
 	__this_cpu_inc(bpf_prog_active);
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_update(map, key, value, attr->flags);
 
--- 14 unchanged lines hidden ---
 
 		rcu_read_unlock();
 	} else {
 		rcu_read_lock();
 		err = map->ops->map_update_elem(map, key, value, attr->flags);
 		rcu_read_unlock();
 	}
 	__this_cpu_dec(bpf_prog_active);
 	preempt_enable();
-
+out:
 	if (!err)
 		trace_bpf_map_update_elem(map, ufd, key, value);
 free_value:
 	kfree(value);
 free_key:
 	kfree(key);
 err_put:
 	fdput(f);
 
--- 14 unchanged lines hidden ---
 
 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
 		return -EINVAL;
 
 	f = fdget(ufd);
 	map = __bpf_map_get(f);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
+		err = -EPERM;
+		goto err_put;
+	}
+
 	key = memdup_user(ukey, map->key_size);
 	if (IS_ERR(key)) {
 		err = PTR_ERR(key);
 		goto err_put;
 	}
 
 	preempt_disable();
 	__this_cpu_inc(bpf_prog_active);
 
--- 27 unchanged lines hidden ---
 
 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
 		return -EINVAL;
 
 	f = fdget(ufd);
 	map = __bpf_map_get(f);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (!(f.file->f_mode & FMODE_CAN_READ)) {
+		err = -EPERM;
+		goto err_put;
+	}
+
 	if (ukey) {
 		key = memdup_user(ukey, map->key_size);
 		if (IS_ERR(key)) {
 			err = PTR_ERR(key);
 			goto err_put;
 		}
 	} else {
 		key = NULL;
 
--- 21 unchanged lines hidden ---
 
 	kfree(next_key);
 free_key:
 	kfree(key);
 err_put:
 	fdput(f);
 	return err;
 }
 
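The four hunks above gate every map command on the fd's f_mode: lookup and get_next_key require FMODE_CAN_READ, update and delete require FMODE_CAN_WRITE. A sketch of the observable behaviour on a write-only map — again assuming headers with BPF_F_WRONLY, and only illustrating the -EPERM path, not a definitive test:

#include <linux/bpf.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static long bpf(int cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
	union bpf_attr attr;
	__u32 key = 0;
	__u64 val = 42, out;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = sizeof(key);
	attr.value_size = sizeof(val);
	attr.max_entries = 4;
	attr.map_flags = BPF_F_WRONLY;	/* resulting fd lacks FMODE_CAN_READ */
	fd = bpf(BPF_MAP_CREATE, &attr);

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = (__u64)(unsigned long)&key;
	attr.value = (__u64)(unsigned long)&val;
	if (bpf(BPF_MAP_UPDATE_ELEM, &attr) == 0)
		puts("update ok (FMODE_CAN_WRITE)");

	attr.value = (__u64)(unsigned long)&out;
	if (bpf(BPF_MAP_LOOKUP_ELEM, &attr) < 0 && errno == EPERM)
		puts("lookup rejected: EPERM (no FMODE_CAN_READ)");
	return 0;
}
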
-static const struct bpf_verifier_ops * const bpf_prog_types[] = {
-#define BPF_PROG_TYPE(_id, _ops) \
-	[_id] = &_ops,
+static const struct bpf_prog_ops * const bpf_prog_types[] = {
+#define BPF_PROG_TYPE(_id, _name) \
+	[_id] = & _name ## _prog_ops,
 #define BPF_MAP_TYPE(_id, _ops)
 #include <linux/bpf_types.h>
 #undef BPF_PROG_TYPE
 #undef BPF_MAP_TYPE
 };
 
 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
 {
 	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
 		return -EINVAL;
 
-	prog->aux->ops = bpf_prog_types[type];
+	if (!bpf_prog_is_dev_bound(prog->aux))
+		prog->aux->ops = bpf_prog_types[type];
+	else
+		prog->aux->ops = &bpf_offload_prog_ops;
 	prog->type = type;
 	return 0;
 }
 
 /* drop refcnt on maps used by eBPF program and free auxilary data */
 static void free_used_maps(struct bpf_prog_aux *aux)
 {
 	int i;
 
--- 86 unchanged lines hidden ---
 
 }
 
 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 {
 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 
 	free_used_maps(aux);
 	bpf_prog_uncharge_memlock(aux->prog);
+	security_bpf_prog_free(aux);
 	bpf_prog_free(aux->prog);
 }
 
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
 		trace_bpf_prog_put_rcu(prog);
 		/* bpf_prog_free_id() must be called first */
 
--- 31 unchanged lines hidden ---
 
 		   "memlock:\t%llu\n",
 		   prog->type,
 		   prog->jited,
 		   prog_tag,
 		   prog->pages * 1ULL << PAGE_SHIFT);
 }
 #endif
 
-static const struct file_operations bpf_prog_fops = {
+const struct file_operations bpf_prog_fops = {
 #ifdef CONFIG_PROC_FS
 	.show_fdinfo	= bpf_prog_show_fdinfo,
 #endif
 	.release	= bpf_prog_release,
+	.read		= bpf_dummy_read,
+	.write		= bpf_dummy_write,
 };
 
 int bpf_prog_new_fd(struct bpf_prog *prog)
 {
+	int ret;
+
+	ret = security_bpf_prog(prog);
+	if (ret < 0)
+		return ret;
+
 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
 				O_RDWR | O_CLOEXEC);
 }
 
 static struct bpf_prog *____bpf_prog_get(struct fd f)
 {
 	if (!f.file)
 		return ERR_PTR(-EBADF);
 
--- 46 unchanged lines hidden ---
 
 
 	if (!refold)
 		return ERR_PTR(-ENOENT);
 
 	return prog;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
 
-static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
+static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type)
 {
 	struct fd f = fdget(ufd);
 	struct bpf_prog *prog;
 
 	prog = ____bpf_prog_get(f);
 	if (IS_ERR(prog))
 		return prog;
-	if (type && prog->type != *type) {
+	if (attach_type && (prog->type != *attach_type || prog->aux->offload)) {
 		prog = ERR_PTR(-EINVAL);
 		goto out;
 	}
 
 	prog = bpf_prog_inc(prog);
 out:
 	fdput(f);
 	return prog;
 
--- 10 unchanged lines hidden ---
 
 
 	if (!IS_ERR(prog))
 		trace_bpf_prog_get_type(prog);
 	return prog;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_get_type);
 
 /* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD prog_flags
+#define BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex
 
 static int bpf_prog_load(union bpf_attr *attr)
 {
 	enum bpf_prog_type type = attr->prog_type;
 	struct bpf_prog *prog;
 	int err;
 	char license[128];
 	bool is_gpl;
 
--- 25 unchanged lines hidden ---
 
 	    !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	/* plain bpf_prog allocation */
 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
 	if (!prog)
 		return -ENOMEM;
 
-	err = bpf_prog_charge_memlock(prog);
+	err = security_bpf_prog_alloc(prog->aux);
 	if (err)
 		goto free_prog_nouncharge;
 
+	err = bpf_prog_charge_memlock(prog);
+	if (err)
+		goto free_prog_sec;
+
 	prog->len = attr->insn_cnt;
 
 	err = -EFAULT;
 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
 			   bpf_prog_insn_size(prog)) != 0)
 		goto free_prog;
 
 	prog->orig_prog = NULL;
 	prog->jited = 0;
 
 	atomic_set(&prog->aux->refcnt, 1);
 	prog->gpl_compatible = is_gpl ? 1 : 0;
 
+	if (attr->prog_target_ifindex) {
+		err = bpf_prog_offload_init(prog, attr);
+		if (err)
+			goto free_prog;
+	}
+
 	/* find program type: socket_filter vs tracing_filter */
 	err = find_prog_type(type, prog);
 	if (err < 0)
 		goto free_prog;
 
+	prog->aux->load_time = ktime_get_boot_ns();
+	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
+	if (err)
+		goto free_prog;
+
 	/* run eBPF verifier */
 	err = bpf_check(&prog, attr);
 	if (err < 0)
 		goto free_used_maps;
 
 	/* eBPF program is ready to be JITed */
 	prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
 
--- 18 unchanged lines hidden ---
 
 	bpf_prog_kallsyms_add(prog);
 	trace_bpf_prog_load(prog, err);
 	return err;
 
 free_used_maps:
 	free_used_maps(prog->aux);
 free_prog:
 	bpf_prog_uncharge_memlock(prog);
+free_prog_sec:
+	security_bpf_prog_free(prog->aux);
 free_prog_nouncharge:
 	bpf_prog_free(prog);
 	return err;
 }
 
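bpf_prog_load() now records a boot-time load timestamp and a user-supplied prog_name, both reported later through fdinfo and BPF_OBJ_GET_INFO_BY_FD. A minimal sketch loading a do-nothing socket filter with a name — the insns are hand-assembled ("r0 = 0; exit"), and headers with bpf_attr.prog_name from this series are assumed:

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	struct bpf_insn insns[] = {
		{ .code = 0xb7 },	/* BPF_ALU64 | BPF_MOV | BPF_K: r0 = 0 */
		{ .code = 0x95 },	/* BPF_JMP | BPF_EXIT: return r0 */
	};
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = 2;
	attr.license = (__u64)(unsigned long)"GPL";
	strncpy(attr.prog_name, "do_nothing", sizeof(attr.prog_name));

	fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd < 0)
		perror("BPF_PROG_LOAD");
	else
		printf("prog fd %d; name shows up in fdinfo\n", fd);
	return 0;
}
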
-#define BPF_OBJ_LAST_FIELD bpf_fd
+#define BPF_OBJ_LAST_FIELD file_flags
 
 static int bpf_obj_pin(const union bpf_attr *attr)
 {
-	if (CHECK_ATTR(BPF_OBJ))
+	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
 		return -EINVAL;
 
 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
 }
 
 static int bpf_obj_get(const union bpf_attr *attr)
 {
-	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
+	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
+	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
 		return -EINVAL;
 
-	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
+	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
+				attr->file_flags);
 }
 
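BPF_OBJ_GET grows a file_flags field (validated against BPF_OBJ_FLAG_MASK above), so a pinned object can be reopened read-only or write-only regardless of how it was created. A sketch with a hypothetical pin path; assumes a map was previously pinned there via BPF_OBJ_PIN:

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	const char *path = "/sys/fs/bpf/my_map";	/* hypothetical pin */
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;
	attr.file_flags = BPF_F_RDONLY;	/* bits outside BPF_OBJ_FLAG_MASK: -EINVAL */

	fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
	if (fd < 0)
		perror("BPF_OBJ_GET");
	else
		printf("read-only fd %d: lookups ok, updates get -EPERM\n", fd);
	return 0;
}
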
 #ifdef CONFIG_CGROUP_BPF
 
 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
 static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
 {
 
--- 24 unchanged lines hidden ---
 
 		bpf_prog_put(prog);
 		return err;
 	}
 
 	fdput(f);
 	return 0;
 }
 
+#define BPF_F_ATTACH_MASK \
+	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
+
 static int bpf_prog_attach(const union bpf_attr *attr)
 {
 	enum bpf_prog_type ptype;
 	struct bpf_prog *prog;
 	struct cgroup *cgrp;
 	int ret;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (CHECK_ATTR(BPF_PROG_ATTACH))
 		return -EINVAL;
 
-	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
+	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
 		return -EINVAL;
 
 	switch (attr->attach_type) {
 	case BPF_CGROUP_INET_INGRESS:
 	case BPF_CGROUP_INET_EGRESS:
 		ptype = BPF_PROG_TYPE_CGROUP_SKB;
 		break;
 	case BPF_CGROUP_INET_SOCK_CREATE:
 
--- 14 unchanged lines hidden ---
 
 		return PTR_ERR(prog);
 
 	cgrp = cgroup_get_from_fd(attr->target_fd);
 	if (IS_ERR(cgrp)) {
 		bpf_prog_put(prog);
 		return PTR_ERR(cgrp);
 	}
 
-	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
-				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
+	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
+				attr->attach_flags);
 	if (ret)
 		bpf_prog_put(prog);
 	cgroup_put(cgrp);
 
 	return ret;
 }
 
 #define BPF_PROG_DETACH_LAST_FIELD attach_type
 
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
+	enum bpf_prog_type ptype;
+	struct bpf_prog *prog;
 	struct cgroup *cgrp;
 	int ret;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (CHECK_ATTR(BPF_PROG_DETACH))
 		return -EINVAL;
 
 	switch (attr->attach_type) {
 	case BPF_CGROUP_INET_INGRESS:
 	case BPF_CGROUP_INET_EGRESS:
+		ptype = BPF_PROG_TYPE_CGROUP_SKB;
+		break;
 	case BPF_CGROUP_INET_SOCK_CREATE:
+		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
+		break;
 	case BPF_CGROUP_SOCK_OPS:
-		cgrp = cgroup_get_from_fd(attr->target_fd);
-		if (IS_ERR(cgrp))
-			return PTR_ERR(cgrp);
-
-		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
-		cgroup_put(cgrp);
+		ptype = BPF_PROG_TYPE_SOCK_OPS;
 		break;
 	case BPF_SK_SKB_STREAM_PARSER:
 	case BPF_SK_SKB_STREAM_VERDICT:
-		ret = sockmap_get_from_fd(attr, false);
-		break;
+		return sockmap_get_from_fd(attr, false);
 	default:
 		return -EINVAL;
 	}
 
+	cgrp = cgroup_get_from_fd(attr->target_fd);
+	if (IS_ERR(cgrp))
+		return PTR_ERR(cgrp);
+
+	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+	if (IS_ERR(prog))
+		prog = NULL;
+
+	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
+	if (prog)
+		bpf_prog_put(prog);
+	cgroup_put(cgrp);
+
 	return ret;
 }
 
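Attach and detach now route through cgroup_bpf_attach()/cgroup_bpf_detach(), and BPF_F_ALLOW_MULTI joins BPF_F_ALLOW_OVERRIDE in BPF_F_ATTACH_MASK. A sketch of a multi-attach call; it compiles as a standalone translation unit, with cg_fd (an open cgroup-v2 directory) and prog_fd (a loaded BPF_PROG_TYPE_CGROUP_SKB program) assumed to be supplied by the caller:

#include <linux/bpf.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical helper: attach prog_fd to cg_fd, allowing co-existence
 * with other attached programs (BPF_F_ALLOW_MULTI).
 */
int attach_multi(int cg_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cg_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = BPF_CGROUP_INET_EGRESS;
	attr.attach_flags = BPF_F_ALLOW_MULTI;	/* now within BPF_F_ATTACH_MASK */

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}
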
+#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
+
+static int bpf_prog_query(const union bpf_attr *attr,
+			  union bpf_attr __user *uattr)
+{
+	struct cgroup *cgrp;
+	int ret;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	if (CHECK_ATTR(BPF_PROG_QUERY))
+		return -EINVAL;
+	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
+		return -EINVAL;
+
+	switch (attr->query.attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+	case BPF_CGROUP_INET_SOCK_CREATE:
+	case BPF_CGROUP_SOCK_OPS:
+		break;
+	default:
+		return -EINVAL;
+	}
+	cgrp = cgroup_get_from_fd(attr->query.target_fd);
+	if (IS_ERR(cgrp))
+		return PTR_ERR(cgrp);
+	ret = cgroup_bpf_query(cgrp, attr, uattr);
+	cgroup_put(cgrp);
+	return ret;
+}
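
The new BPF_PROG_QUERY command lists the programs attached to a cgroup; query.prog_cnt is in/out (buffer capacity on entry, number of ids on return). An illustrative sketch, again a standalone TU with cg_fd assumed to be a valid cgroup-v2 directory fd:

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical helper: print egress programs attached under cg_fd. */
void query_egress(int cg_fd)
{
	__u32 ids[64];
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = cg_fd;
	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
	attr.query.prog_ids = (__u64)(unsigned long)ids;
	attr.query.prog_cnt = 64;	/* capacity in, count out */

	if (syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr)) == 0)
		printf("%u program(s) attached; first id %u\n",
		       attr.query.prog_cnt,
		       attr.query.prog_cnt ? ids[0] : 0);
}
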
 #endif /* CONFIG_CGROUP_BPF */
 
 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
 
 static int bpf_prog_test_run(const union bpf_attr *attr,
 			     union bpf_attr __user *uattr)
 {
 	struct bpf_prog *prog;
 
--- 68 unchanged lines hidden ---
 
 
 	fd = bpf_prog_new_fd(prog);
 	if (fd < 0)
 		bpf_prog_put(prog);
 
 	return fd;
 }
 
-#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id
+#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
 
 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 {
 	struct bpf_map *map;
 	u32 id = attr->map_id;
+	int f_flags;
 	int fd;
 
-	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
+	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
+	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
 		return -EINVAL;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
+	f_flags = bpf_get_file_flag(attr->open_flags);
+	if (f_flags < 0)
+		return f_flags;
+
 	spin_lock_bh(&map_idr_lock);
 	map = idr_find(&map_idr, id);
 	if (map)
 		map = bpf_map_inc_not_zero(map, true);
 	else
 		map = ERR_PTR(-ENOENT);
 	spin_unlock_bh(&map_idr_lock);
 
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
-	fd = bpf_map_new_fd(map);
+	fd = bpf_map_new_fd(map, f_flags);
 	if (fd < 0)
 		bpf_map_put(map);
 
 	return fd;
 }
 
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 				   const union bpf_attr *attr,
 
--- 11 unchanged lines hidden ---
 
 		return err;
 	info_len = min_t(u32, sizeof(info), info_len);
 
 	if (copy_from_user(&info, uinfo, info_len))
 		return -EFAULT;
 
 	info.type = prog->type;
 	info.id = prog->aux->id;
+	info.load_time = prog->aux->load_time;
+	info.created_by_uid = from_kuid_munged(current_user_ns(),
+					       prog->aux->user->uid);
 
 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
+	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
 
+	ulen = info.nr_map_ids;
+	info.nr_map_ids = prog->aux->used_map_cnt;
+	ulen = min_t(u32, info.nr_map_ids, ulen);
+	if (ulen) {
+		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
+		u32 i;
+
+		for (i = 0; i < ulen; i++)
+			if (put_user(prog->aux->used_maps[i]->id,
+				     &user_map_ids[i]))
+				return -EFAULT;
+	}
+
 	if (!capable(CAP_SYS_ADMIN)) {
 		info.jited_prog_len = 0;
 		info.xlated_prog_len = 0;
 		goto done;
 	}
 
 	ulen = info.jited_prog_len;
 	info.jited_prog_len = prog->jited_len;
 
--- 36 unchanged lines hidden ---
 
 	info_len = min_t(u32, sizeof(info), info_len);
 
 	info.type = map->map_type;
 	info.id = map->id;
 	info.key_size = map->key_size;
 	info.value_size = map->value_size;
 	info.max_entries = map->max_entries;
 	info.map_flags = map->map_flags;
+	memcpy(info.name, map->name, sizeof(map->name));
 
 	if (copy_to_user(uinfo, &info, info_len) ||
 	    put_user(info_len, &uattr->info.info_len))
 		return -EFAULT;
 
 	return 0;
 }
 
--- 38 unchanged lines hidden ---
 
 	if (err)
 		return err;
 	size = min_t(u32, size, sizeof(attr));
 
 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
 	if (copy_from_user(&attr, uattr, size) != 0)
 		return -EFAULT;
 
+	err = security_bpf(cmd, &attr, size);
+	if (err < 0)
+		return err;
+
 	switch (cmd) {
 	case BPF_MAP_CREATE:
 		err = map_create(&attr);
 		break;
 	case BPF_MAP_LOOKUP_ELEM:
 		err = map_lookup_elem(&attr);
 		break;
 	case BPF_MAP_UPDATE_ELEM:
 
--- 16 unchanged lines hidden ---
 
 		break;
 #ifdef CONFIG_CGROUP_BPF
 	case BPF_PROG_ATTACH:
 		err = bpf_prog_attach(&attr);
 		break;
 	case BPF_PROG_DETACH:
 		err = bpf_prog_detach(&attr);
 		break;
+	case BPF_PROG_QUERY:
+		err = bpf_prog_query(&attr, uattr);
+		break;
 #endif
 	case BPF_PROG_TEST_RUN:
 		err = bpf_prog_test_run(&attr, uattr);
 		break;
 	case BPF_PROG_GET_NEXT_ID:
 		err = bpf_obj_get_next_id(&attr, uattr,
 					  &prog_idr, &prog_idr_lock);
 		break;
 
--- 20 unchanged lines hidden ---
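
Taken together, the bpf_prog_get_info_by_fd()/bpf_map_get_info_by_fd() hunks above export the new name, load_time, created_by_uid and map-id data to user space. A closing sketch reading them back — a standalone TU with prog_fd assumed to be an already-loaded program fd, and struct bpf_prog_info assumed to come from headers carrying this series:

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical helper: dump the new per-program metadata. */
void show_prog_info(int prog_fd)
{
	struct bpf_prog_info info;
	union bpf_attr attr;

	memset(&info, 0, sizeof(info));
	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return;

	printf("name %.16s, loaded at %llu ns (boottime), by uid %u, %u map(s)\n",
	       info.name, (unsigned long long)info.load_time,
	       info.created_by_uid, info.nr_map_ids);
}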