Lines Matching +full:pre +full:- +full:fetchable (kernel/bpf/syscall.c)

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
6 #include <linux/bpf-cgroup.h>
35 #include <linux/bpf-netns.h>
48 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
49 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
50 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
51 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
52 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
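The IS_FD_* macros above single out map families whose update values are file descriptors rather than raw bytes: the kernel resolves the fd to the underlying object (perf event, cgroup, inner map, program). A minimal user-space sketch of the map-in-map case, assuming libbpf is available (the "inner"/"outer" names are illustrative):

#include <bpf/bpf.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    LIBBPF_OPTS(bpf_map_create_opts, opts);
    int inner_fd, outer_fd, key = 0, err;

    inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
                              sizeof(int), sizeof(long), 4, NULL);
    if (inner_fd < 0)
        return 1;

    opts.inner_map_fd = inner_fd;   /* template for the inner maps */
    outer_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer",
                              sizeof(int), sizeof(int), 4, &opts);
    if (outer_fd < 0)
        return 1;

    /* the "value" here is the inner map's fd; the kernel turns it
     * into a bpf_map reference (the IS_FD_ARRAY() path above) */
    err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
    printf("map-in-map update: %d\n", err);
    close(inner_fd);
    close(outer_fd);
    return err ? 1 : 0;
}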
83 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
88 * meant to be a future-proofing of bits.
97 return -E2BIG; in bpf_check_uarg_tail_zero()
104 actual_size - expected_size) == NULL; in bpf_check_uarg_tail_zero()
107 actual_size - expected_size); in bpf_check_uarg_tail_zero()
110 return res ? 0 : -E2BIG; in bpf_check_uarg_tail_zero()
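The fragments above come from bpf_check_uarg_tail_zero(), which accepts a larger-than-known struct from user space only if every byte past the size the kernel understands is zero, returning -E2BIG otherwise. A user-space restatement of the rule (a sketch, not the kernel code):

#include <stdbool.h>
#include <stddef.h>

/* Mirror of the tail-zero rule: a bigger struct is tolerated only
 * when all bytes beyond expected_size are zero. */
static bool tail_is_zero(const void *uaddr, size_t expected_size,
                         size_t actual_size)
{
    const unsigned char *p = uaddr;
    size_t i;

    if (actual_size <= expected_size)
        return true;
    for (i = expected_size; i < actual_size; i++)
        if (p[i] != 0)
            return false;   /* kernel would return -E2BIG */
    return true;
}

In practice this is why user space should memset() the whole attribute struct to zero before filling in the fields it knows about.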
123 atomic64_inc(&map->writecnt); in bpf_map_write_active_inc()
128 atomic64_dec(&map->writecnt); in bpf_map_write_active_dec()
133 return atomic64_read(&map->writecnt) != 0; in bpf_map_write_active()
138 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_value_size()
139 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in bpf_map_value_size()
140 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || in bpf_map_value_size()
141 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in bpf_map_value_size()
142 return round_up(map->value_size, 8) * num_possible_cpus(); in bpf_map_value_size()
146 return map->value_size; in bpf_map_value_size()
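bpf_map_value_size() shows that for per-CPU maps the syscall-visible value is an array of per-CPU copies, each slot rounded up to 8 bytes. User space must size its lookup buffer the same way; a sketch assuming libbpf (map_fd and key are assumed to exist):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <stdint.h>
#include <stdlib.h>

/* Buffer sizing matches bpf_map_value_size():
 * round_up(value_size, 8) * num_possible_cpus(). */
static uint64_t *percpu_lookup(int map_fd, const void *key, size_t value_size)
{
    int ncpus = libbpf_num_possible_cpus();
    size_t slot = (value_size + 7) & ~(size_t)7;   /* round_up(, 8) */
    uint64_t *buf;

    if (ncpus < 0)
        return NULL;
    buf = calloc(ncpus, slot);
    if (buf && bpf_map_lookup_elem(map_fd, key, buf)) {
        free(buf);
        return NULL;
    }
    return buf;   /* one slot per possible CPU */
}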
151 /* Wait for any running non-sleepable BPF programs to complete so that in maybe_wait_bpf_programs()
152 * userspace, when we return to it, knows that all non-sleepable in maybe_wait_bpf_programs()
159 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || in maybe_wait_bpf_programs()
160 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) in maybe_wait_bpf_programs()
176 for (i = 0, field = rec->fields; i < cnt; i++, field++) { in __bpf_obj_unpin_uptrs()
177 if (field->type != BPF_UPTR) in __bpf_obj_unpin_uptrs()
180 uptr_addr = obj + field->offset; in __bpf_obj_unpin_uptrs()
190 __bpf_obj_unpin_uptrs(rec, rec->cnt, obj); in bpf_obj_unpin_uptrs()
205 for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { in bpf_obj_pin_uptrs()
206 if (field->type != BPF_UPTR) in bpf_obj_pin_uptrs()
209 uptr_addr = obj + field->offset; in bpf_obj_pin_uptrs()
214 t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id); in bpf_obj_pin_uptrs()
215 /* t->size was checked for zero before */ in bpf_obj_pin_uptrs()
216 if (check_add_overflow(start, t->size - 1, &end)) { in bpf_obj_pin_uptrs()
217 err = -EFAULT; in bpf_obj_pin_uptrs()
223 err = -EOPNOTSUPP; in bpf_obj_pin_uptrs()
232 err = -EOPNOTSUPP; in bpf_obj_pin_uptrs()
255 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || in bpf_map_update_value()
256 map->map_type == BPF_MAP_TYPE_ARENA || in bpf_map_update_value()
257 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_update_value()
258 return map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
259 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || in bpf_map_update_value()
260 map->map_type == BPF_MAP_TYPE_SOCKMAP) { in bpf_map_update_value()
268 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_update_value()
269 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_update_value()
271 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_update_value()
273 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_update_value()
279 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in bpf_map_update_value()
282 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_update_value()
286 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_update_value()
287 map->map_type == BPF_MAP_TYPE_STACK || in bpf_map_update_value()
288 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in bpf_map_update_value()
289 err = map->ops->map_push_elem(map, value, flags); in bpf_map_update_value()
291 err = bpf_obj_pin_uptrs(map->record, value); in bpf_map_update_value()
294 err = map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
297 bpf_obj_unpin_uptrs(map->record, value); in bpf_map_update_value()
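All of the per-map-type branches in bpf_map_update_value() are reached through a single syscall command. A minimal raw-syscall sketch of the user-space side, tying back to the tail-zero rule above (zero the whole attr first):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch of the call that lands in bpf_map_update_value(). */
static int map_update(int fd, const void *key, const void *value,
                      __u64 flags)
{
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.map_fd = fd;
    attr.key = (__u64)(unsigned long)key;
    attr.value = (__u64)(unsigned long)value;
    attr.flags = flags;

    return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}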
315 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_copy_value()
316 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_copy_value()
318 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_copy_value()
320 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_copy_value()
322 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { in bpf_map_copy_value()
328 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_copy_value()
330 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_copy_value()
331 map->map_type == BPF_MAP_TYPE_STACK || in bpf_map_copy_value()
332 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in bpf_map_copy_value()
333 err = map->ops->map_peek_elem(map, value); in bpf_map_copy_value()
334 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_copy_value()
339 if (map->ops->map_lookup_elem_sys_only) in bpf_map_copy_value()
340 ptr = map->ops->map_lookup_elem_sys_only(map, key); in bpf_map_copy_value()
342 ptr = map->ops->map_lookup_elem(map, key); in bpf_map_copy_value()
346 err = -ENOENT; in bpf_map_copy_value()
435 map->map_type = attr->map_type; in bpf_map_init_from_attr()
436 map->key_size = attr->key_size; in bpf_map_init_from_attr()
437 map->value_size = attr->value_size; in bpf_map_init_from_attr()
438 map->max_entries = attr->max_entries; in bpf_map_init_from_attr()
439 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); in bpf_map_init_from_attr()
440 map->numa_node = bpf_map_attr_numa_node(attr); in bpf_map_init_from_attr()
441 map->map_extra = attr->map_extra; in bpf_map_init_from_attr()
452 map->id = id; in bpf_map_alloc_id()
457 return -ENOSPC; in bpf_map_alloc_id()
467 * disappears - even if someone holds an fd to them they are unusable, in bpf_map_free_id()
471 if (!map->id) in bpf_map_free_id()
476 idr_remove(&map_idr, map->id); in bpf_map_free_id()
477 map->id = 0; in bpf_map_free_id()
487 * So we have to check map->objcg for being NULL each time it's in bpf_map_save_memcg()
491 map->objcg = get_obj_cgroup_from_current(); in bpf_map_save_memcg()
496 if (map->objcg) in bpf_map_release_memcg()
497 obj_cgroup_put(map->objcg); in bpf_map_release_memcg()
502 if (map->objcg) in bpf_map_get_memcg()
503 return get_mem_cgroup_from_objcg(map->objcg); in bpf_map_get_memcg()
630 ret = -ENOMEM; in bpf_map_alloc_pages()
646 if (f1->offset < f2->offset) in btf_field_cmp()
647 return -1; in btf_field_cmp()
648 else if (f1->offset > f2->offset) in btf_field_cmp()
658 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask)) in btf_record_find()
660 field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp); in btf_record_find()
661 if (!field || !(field->type & field_mask)) in btf_record_find()
672 for (i = 0; i < rec->cnt; i++) { in btf_record_free()
673 switch (rec->fields[i].type) { in btf_record_free()
678 if (rec->fields[i].kptr.module) in btf_record_free()
679 module_put(rec->fields[i].kptr.module); in btf_record_free()
680 if (btf_is_kernel(rec->fields[i].kptr.btf)) in btf_record_free()
681 btf_put(rec->fields[i].kptr.btf); in btf_record_free()
705 btf_record_free(map->record); in bpf_map_free_record()
706 map->record = NULL; in bpf_map_free_record()
717 size = struct_size(rec, fields, rec->cnt); in btf_record_dup()
720 return ERR_PTR(-ENOMEM); in btf_record_dup()
722 fields = rec->fields; in btf_record_dup()
723 new_rec->cnt = 0; in btf_record_dup()
724 for (i = 0; i < rec->cnt; i++) { in btf_record_dup()
733 ret = -ENXIO; in btf_record_dup()
750 ret = -EFAULT; in btf_record_dup()
754 new_rec->cnt++; in btf_record_dup()
771 if (rec_a->cnt != rec_b->cnt) in btf_record_equal()
773 size = struct_size(rec_a, fields, rec_a->cnt); in btf_record_equal()
795 bpf_timer_cancel_and_free(obj + rec->timer_off); in bpf_obj_free_timer()
802 bpf_wq_cancel_and_free(obj + rec->wq_off); in bpf_obj_free_workqueue()
809 bpf_task_work_cancel_and_free(obj + rec->task_work_off); in bpf_obj_free_task_work()
819 fields = rec->fields; in bpf_obj_free_fields()
820 for (i = 0; i < rec->cnt; i++) { in bpf_obj_free_fields()
823 void *field_ptr = obj + field->offset; in bpf_obj_free_fields()
848 if (!btf_is_kernel(field->kptr.btf)) { in bpf_obj_free_fields()
849 pointee_struct_meta = btf_find_struct_meta(field->kptr.btf, in bpf_obj_free_fields()
850 field->kptr.btf_id); in bpf_obj_free_fields()
852 pointee_struct_meta->record : NULL, in bpf_obj_free_fields()
855 field->kptr.dtor(xchgd_field); in bpf_obj_free_fields()
863 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) in bpf_obj_free_fields()
865 bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off); in bpf_obj_free_fields()
868 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) in bpf_obj_free_fields()
870 bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off); in bpf_obj_free_fields()
885 struct btf_record *rec = map->record; in bpf_map_free()
886 struct btf *btf = map->btf; in bpf_map_free()
892 kfree(map->excl_prog_sha); in bpf_map_free()
894 map->ops->map_free(map); in bpf_map_free()
901 * Note that the btf_record stashed in map->inner_map_meta->record was in bpf_map_free()
926 if (atomic64_dec_and_test(&map->usercnt)) { in bpf_map_put_uref()
927 if (map->ops->map_release_uref) in bpf_map_put_uref()
928 map->ops->map_release_uref(map); in bpf_map_put_uref()
934 INIT_WORK(&map->work, bpf_map_free_deferred); in bpf_map_free_in_work()
938 queue_work(system_dfl_wq, &map->work); in bpf_map_free_in_work()
955 * (underlying map implementation ops->map_free() might sleep)
959 if (atomic64_dec_and_test(&map->refcnt)) { in bpf_map_put()
963 WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt)); in bpf_map_put()
964 if (READ_ONCE(map->free_after_mult_rcu_gp)) in bpf_map_put()
965 call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp); in bpf_map_put()
966 else if (READ_ONCE(map->free_after_rcu_gp)) in bpf_map_put()
967 call_rcu(&map->rcu, bpf_map_free_rcu_gp); in bpf_map_put()
982 struct bpf_map *map = filp->private_data; in bpf_map_release()
984 if (map->ops->map_release) in bpf_map_release()
985 map->ops->map_release(map, filp); in bpf_map_release()
993 fmode_t mode = fd_file(f)->f_mode; in map_get_sys_perms()
998 if (READ_ONCE(map->frozen)) in map_get_sys_perms()
1007 return map->ops->map_mem_usage(map); in bpf_map_memory_usage()
1012 struct bpf_map *map = filp->private_data; in bpf_map_show_fdinfo()
1015 spin_lock(&map->owner_lock); in bpf_map_show_fdinfo()
1016 if (map->owner) { in bpf_map_show_fdinfo()
1017 type = map->owner->type; in bpf_map_show_fdinfo()
1018 jited = map->owner->jited; in bpf_map_show_fdinfo()
1020 spin_unlock(&map->owner_lock); in bpf_map_show_fdinfo()
1032 map->map_type, in bpf_map_show_fdinfo()
1033 map->key_size, in bpf_map_show_fdinfo()
1034 map->value_size, in bpf_map_show_fdinfo()
1035 map->max_entries, in bpf_map_show_fdinfo()
1036 map->map_flags, in bpf_map_show_fdinfo()
1037 (unsigned long long)map->map_extra, in bpf_map_show_fdinfo()
1039 map->id, in bpf_map_show_fdinfo()
1040 READ_ONCE(map->frozen)); in bpf_map_show_fdinfo()
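Everything bpf_map_show_fdinfo() prints (map_type, key_size, value_size, max_entries, map_flags, map_extra, id, frozen) is readable through procfs. A small sketch, assuming map_fd is an open BPF map fd in the current process:

#include <stdio.h>

static void dump_map_fdinfo(int map_fd)
{
    char path[64], line[256];
    FILE *f;

    snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", map_fd);
    f = fopen(path, "r");
    if (!f)
        return;
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);   /* the text emitted by the kernel above */
    fclose(f);
}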
1054 return -EINVAL; in bpf_dummy_read()
1063 return -EINVAL; in bpf_dummy_write()
1066 /* called for any extra memory-mapped regions (except initial) */
1069 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_open()
1071 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_open()
1078 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_close()
1080 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_close()
1091 struct bpf_map *map = filp->private_data; in bpf_map_mmap()
1094 if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record)) in bpf_map_mmap()
1095 return -ENOTSUPP; in bpf_map_mmap()
1097 if (!(vma->vm_flags & VM_SHARED)) in bpf_map_mmap()
1098 return -EINVAL; in bpf_map_mmap()
1100 mutex_lock(&map->freeze_mutex); in bpf_map_mmap()
1102 if (vma->vm_flags & VM_WRITE) { in bpf_map_mmap()
1103 if (map->frozen) { in bpf_map_mmap()
1104 err = -EPERM; in bpf_map_mmap()
1107 /* map is meant to be read-only, so do not allow mapping as in bpf_map_mmap()
1109 * reference and allows user-space to still modify it after in bpf_map_mmap()
1112 if (map->map_flags & BPF_F_RDONLY_PROG) { in bpf_map_mmap()
1113 err = -EACCES; in bpf_map_mmap()
1119 mutex_unlock(&map->freeze_mutex); in bpf_map_mmap()
1124 vma->vm_ops = &bpf_map_default_vmops; in bpf_map_mmap()
1125 vma->vm_private_data = map; in bpf_map_mmap()
1127 /* If mapping is read-only, then disallow potentially re-mapping with in bpf_map_mmap()
1129 * means that as far as BPF map's memory-mapped VMAs are concerned, in bpf_map_mmap()
1134 if (!(vma->vm_flags & VM_WRITE)) in bpf_map_mmap()
1137 err = map->ops->map_mmap(map, vma); in bpf_map_mmap()
1139 if (vma->vm_flags & VM_WRITE) in bpf_map_mmap()
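The bpf_map_mmap() checks above gate mmap() on a map fd: only map types whose ops provide map_mmap support it (arrays need BPF_F_MMAPABLE at creation), the mapping must be MAP_SHARED, and a frozen or BPF_F_RDONLY_PROG map refuses PROT_WRITE. A sketch of the happy path, assuming libbpf:

#include <bpf/bpf.h>
#include <stddef.h>
#include <sys/mman.h>

static void *mmap_array_map(size_t value_size, unsigned int nr_entries)
{
    LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
    int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmapable",
                            sizeof(int), value_size, nr_entries, &opts);
    void *mem;

    if (fd < 0)
        return NULL;
    /* length is rounded up to page size by the kernel; the exact
     * in-kernel layout rounds each value to 8 bytes */
    mem = mmap(NULL, value_size * nr_entries, PROT_READ | PROT_WRITE,
               MAP_SHARED, fd, 0);
    return mem == MAP_FAILED ? NULL : mem;
}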
1148 struct bpf_map *map = filp->private_data; in bpf_map_poll()
1150 if (map->ops->map_poll) in bpf_map_poll()
1151 return map->ops->map_poll(map, filp, pts); in bpf_map_poll()
1160 struct bpf_map *map = filp->private_data; in bpf_get_unmapped_area()
1162 if (map->ops->map_get_unmapped_area) in bpf_get_unmapped_area()
1163 return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags); in bpf_get_unmapped_area()
1165 return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); in bpf_get_unmapped_area()
1191 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, in bpf_map_new_fd()
1198 return -EINVAL; in bpf_get_file_flag()
1208 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
1209 sizeof(attr->CMD##_LAST_FIELD), 0, \
1210 sizeof(*attr) - \
1211 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
1212 sizeof(attr->CMD##_LAST_FIELD)) != NULL
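CHECK_ATTR(CMD) evaluates to true when any byte of union bpf_attr past the command's last known field is non-zero, in which case the handler returns -EINVAL. A sketch demonstrating the rejection from user space: for BPF_MAP_GET_NEXT_KEY the last field is next_key, so a stray value in flags lands in the checked region (the exact layout is kernel-version dependent; this is illustrative only):

#include <errno.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.map_fd = -1;
    attr.flags = 1;   /* non-zero byte after GET_NEXT_KEY's last field */
    if (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) < 0)
        printf("rejected before fd lookup: %s\n", strerror(errno));
    return 0;
}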
1227 return -EINVAL; in bpf_obj_name_cpy()
1233 return -EINVAL; in bpf_obj_name_cpy()
1235 return src - orig_src; in bpf_obj_name_cpy()
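bpf_obj_name_cpy() enforces the naming rule for maps and programs: at most BPF_OBJ_NAME_LEN - 1 characters, each alphanumeric, '_' or '.', else -EINVAL. A user-space pre-check sketch of the same rule:

#include <ctype.h>
#include <linux/bpf.h>   /* BPF_OBJ_NAME_LEN */
#include <stdbool.h>

static bool bpf_name_ok(const char *name)
{
    unsigned int i;

    for (i = 0; name[i]; i++) {
        if (i >= BPF_OBJ_NAME_LEN - 1)
            return false;   /* too long: no room for the NUL */
        if (!isalnum((unsigned char)name[i]) &&
            name[i] != '_' && name[i] != '.')
            return false;
    }
    return true;
}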
1243 return -ENOTSUPP; in map_check_no_btf()
1256 if (!key_type || key_size != map->key_size) in map_check_btf()
1257 return -EINVAL; in map_check_btf()
1260 if (!map->ops->map_check_btf) in map_check_btf()
1261 return -EINVAL; in map_check_btf()
1265 if (!value_type || value_size != map->value_size) in map_check_btf()
1266 return -EINVAL; in map_check_btf()
1268 map->record = btf_parse_fields(btf, value_type, in map_check_btf()
1272 map->value_size); in map_check_btf()
1273 if (!IS_ERR_OR_NULL(map->record)) { in map_check_btf()
1277 ret = -EPERM; in map_check_btf()
1280 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) { in map_check_btf()
1281 ret = -EACCES; in map_check_btf()
1284 for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) { in map_check_btf()
1285 switch (map->record->field_mask & (1 << i)) { in map_check_btf()
1290 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1291 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
1292 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in map_check_btf()
1293 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
1294 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && in map_check_btf()
1295 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && in map_check_btf()
1296 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { in map_check_btf()
1297 ret = -EOPNOTSUPP; in map_check_btf()
1304 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1305 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1306 map->map_type != BPF_MAP_TYPE_ARRAY) { in map_check_btf()
1307 ret = -EOPNOTSUPP; in map_check_btf()
1315 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1316 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && in map_check_btf()
1317 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1318 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH && in map_check_btf()
1319 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
1320 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && in map_check_btf()
1321 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
1322 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && in map_check_btf()
1323 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && in map_check_btf()
1324 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { in map_check_btf()
1325 ret = -EOPNOTSUPP; in map_check_btf()
1330 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) { in map_check_btf()
1331 ret = -EOPNOTSUPP; in map_check_btf()
1337 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1338 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1339 map->map_type != BPF_MAP_TYPE_ARRAY) { in map_check_btf()
1340 ret = -EOPNOTSUPP; in map_check_btf()
1346 ret = -EOPNOTSUPP; in map_check_btf()
1352 ret = btf_check_and_fixup_fields(btf, map->record); in map_check_btf()
1356 if (map->ops->map_check_btf) { in map_check_btf()
1357 ret = map->ops->map_check_btf(map, btf, key_type, value_type); in map_check_btf()
1380 u32 map_type = attr->map_type; in map_create()
1388 return -EINVAL; in map_create()
1391 * to avoid per-map type checks tripping on unknown flag in map_create()
1393 token_flag = attr->map_flags & BPF_F_TOKEN_FD; in map_create()
1394 attr->map_flags &= ~BPF_F_TOKEN_FD; in map_create()
1396 if (attr->btf_vmlinux_value_type_id) { in map_create()
1397 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS || in map_create()
1398 attr->btf_key_type_id || attr->btf_value_type_id) in map_create()
1399 return -EINVAL; in map_create()
1400 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) { in map_create()
1401 return -EINVAL; in map_create()
1404 if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER && in map_create()
1405 attr->map_type != BPF_MAP_TYPE_ARENA && in map_create()
1406 attr->map_extra != 0) in map_create()
1407 return -EINVAL; in map_create()
1409 f_flags = bpf_get_file_flag(attr->map_flags); in map_create()
1416 return -EINVAL; in map_create()
1419 map_type = attr->map_type; in map_create()
1421 return -EINVAL; in map_create()
1425 return -EINVAL; in map_create()
1427 if (ops->map_alloc_check) { in map_create()
1428 err = ops->map_alloc_check(attr); in map_create()
1432 if (attr->map_ifindex) in map_create()
1434 if (!ops->map_mem_usage) in map_create()
1435 return -EINVAL; in map_create()
1438 token = bpf_token_get_from_fd(attr->map_token_fd); in map_create()
1444 * system-wide capabilities checks in map_create()
1447 !bpf_token_allow_map_type(token, attr->map_type)) { in map_create()
1453 err = -EPERM; in map_create()
1512 map = ops->map_alloc(attr); in map_create()
1517 map->ops = ops; in map_create()
1518 map->map_type = map_type; in map_create()
1520 err = bpf_obj_name_cpy(map->name, attr->map_name, in map_create()
1521 sizeof(attr->map_name)); in map_create()
1526 map->cookie = gen_cookie_next(&bpf_map_cookie); in map_create()
1529 atomic64_set(&map->refcnt, 1); in map_create()
1530 atomic64_set(&map->usercnt, 1); in map_create()
1531 mutex_init(&map->freeze_mutex); in map_create()
1532 spin_lock_init(&map->owner_lock); in map_create()
1534 if (attr->btf_key_type_id || attr->btf_value_type_id || in map_create()
1538 * counter part. Thus, attr->btf_fd has in map_create()
1541 attr->btf_vmlinux_value_type_id) { in map_create()
1544 btf = btf_get_by_fd(attr->btf_fd); in map_create()
1551 err = -EACCES; in map_create()
1554 map->btf = btf; in map_create()
1556 if (attr->btf_value_type_id) { in map_create()
1557 err = map_check_btf(map, token, btf, attr->btf_key_type_id, in map_create()
1558 attr->btf_value_type_id); in map_create()
1563 map->btf_key_type_id = attr->btf_key_type_id; in map_create()
1564 map->btf_value_type_id = attr->btf_value_type_id; in map_create()
1565 map->btf_vmlinux_value_type_id = in map_create()
1566 attr->btf_vmlinux_value_type_id; in map_create()
1569 if (attr->excl_prog_hash) { in map_create()
1570 bpfptr_t uprog_hash = make_bpfptr(attr->excl_prog_hash, uattr.is_kernel); in map_create()
1572 if (attr->excl_prog_hash_size != SHA256_DIGEST_SIZE) { in map_create()
1573 err = -EINVAL; in map_create()
1577 map->excl_prog_sha = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); in map_create()
1578 if (!map->excl_prog_sha) { in map_create()
1579 err = -ENOMEM; in map_create()
1583 if (copy_from_bpfptr(map->excl_prog_sha, uprog_hash, SHA256_DIGEST_SIZE)) { in map_create()
1584 err = -EFAULT; in map_create()
1587 } else if (attr->excl_prog_hash_size) { in map_create()
1588 return -EINVAL; in map_create()
1608 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. in map_create()
1627 atomic64_inc(&map->refcnt); in bpf_map_inc()
1633 atomic64_inc(&map->refcnt); in bpf_map_inc_with_uref()
1634 atomic64_inc(&map->usercnt); in bpf_map_inc_with_uref()
1668 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); in __bpf_map_inc_not_zero()
1670 return ERR_PTR(-ENOENT); in __bpf_map_inc_not_zero()
1672 atomic64_inc(&map->usercnt); in __bpf_map_inc_not_zero()
1687 return -ENOTSUPP; in bpf_stackmap_extract()
1696 return ERR_PTR(-EINVAL); in __bpf_copy_key()
1707 return ERR_PTR(-EINVAL); in ___bpf_copy_key()
1717 void __user *ukey = u64_to_user_ptr(attr->key); in map_lookup_elem()
1718 void __user *uvalue = u64_to_user_ptr(attr->value); in map_lookup_elem()
1725 return -EINVAL; in map_lookup_elem()
1727 if (attr->flags & ~BPF_F_LOCK) in map_lookup_elem()
1728 return -EINVAL; in map_lookup_elem()
1730 CLASS(fd, f)(attr->map_fd); in map_lookup_elem()
1735 return -EPERM; in map_lookup_elem()
1737 if ((attr->flags & BPF_F_LOCK) && in map_lookup_elem()
1738 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) in map_lookup_elem()
1739 return -EINVAL; in map_lookup_elem()
1741 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_elem()
1747 err = -ENOMEM; in map_lookup_elem()
1752 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in map_lookup_elem()
1754 err = -EFAULT; in map_lookup_elem()
1756 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1760 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1764 err = -EFAULT; in map_lookup_elem()
1782 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); in map_update_elem()
1783 bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel); in map_update_elem()
1790 return -EINVAL; in map_update_elem()
1792 CLASS(fd, f)(attr->map_fd); in map_update_elem()
1798 err = -EPERM; in map_update_elem()
1802 if ((attr->flags & BPF_F_LOCK) && in map_update_elem()
1803 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in map_update_elem()
1804 err = -EINVAL; in map_update_elem()
1808 key = ___bpf_copy_key(ukey, map->key_size); in map_update_elem()
1821 err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags); in map_update_elem()
1837 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); in map_delete_elem()
1843 return -EINVAL; in map_delete_elem()
1845 CLASS(fd, f)(attr->map_fd); in map_delete_elem()
1851 err = -EPERM; in map_delete_elem()
1855 key = ___bpf_copy_key(ukey, map->key_size); in map_delete_elem()
1865 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in map_delete_elem()
1867 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1873 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1890 void __user *ukey = u64_to_user_ptr(attr->key); in map_get_next_key()
1891 void __user *unext_key = u64_to_user_ptr(attr->next_key); in map_get_next_key()
1897 return -EINVAL; in map_get_next_key()
1899 CLASS(fd, f)(attr->map_fd); in map_get_next_key()
1904 return -EPERM; in map_get_next_key()
1907 key = __bpf_copy_key(ukey, map->key_size); in map_get_next_key()
1914 err = -ENOMEM; in map_get_next_key()
1915 next_key = kvmalloc(map->key_size, GFP_USER); in map_get_next_key()
1925 err = map->ops->map_get_next_key(map, key, next_key); in map_get_next_key()
1931 err = -EFAULT; in map_get_next_key()
1932 if (copy_to_user(unext_key, next_key, map->key_size) != 0) in map_get_next_key()
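map_get_next_key() is the building block for iterating a map: a NULL key starts from the beginning, and the walk ends when the kernel reports -ENOENT. A sketch assuming libbpf and an int-keyed map:

#include <bpf/bpf.h>
#include <errno.h>
#include <stdio.h>

static void walk_keys(int map_fd)
{
    int key, next, *prev = NULL;

    /* NULL "previous key" starts the walk; libbpf returns non-zero
     * and sets errno when the kernel reports an error */
    while (!bpf_map_get_next_key(map_fd, prev, &next)) {
        printf("key=%d\n", next);
        key = next;
        prev = &key;
    }
    if (errno != ENOENT)
        perror("bpf_map_get_next_key");
}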
1948 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_delete_batch()
1953 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_delete_batch()
1954 return -EINVAL; in generic_map_delete_batch()
1956 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_delete_batch()
1957 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in generic_map_delete_batch()
1958 return -EINVAL; in generic_map_delete_batch()
1961 max_count = attr->batch.count; in generic_map_delete_batch()
1965 if (put_user(0, &uattr->batch.count)) in generic_map_delete_batch()
1966 return -EFAULT; in generic_map_delete_batch()
1968 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_delete_batch()
1970 return -ENOMEM; in generic_map_delete_batch()
1973 err = -EFAULT; in generic_map_delete_batch()
1974 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_delete_batch()
1975 map->key_size)) in generic_map_delete_batch()
1985 err = map->ops->map_delete_elem(map, key); in generic_map_delete_batch()
1992 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) in generic_map_delete_batch()
1993 err = -EFAULT; in generic_map_delete_batch()
2004 void __user *values = u64_to_user_ptr(attr->batch.values); in generic_map_update_batch()
2005 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_update_batch()
2010 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_update_batch()
2011 return -EINVAL; in generic_map_update_batch()
2013 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_update_batch()
2014 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in generic_map_update_batch()
2015 return -EINVAL; in generic_map_update_batch()
2020 max_count = attr->batch.count; in generic_map_update_batch()
2024 if (put_user(0, &uattr->batch.count)) in generic_map_update_batch()
2025 return -EFAULT; in generic_map_update_batch()
2027 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_update_batch()
2029 return -ENOMEM; in generic_map_update_batch()
2034 return -ENOMEM; in generic_map_update_batch()
2038 err = -EFAULT; in generic_map_update_batch()
2039 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_update_batch()
2040 map->key_size) || in generic_map_update_batch()
2045 attr->batch.elem_flags); in generic_map_update_batch()
2052 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) in generic_map_update_batch()
2053 err = -EFAULT; in generic_map_update_batch()
2065 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); in generic_map_lookup_batch()
2066 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); in generic_map_lookup_batch()
2067 void __user *values = u64_to_user_ptr(attr->batch.values); in generic_map_lookup_batch()
2068 void __user *keys = u64_to_user_ptr(attr->batch.keys); in generic_map_lookup_batch()
2073 if (attr->batch.elem_flags & ~BPF_F_LOCK) in generic_map_lookup_batch()
2074 return -EINVAL; in generic_map_lookup_batch()
2076 if ((attr->batch.elem_flags & BPF_F_LOCK) && in generic_map_lookup_batch()
2077 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) in generic_map_lookup_batch()
2078 return -EINVAL; in generic_map_lookup_batch()
2082 max_count = attr->batch.count; in generic_map_lookup_batch()
2086 if (put_user(0, &uattr->batch.count)) in generic_map_lookup_batch()
2087 return -EFAULT; in generic_map_lookup_batch()
2089 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
2091 return -ENOMEM; in generic_map_lookup_batch()
2093 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
2096 return -ENOMEM; in generic_map_lookup_batch()
2099 err = -EFAULT; in generic_map_lookup_batch()
2101 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) in generic_map_lookup_batch()
2104 value = key + map->key_size; in generic_map_lookup_batch()
2110 err = map->ops->map_get_next_key(map, prev_key, key); in generic_map_lookup_batch()
2115 attr->batch.elem_flags); in generic_map_lookup_batch()
2117 if (err == -ENOENT) in generic_map_lookup_batch()
2123 if (copy_to_user(keys + cp * map->key_size, key, in generic_map_lookup_batch()
2124 map->key_size)) { in generic_map_lookup_batch()
2125 err = -EFAULT; in generic_map_lookup_batch()
2129 err = -EFAULT; in generic_map_lookup_batch()
2142 if (err == -EFAULT) in generic_map_lookup_batch()
2145 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) || in generic_map_lookup_batch()
2146 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) in generic_map_lookup_batch()
2147 err = -EFAULT; in generic_map_lookup_batch()
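The batch commands above move many elements per syscall: in_batch/out_batch carry the kernel's resume cursor, batch.count is in/out, and -ENOENT marks completion (possibly on the same call that returns the final elements). A sketch assuming libbpf and 4-byte keys/values, so the cursor also fits in a __u32:

#include <bpf/bpf.h>
#include <errno.h>
#include <stdio.h>

static void dump_batch(int map_fd)
{
    int keys[64], vals[64];
    __u32 in = 0, out = 0, count;
    void *in_p = NULL;   /* NULL in_batch: start from the beginning */
    int err;

    do {
        count = 64;
        err = bpf_map_lookup_batch(map_fd, in_p, &out,
                                   keys, vals, &count, NULL);
        for (__u32 i = 0; i < count; i++)
            printf("%d -> %d\n", keys[i], vals[i]);
        in = out;      /* feed the cursor back in */
        in_p = &in;
    } while (!err);
    if (errno != ENOENT)
        perror("bpf_map_lookup_batch");
}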
2159 void __user *ukey = u64_to_user_ptr(attr->key); in map_lookup_and_delete_elem()
2160 void __user *uvalue = u64_to_user_ptr(attr->value); in map_lookup_and_delete_elem()
2167 return -EINVAL; in map_lookup_and_delete_elem()
2169 if (attr->flags & ~BPF_F_LOCK) in map_lookup_and_delete_elem()
2170 return -EINVAL; in map_lookup_and_delete_elem()
2172 CLASS(fd, f)(attr->map_fd); in map_lookup_and_delete_elem()
2179 err = -EPERM; in map_lookup_and_delete_elem()
2183 if (attr->flags && in map_lookup_and_delete_elem()
2184 (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
2185 map->map_type == BPF_MAP_TYPE_STACK)) { in map_lookup_and_delete_elem()
2186 err = -EINVAL; in map_lookup_and_delete_elem()
2190 if ((attr->flags & BPF_F_LOCK) && in map_lookup_and_delete_elem()
2191 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in map_lookup_and_delete_elem()
2192 err = -EINVAL; in map_lookup_and_delete_elem()
2196 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_and_delete_elem()
2204 err = -ENOMEM; in map_lookup_and_delete_elem()
2209 err = -ENOTSUPP; in map_lookup_and_delete_elem()
2210 if (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
2211 map->map_type == BPF_MAP_TYPE_STACK) { in map_lookup_and_delete_elem()
2212 err = map->ops->map_pop_elem(map, value); in map_lookup_and_delete_elem()
2213 } else if (map->map_type == BPF_MAP_TYPE_HASH || in map_lookup_and_delete_elem()
2214 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_lookup_and_delete_elem()
2215 map->map_type == BPF_MAP_TYPE_LRU_HASH || in map_lookup_and_delete_elem()
2216 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in map_lookup_and_delete_elem()
2217 map->map_type == BPF_MAP_TYPE_STACK_TRACE) { in map_lookup_and_delete_elem()
2221 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); in map_lookup_and_delete_elem()
2231 err = -EFAULT; in map_lookup_and_delete_elem()
2254 return -EINVAL; in map_freeze()
2256 CLASS(fd, f)(attr->map_fd); in map_freeze()
2261 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) in map_freeze()
2262 return -ENOTSUPP; in map_freeze()
2265 return -EPERM; in map_freeze()
2267 mutex_lock(&map->freeze_mutex); in map_freeze()
2269 err = -EBUSY; in map_freeze()
2272 if (READ_ONCE(map->frozen)) { in map_freeze()
2273 err = -EBUSY; in map_freeze()
2277 WRITE_ONCE(map->frozen, true); in map_freeze()
2279 mutex_unlock(&map->freeze_mutex); in map_freeze()
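map_freeze() makes a map permanently write-protected from the syscall side (updates and deletes fail with -EPERM), which is what makes the read-only mmap guarantees above meaningful; BPF programs can still write unless BPF_F_RDONLY_PROG was set. A sketch assuming libbpf:

#include <bpf/bpf.h>
#include <errno.h>
#include <stdio.h>

static int freeze_and_verify(int map_fd, const void *key, const void *val)
{
    if (bpf_map_freeze(map_fd))
        return -errno;   /* e.g. -EBUSY if writably mmapped */
    if (bpf_map_update_elem(map_fd, key, val, BPF_ANY) && errno == EPERM)
        printf("map is frozen as expected\n");
    return 0;
}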
2299 return -EINVAL; in find_prog_type()
2303 return -EINVAL; in find_prog_type()
2305 if (!bpf_prog_is_offloaded(prog->aux)) in find_prog_type()
2306 prog->aux->ops = ops; in find_prog_type()
2308 prog->aux->ops = &bpf_offload_prog_ops; in find_prog_type()
2309 prog->type = type; in find_prog_type()
2338 audit_log_format(ab, "prog-id=%u op=%s", in bpf_audit_prog()
2339 prog->aux->id, bpf_audit_str[op]); in bpf_audit_prog()
2351 prog->aux->id = id; in bpf_prog_alloc_id()
2357 return -ENOSPC; in bpf_prog_alloc_id()
2368 * disappears - even if someone grabs an fd to them they are unusable, in bpf_prog_free_id()
2371 if (!prog->aux->id) in bpf_prog_free_id()
2375 idr_remove(&prog_idr, prog->aux->id); in bpf_prog_free_id()
2376 prog->aux->id = 0; in bpf_prog_free_id()
2384 kvfree(aux->func_info); in __bpf_prog_put_rcu()
2385 kfree(aux->func_info_aux); in __bpf_prog_put_rcu()
2386 free_uid(aux->user); in __bpf_prog_put_rcu()
2387 security_bpf_prog_free(aux->prog); in __bpf_prog_put_rcu()
2388 bpf_prog_free(aux->prog); in __bpf_prog_put_rcu()
2394 btf_put(prog->aux->btf); in __bpf_prog_put_noref()
2395 module_put(prog->aux->mod); in __bpf_prog_put_noref()
2396 kvfree(prog->aux->jited_linfo); in __bpf_prog_put_noref()
2397 kvfree(prog->aux->linfo); in __bpf_prog_put_noref()
2398 kfree(prog->aux->kfunc_tab); in __bpf_prog_put_noref()
2399 kfree(prog->aux->ctx_arg_info); in __bpf_prog_put_noref()
2400 if (prog->aux->attach_btf) in __bpf_prog_put_noref()
2401 btf_put(prog->aux->attach_btf); in __bpf_prog_put_noref()
2404 if (prog->sleepable) in __bpf_prog_put_noref()
2405 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); in __bpf_prog_put_noref()
2407 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); in __bpf_prog_put_noref()
2409 __bpf_prog_put_rcu(&prog->aux->rcu); in __bpf_prog_put_noref()
2419 prog = aux->prog; in bpf_prog_put_deferred()
2428 struct bpf_prog_aux *aux = prog->aux; in __bpf_prog_put()
2430 if (atomic64_dec_and_test(&aux->refcnt)) { in __bpf_prog_put()
2432 INIT_WORK(&aux->work, bpf_prog_put_deferred); in __bpf_prog_put()
2433 schedule_work(&aux->work); in __bpf_prog_put()
2435 bpf_prog_put_deferred(&aux->work); in __bpf_prog_put()
2448 struct bpf_prog *prog = filp->private_data; in bpf_prog_release()
2465 stats = this_cpu_ptr(prog->stats); in bpf_prog_inc_misses_counter()
2466 flags = u64_stats_update_begin_irqsave(&stats->syncp); in bpf_prog_inc_misses_counter()
2467 u64_stats_inc(&stats->misses); in bpf_prog_inc_misses_counter()
2468 u64_stats_update_end_irqrestore(&stats->syncp, flags); in bpf_prog_inc_misses_counter()
2482 st = per_cpu_ptr(prog->stats, cpu); in bpf_prog_get_stats()
2484 start = u64_stats_fetch_begin(&st->syncp); in bpf_prog_get_stats()
2485 tnsecs = u64_stats_read(&st->nsecs); in bpf_prog_get_stats()
2486 tcnt = u64_stats_read(&st->cnt); in bpf_prog_get_stats()
2487 tmisses = u64_stats_read(&st->misses); in bpf_prog_get_stats()
2488 } while (u64_stats_fetch_retry(&st->syncp, start)); in bpf_prog_get_stats()
2493 stats->nsecs = nsecs; in bpf_prog_get_stats()
2494 stats->cnt = cnt; in bpf_prog_get_stats()
2495 stats->misses = misses; in bpf_prog_get_stats()
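The per-CPU totals summed in bpf_prog_get_stats() surface to user space through struct bpf_prog_info, but only while stats collection is enabled (BPF_ENABLE_STATS, or the kernel.bpf_stats_enabled sysctl). A sketch assuming libbpf; the field names follow current uapi headers:

#include <bpf/bpf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void show_prog_stats(int prog_fd)
{
    struct bpf_prog_info info;
    __u32 len = sizeof(info);
    int stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);

    memset(&info, 0, sizeof(info));
    if (!bpf_obj_get_info_by_fd(prog_fd, &info, &len))
        printf("run_cnt=%llu run_time_ns=%llu misses=%llu\n",
               (unsigned long long)info.run_cnt,
               (unsigned long long)info.run_time_ns,
               (unsigned long long)info.recursion_misses);
    if (stats_fd >= 0)
        close(stats_fd);   /* stats stop when this fd closes */
}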
2501 const struct bpf_prog *prog = filp->private_data; in bpf_prog_show_fdinfo()
2502 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; in bpf_prog_show_fdinfo()
2506 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); in bpf_prog_show_fdinfo()
2517 prog->type, in bpf_prog_show_fdinfo()
2518 prog->jited, in bpf_prog_show_fdinfo()
2520 prog->pages * 1ULL << PAGE_SHIFT, in bpf_prog_show_fdinfo()
2521 prog->aux->id, in bpf_prog_show_fdinfo()
2525 prog->aux->verified_insns); in bpf_prog_show_fdinfo()
2546 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, in bpf_prog_new_fd()
2552 atomic64_add(i, &prog->aux->refcnt); in bpf_prog_add()
2563 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); in bpf_prog_sub()
2569 atomic64_inc(&prog->aux->refcnt); in bpf_prog_inc()
2578 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); in bpf_prog_inc_not_zero()
2581 return ERR_PTR(-ENOENT); in bpf_prog_inc_not_zero()
2594 if (prog->type != *attach_type) in bpf_prog_get_ok()
2596 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) in bpf_prog_get_ok()
2609 return ERR_PTR(-EBADF); in __bpf_prog_get()
2610 if (fd_file(f)->f_op != &bpf_prog_fops) in __bpf_prog_get()
2611 return ERR_PTR(-EINVAL); in __bpf_prog_get()
2613 prog = fd_file(f)->private_data; in __bpf_prog_get()
2615 return ERR_PTR(-EINVAL); in __bpf_prog_get()
2647 switch (attr->prog_type) { in bpf_prog_load_fixup_attach_type()
2650 * exist so checking for non-zero is the way to go here. in bpf_prog_load_fixup_attach_type()
2652 if (!attr->expected_attach_type) in bpf_prog_load_fixup_attach_type()
2653 attr->expected_attach_type = in bpf_prog_load_fixup_attach_type()
2657 if (!attr->expected_attach_type) in bpf_prog_load_fixup_attach_type()
2658 attr->expected_attach_type = in bpf_prog_load_fixup_attach_type()
2672 return -EINVAL; in bpf_prog_load_check_attach()
2675 return -EINVAL; in bpf_prog_load_check_attach()
2684 return -EINVAL; in bpf_prog_load_check_attach()
2689 return -EINVAL; in bpf_prog_load_check_attach()
2693 return -EINVAL; in bpf_prog_load_check_attach()
2704 return -EINVAL; in bpf_prog_load_check_attach()
2727 return -EINVAL; in bpf_prog_load_check_attach()
2735 return -EINVAL; in bpf_prog_load_check_attach()
2743 return -EINVAL; in bpf_prog_load_check_attach()
2748 return -EINVAL; in bpf_prog_load_check_attach()
2755 return -EINVAL; in bpf_prog_load_check_attach()
2760 return -EINVAL; in bpf_prog_load_check_attach()
2764 return -EINVAL; in bpf_prog_load_check_attach()
2823 bpfptr_t usig = make_bpfptr(attr->signature, is_kernel); in bpf_prog_verify_signature()
2829 if (system_keyring_id_check(attr->keyring_id) == 0) in bpf_prog_verify_signature()
2830 key = bpf_lookup_system_key(attr->keyring_id); in bpf_prog_verify_signature()
2832 key = bpf_lookup_user_key(attr->keyring_id, 0); in bpf_prog_verify_signature()
2835 return -EINVAL; in bpf_prog_verify_signature()
2837 sig = kvmemdup_bpfptr(usig, attr->signature_size); in bpf_prog_verify_signature()
2840 return -ENOMEM; in bpf_prog_verify_signature()
2844 attr->signature_size); in bpf_prog_verify_signature()
2845 bpf_dynptr_init(&insns_ptr, prog->insnsi, BPF_DYNPTR_TYPE_LOCAL, 0, in bpf_prog_verify_signature()
2846 prog->len * sizeof(struct bpf_insn)); in bpf_prog_verify_signature()
2861 enum bpf_prog_type type = attr->prog_type; in bpf_prog_load()
2870 return -EINVAL; in bpf_prog_load()
2872 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | in bpf_prog_load()
2881 return -EINVAL; in bpf_prog_load()
2885 if (attr->prog_flags & BPF_F_TOKEN_FD) { in bpf_prog_load()
2886 token = bpf_token_get_from_fd(attr->prog_token_fd); in bpf_prog_load()
2891 * system-wide capabilities checks in bpf_prog_load()
2894 !bpf_token_allow_prog_type(token, attr->prog_type, in bpf_prog_load()
2895 attr->expected_attach_type)) { in bpf_prog_load()
2902 err = -EPERM; in bpf_prog_load()
2905 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && in bpf_prog_load()
2919 if (attr->insn_cnt == 0 || in bpf_prog_load()
2920 attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) { in bpf_prog_load()
2921 err = -E2BIG; in bpf_prog_load()
2937 if (attr->attach_prog_fd) { in bpf_prog_load()
2938 dst_prog = bpf_prog_get(attr->attach_prog_fd); in bpf_prog_load()
2941 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); in bpf_prog_load()
2943 err = -EINVAL; in bpf_prog_load()
2951 err = -ENOTSUPP; in bpf_prog_load()
2955 } else if (attr->attach_btf_id) { in bpf_prog_load()
2963 err = -EINVAL; in bpf_prog_load()
2969 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, in bpf_prog_load()
2970 attach_btf, attr->attach_btf_id, in bpf_prog_load()
2976 err = -EINVAL; in bpf_prog_load()
2981 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); in bpf_prog_load()
2987 err = -EINVAL; in bpf_prog_load()
2991 prog->expected_attach_type = attr->expected_attach_type; in bpf_prog_load()
2992 prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE); in bpf_prog_load()
2993 prog->aux->attach_btf = attach_btf; in bpf_prog_load()
2994 prog->aux->attach_btf_id = attr->attach_btf_id; in bpf_prog_load()
2995 prog->aux->dst_prog = dst_prog; in bpf_prog_load()
2996 prog->aux->dev_bound = !!attr->prog_ifindex; in bpf_prog_load()
2997 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; in bpf_prog_load()
2999 /* move token into prog->aux, reuse taken refcnt */ in bpf_prog_load()
3000 prog->aux->token = token; in bpf_prog_load()
3003 prog->aux->user = get_current_user(); in bpf_prog_load()
3004 prog->len = attr->insn_cnt; in bpf_prog_load()
3006 err = -EFAULT; in bpf_prog_load()
3007 if (copy_from_bpfptr(prog->insns, in bpf_prog_load()
3008 make_bpfptr(attr->insns, uattr.is_kernel), in bpf_prog_load()
3013 make_bpfptr(attr->license, uattr.is_kernel), in bpf_prog_load()
3014 sizeof(license) - 1) < 0) in bpf_prog_load()
3016 license[sizeof(license) - 1] = 0; in bpf_prog_load()
3018 /* eBPF programs must be GPL compatible to use GPL-ed functions */ in bpf_prog_load()
3019 prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0; in bpf_prog_load()
3021 if (attr->signature) { in bpf_prog_load()
3027 prog->orig_prog = NULL; in bpf_prog_load()
3028 prog->jited = 0; in bpf_prog_load()
3030 atomic64_set(&prog->aux->refcnt, 1); in bpf_prog_load()
3032 if (bpf_prog_is_dev_bound(prog->aux)) { in bpf_prog_load()
3039 bpf_prog_is_dev_bound(dst_prog->aux)) { in bpf_prog_load()
3057 dst_prog->type == BPF_PROG_TYPE_TRACING) { in bpf_prog_load()
3058 prog->aux->attach_tracing_prog = true; in bpf_prog_load()
3066 prog->aux->load_time = ktime_get_boottime_ns(); in bpf_prog_load()
3067 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, in bpf_prog_load()
3068 sizeof(attr->prog_name)); in bpf_prog_load()
3117 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt); in bpf_prog_load()
3123 free_uid(prog->aux->user); in bpf_prog_load()
3124 if (prog->aux->attach_btf) in bpf_prog_load()
3125 btf_put(prog->aux->attach_btf); in bpf_prog_load()
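The bpf_prog_load() fragments above cover flag validation, instruction copy-in and the GPL-compatibility check on the license string. A minimal user-space load that exercises this path, assuming libbpf >= 0.7: two instructions (r0 = 0; exit), a "GPL" license so gpl_compatible is set, and a name that bpf_obj_name_cpy() accepts:

#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <stdio.h>

int main(void)
{
    struct bpf_insn insns[] = {
        { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
        { .code = BPF_JMP | BPF_EXIT },
    };
    int fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "min_prog",
                           "GPL", insns,
                           sizeof(insns) / sizeof(insns[0]), NULL);

    if (fd < 0)
        perror("bpf_prog_load");
    return fd < 0;
}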
3138 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) in bpf_obj_pin()
3139 return -EINVAL; in bpf_obj_pin()
3142 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) in bpf_obj_pin()
3143 return -EINVAL; in bpf_obj_pin()
3145 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; in bpf_obj_pin()
3146 return bpf_obj_pin_user(attr->bpf_fd, path_fd, in bpf_obj_pin()
3147 u64_to_user_ptr(attr->pathname)); in bpf_obj_pin()
3154 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || in bpf_obj_get()
3155 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) in bpf_obj_get()
3156 return -EINVAL; in bpf_obj_get()
3159 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) in bpf_obj_get()
3160 return -EINVAL; in bpf_obj_get()
3162 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; in bpf_obj_get()
3163 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), in bpf_obj_get()
3164 attr->file_flags); in bpf_obj_get()
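bpf_obj_pin()/bpf_obj_get() give an object a bpffs path so its fd can be recovered after the creating process exits. A round-trip sketch assuming libbpf and a mounted /sys/fs/bpf (the path below is illustrative):

#include <bpf/bpf.h>
#include <unistd.h>

static int pin_and_reopen(int map_fd)
{
    const char *path = "/sys/fs/bpf/example_map";
    int fd2;

    if (bpf_obj_pin(map_fd, path))
        return -1;
    fd2 = bpf_obj_get(path);   /* a new fd to the same map */
    if (fd2 < 0)
        return -1;
    close(fd2);
    unlink(path);   /* unpinning is just removing the bpffs file */
    return 0;
}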
3170 * detachment due to RCU Tasks Trace-based lifetime protection scheme.
3171 * BPF program itself can be non-sleepable, yet, because it's transitively
3179 WARN_ON(ops->dealloc && ops->dealloc_deferred); in bpf_link_init_sleepable()
3180 atomic64_set(&link->refcnt, 1); in bpf_link_init_sleepable()
3181 link->type = type; in bpf_link_init_sleepable()
3182 link->sleepable = sleepable; in bpf_link_init_sleepable()
3183 link->id = 0; in bpf_link_init_sleepable()
3184 link->ops = ops; in bpf_link_init_sleepable()
3185 link->prog = prog; in bpf_link_init_sleepable()
3186 link->attach_type = attach_type; in bpf_link_init_sleepable()
3217 primer->link->prog = NULL; in bpf_link_cleanup()
3218 bpf_link_free_id(primer->id); in bpf_link_cleanup()
3219 fput(primer->file); in bpf_link_cleanup()
3220 put_unused_fd(primer->fd); in bpf_link_cleanup()
3225 atomic64_inc(&link->refcnt); in bpf_link_inc()
3231 if (link->prog) in bpf_link_dealloc()
3232 bpf_prog_put(link->prog); in bpf_link_dealloc()
3235 if (link->ops->dealloc_deferred) in bpf_link_dealloc()
3236 link->ops->dealloc_deferred(link); in bpf_link_dealloc()
3238 link->ops->dealloc(link); in bpf_link_dealloc()
3259 const struct bpf_link_ops *ops = link->ops; in bpf_link_free()
3261 bpf_link_free_id(link->id); in bpf_link_free()
3263 if (link->prog) in bpf_link_free()
3264 ops->release(link); in bpf_link_free()
3265 if (ops->dealloc_deferred) { in bpf_link_free()
3274 if (link->sleepable || (link->prog && link->prog->sleepable)) in bpf_link_free()
3275 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); in bpf_link_free()
3277 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); in bpf_link_free()
3278 } else if (ops->dealloc) { in bpf_link_free()
3295 if (!atomic64_dec_and_test(&link->refcnt)) in bpf_link_put()
3298 INIT_WORK(&link->work, bpf_link_put_deferred); in bpf_link_put()
3299 schedule_work(&link->work); in bpf_link_put()
3305 if (!atomic64_dec_and_test(&link->refcnt)) in bpf_link_put_direct()
3312 struct bpf_link *link = filp->private_data; in bpf_link_release()
3332 const struct bpf_link *link = filp->private_data; in bpf_link_show_fdinfo()
3333 const struct bpf_prog *prog = link->prog; in bpf_link_show_fdinfo()
3334 enum bpf_link_type type = link->type; in bpf_link_show_fdinfo()
3335 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; in bpf_link_show_fdinfo()
3338 if (link->type == BPF_LINK_TYPE_KPROBE_MULTI) in bpf_link_show_fdinfo()
3339 seq_printf(m, "link_type:\t%s\n", link->flags == BPF_F_KPROBE_MULTI_RETURN ? in bpf_link_show_fdinfo()
3341 else if (link->type == BPF_LINK_TYPE_UPROBE_MULTI) in bpf_link_show_fdinfo()
3342 seq_printf(m, "link_type:\t%s\n", link->flags == BPF_F_UPROBE_MULTI_RETURN ? in bpf_link_show_fdinfo()
3350 seq_printf(m, "link_id:\t%u\n", link->id); in bpf_link_show_fdinfo()
3353 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); in bpf_link_show_fdinfo()
3358 prog->aux->id); in bpf_link_show_fdinfo()
3360 if (link->ops->show_fdinfo) in bpf_link_show_fdinfo()
3361 link->ops->show_fdinfo(link, m); in bpf_link_show_fdinfo()
3367 struct bpf_link *link = file->private_data; in bpf_link_poll()
3369 return link->ops->poll(file, pts); in bpf_link_poll()
3404 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
3407 * user-space, if bpf_link is successfully attached. If not, bpf_link and
3408 * pre-allocated resources are to be freed with bpf_cleanup() call. All the
3434 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, in bpf_link_prime()
3442 primer->link = link; in bpf_link_prime()
3443 primer->file = file; in bpf_link_prime()
3444 primer->fd = fd; in bpf_link_prime()
3445 primer->id = id; in bpf_link_prime()
3451 /* make bpf_link fetchable by ID */ in bpf_link_settle()
3453 primer->link->id = primer->id; in bpf_link_settle()
3455 /* make bpf_link fetchable by FD */ in bpf_link_settle()
3456 fd_install(primer->fd, primer->file); in bpf_link_settle()
3458 return primer->fd; in bpf_link_settle()
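The prime/settle split above ensures a failed attach never exposes a half-initialized link fd to user space; from the caller's side a successful BPF_LINK_CREATE simply returns the fd that bpf_link_settle() installed. A sketch assuming libbpf, with a cgroup attach as the example (prog_fd is assumed to be a loaded BPF_PROG_TYPE_CGROUP_SKB program):

#include <bpf/bpf.h>
#include <fcntl.h>
#include <unistd.h>

static int attach_cgroup_link(int prog_fd, const char *cgroup_path)
{
    int cg_fd = open(cgroup_path, O_RDONLY | O_DIRECTORY);
    int link_fd;

    if (cg_fd < 0)
        return -1;
    link_fd = bpf_link_create(prog_fd, cg_fd,
                              BPF_CGROUP_INET_INGRESS, NULL);
    close(cg_fd);
    return link_fd;   /* close() detaches unless the link is pinned */
}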
3463 return anon_inode_getfd("bpf-link", in bpf_link_new_fd()
3464 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, in bpf_link_new_fd()
3474 return ERR_PTR(-EBADF); in bpf_link_get_from_fd()
3475 if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll) in bpf_link_get_from_fd()
3476 return ERR_PTR(-EINVAL); in bpf_link_get_from_fd()
3478 link = fd_file(f)->private_data; in bpf_link_get_from_fd()
3489 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, in bpf_tracing_link_release()
3490 tr_link->trampoline, in bpf_tracing_link_release()
3491 tr_link->tgt_prog)); in bpf_tracing_link_release()
3493 bpf_trampoline_put(tr_link->trampoline); in bpf_tracing_link_release()
3496 if (tr_link->tgt_prog) in bpf_tracing_link_release()
3497 bpf_prog_put(tr_link->tgt_prog); in bpf_tracing_link_release()
3515 bpf_trampoline_unpack_key(tr_link->trampoline->key, in bpf_tracing_link_show_fdinfo()
3522 link->attach_type, in bpf_tracing_link_show_fdinfo()
3525 tr_link->link.cookie); in bpf_tracing_link_show_fdinfo()
3534 info->tracing.attach_type = link->attach_type; in bpf_tracing_link_fill_link_info()
3535 info->tracing.cookie = tr_link->link.cookie; in bpf_tracing_link_fill_link_info()
3536 bpf_trampoline_unpack_key(tr_link->trampoline->key, in bpf_tracing_link_fill_link_info()
3537 &info->tracing.target_obj_id, in bpf_tracing_link_fill_link_info()
3538 &info->tracing.target_btf_id); in bpf_tracing_link_fill_link_info()
3563 switch (prog->type) { in bpf_tracing_prog_attach()
3565 if (prog->expected_attach_type != BPF_TRACE_FENTRY && in bpf_tracing_prog_attach()
3566 prog->expected_attach_type != BPF_TRACE_FEXIT && in bpf_tracing_prog_attach()
3567 prog->expected_attach_type != BPF_MODIFY_RETURN) { in bpf_tracing_prog_attach()
3568 err = -EINVAL; in bpf_tracing_prog_attach()
3573 if (prog->expected_attach_type != 0) { in bpf_tracing_prog_attach()
3574 err = -EINVAL; in bpf_tracing_prog_attach()
3579 if (prog->expected_attach_type != BPF_LSM_MAC) { in bpf_tracing_prog_attach()
3580 err = -EINVAL; in bpf_tracing_prog_attach()
3585 err = -EINVAL; in bpf_tracing_prog_attach()
3590 err = -EINVAL; in bpf_tracing_prog_attach()
3601 if (prog->type != BPF_PROG_TYPE_EXT) { in bpf_tracing_prog_attach()
3602 err = -EINVAL; in bpf_tracing_prog_attach()
3618 err = -ENOMEM; in bpf_tracing_prog_attach()
3621 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, in bpf_tracing_prog_attach()
3624 link->link.cookie = bpf_cookie; in bpf_tracing_prog_attach()
3626 mutex_lock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
3630 * - if prog->aux->dst_trampoline is set, the program was just loaded in bpf_tracing_prog_attach()
3632 * in prog->aux in bpf_tracing_prog_attach()
3634 * - if prog->aux->dst_trampoline is NULL, the program has already been in bpf_tracing_prog_attach()
3637 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + in bpf_tracing_prog_attach()
3640 * - if tgt_prog == NULL when this function was called using the old in bpf_tracing_prog_attach()
3641 * raw_tracepoint_open API, and we need a target from prog->aux in bpf_tracing_prog_attach()
3643 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program in bpf_tracing_prog_attach()
3644 * was detached and is going for re-attachment. in bpf_tracing_prog_attach()
3646 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf in bpf_tracing_prog_attach()
3650 if (!prog->aux->dst_trampoline && !tgt_prog) { in bpf_tracing_prog_attach()
3652 * Allow re-attach for TRACING and LSM programs. If it's in bpf_tracing_prog_attach()
3655 * re-attach in separate code path. in bpf_tracing_prog_attach()
3657 if (prog->type != BPF_PROG_TYPE_TRACING && in bpf_tracing_prog_attach()
3658 prog->type != BPF_PROG_TYPE_LSM) { in bpf_tracing_prog_attach()
3659 err = -EINVAL; in bpf_tracing_prog_attach()
3662 /* We can allow re-attach only if we have valid attach_btf. */ in bpf_tracing_prog_attach()
3663 if (!prog->aux->attach_btf) { in bpf_tracing_prog_attach()
3664 err = -EINVAL; in bpf_tracing_prog_attach()
3667 btf_id = prog->aux->attach_btf_id; in bpf_tracing_prog_attach()
3668 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); in bpf_tracing_prog_attach()
3671 if (!prog->aux->dst_trampoline || in bpf_tracing_prog_attach()
3672 (key && key != prog->aux->dst_trampoline->key)) { in bpf_tracing_prog_attach()
3685 module_put(prog->aux->mod); in bpf_tracing_prog_attach()
3686 prog->aux->mod = tgt_info.tgt_mod; in bpf_tracing_prog_attach()
3691 err = -ENOMEM; in bpf_tracing_prog_attach()
3700 * prog->aux are cleared below. in bpf_tracing_prog_attach()
3702 tr = prog->aux->dst_trampoline; in bpf_tracing_prog_attach()
3703 tgt_prog = prog->aux->dst_prog; in bpf_tracing_prog_attach()
3706 err = bpf_link_prime(&link->link.link, &link_primer); in bpf_tracing_prog_attach()
3710 err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog); in bpf_tracing_prog_attach()
3717 link->tgt_prog = tgt_prog; in bpf_tracing_prog_attach()
3718 link->trampoline = tr; in bpf_tracing_prog_attach()
3720 /* Always clear the trampoline and target prog from prog->aux to make in bpf_tracing_prog_attach()
3722 * program is (re-)attached to another target. in bpf_tracing_prog_attach()
3724 if (prog->aux->dst_prog && in bpf_tracing_prog_attach()
3725 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) in bpf_tracing_prog_attach()
3727 bpf_prog_put(prog->aux->dst_prog); in bpf_tracing_prog_attach()
3728 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) in bpf_tracing_prog_attach()
3730 bpf_trampoline_put(prog->aux->dst_trampoline); in bpf_tracing_prog_attach()
3732 prog->aux->dst_prog = NULL; in bpf_tracing_prog_attach()
3733 prog->aux->dst_trampoline = NULL; in bpf_tracing_prog_attach()
3734 mutex_unlock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
3738 if (tr && tr != prog->aux->dst_trampoline) in bpf_tracing_prog_attach()
3740 mutex_unlock(&prog->aux->dst_mutex); in bpf_tracing_prog_attach()
3753 bpf_probe_unregister(raw_tp->btp, raw_tp); in bpf_raw_tp_link_release()
3754 bpf_put_raw_tracepoint(raw_tp->btp); in bpf_raw_tp_link_release()
3774 raw_tp_link->btp->tp->name, in bpf_raw_tp_link_show_fdinfo()
3775 raw_tp_link->cookie); in bpf_raw_tp_link_show_fdinfo()
3783 return -EFAULT; in bpf_copy_to_user()
3787 if (copy_to_user(ubuf, buf, ulen - 1)) in bpf_copy_to_user()
3788 return -EFAULT; in bpf_copy_to_user()
3789 if (put_user(zero, ubuf + ulen - 1)) in bpf_copy_to_user()
3790 return -EFAULT; in bpf_copy_to_user()
3791 return -ENOSPC; in bpf_copy_to_user()
3802 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); in bpf_raw_tp_link_fill_link_info()
3803 const char *tp_name = raw_tp_link->btp->tp->name; in bpf_raw_tp_link_fill_link_info()
3804 u32 ulen = info->raw_tracepoint.tp_name_len; in bpf_raw_tp_link_fill_link_info()
3808 return -EINVAL; in bpf_raw_tp_link_fill_link_info()
3810 info->raw_tracepoint.tp_name_len = tp_len + 1; in bpf_raw_tp_link_fill_link_info()
3811 info->raw_tracepoint.cookie = raw_tp_link->cookie; in bpf_raw_tp_link_fill_link_info()
3835 struct perf_event *event = perf_link->perf_file->private_data; in bpf_perf_link_release()
3838 fput(perf_link->perf_file); in bpf_perf_link_release()
3860 return -EINVAL; in bpf_perf_link_fill_common()
3884 return -EFAULT; in bpf_perf_link_fill_common()
3899 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); in bpf_perf_link_fill_kprobe()
3900 ulen = info->perf_event.kprobe.name_len; in bpf_perf_link_fill_kprobe()
3906 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; in bpf_perf_link_fill_kprobe()
3908 info->perf_event.type = BPF_PERF_EVENT_KPROBE; in bpf_perf_link_fill_kprobe()
3909 info->perf_event.kprobe.name_len = ulen; in bpf_perf_link_fill_kprobe()
3910 info->perf_event.kprobe.offset = offset; in bpf_perf_link_fill_kprobe()
3911 info->perf_event.kprobe.missed = missed; in bpf_perf_link_fill_kprobe()
3914 info->perf_event.kprobe.addr = addr; in bpf_perf_link_fill_kprobe()
3915 info->perf_event.kprobe.cookie = event->bpf_cookie; in bpf_perf_link_fill_kprobe()
3942 event->bpf_cookie); in bpf_perf_link_fdinfo_kprobe()
3955 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); in bpf_perf_link_fill_uprobe()
3956 ulen = info->perf_event.uprobe.name_len; in bpf_perf_link_fill_uprobe()
3963 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; in bpf_perf_link_fill_uprobe()
3965 info->perf_event.type = BPF_PERF_EVENT_UPROBE; in bpf_perf_link_fill_uprobe()
3966 info->perf_event.uprobe.name_len = ulen; in bpf_perf_link_fill_uprobe()
3967 info->perf_event.uprobe.offset = offset; in bpf_perf_link_fill_uprobe()
3968 info->perf_event.uprobe.cookie = event->bpf_cookie; in bpf_perf_link_fill_uprobe()
3969 info->perf_event.uprobe.ref_ctr_offset = ref_ctr_offset; in bpf_perf_link_fill_uprobe()
3995 event->bpf_cookie); in bpf_perf_link_fdinfo_uprobe()
4003 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) in bpf_perf_link_fill_probe()
4007 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) in bpf_perf_link_fill_probe()
4010 return -EOPNOTSUPP; in bpf_perf_link_fill_probe()
4020 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); in bpf_perf_link_fill_tracepoint()
4021 ulen = info->perf_event.tracepoint.name_len; in bpf_perf_link_fill_tracepoint()
4026 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; in bpf_perf_link_fill_tracepoint()
4027 info->perf_event.tracepoint.name_len = ulen; in bpf_perf_link_fill_tracepoint()
4028 info->perf_event.tracepoint.cookie = event->bpf_cookie; in bpf_perf_link_fill_tracepoint()
4035 info->perf_event.event.type = event->attr.type; in bpf_perf_link_fill_perf_event()
4036 info->perf_event.event.config = event->attr.config; in bpf_perf_link_fill_perf_event()
4037 info->perf_event.event.cookie = event->bpf_cookie; in bpf_perf_link_fill_perf_event()
4038 info->perf_event.type = BPF_PERF_EVENT_EVENT; in bpf_perf_link_fill_perf_event()
4049 event = perf_get_event(perf_link->perf_file); in bpf_perf_link_fill_link_info()
4053 switch (event->prog->type) { in bpf_perf_link_fill_link_info()
4061 return -EOPNOTSUPP; in bpf_perf_link_fill_link_info()
4073 event->attr.type, event->attr.config, in bpf_perf_event_link_show_fdinfo()
4074 "event", event->bpf_cookie); in bpf_perf_event_link_show_fdinfo()
4093 name, "tracepoint", event->bpf_cookie); in bpf_tracepoint_link_show_fdinfo()
4100 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) in bpf_probe_link_show_fdinfo()
4105 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) in bpf_probe_link_show_fdinfo()
4117 event = perf_get_event(perf_link->perf_file); in bpf_perf_link_show_fdinfo()
4121 switch (event->prog->type) { in bpf_perf_link_show_fdinfo()
/* in bpf_perf_link_attach() */
	if (attr->link_create.flags)
		return -EINVAL;

	perf_file = perf_event_get(attr->link_create.target_fd);
	/* ... */
		err = -ENOMEM;
	/* ... */
	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog,
		      attr->link_create.attach_type);
	link->perf_file = perf_file;

	err = bpf_link_prime(&link->link, &link_primer);
	/* ... */
	event = perf_file->private_data;
	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
	/* ... */
	return -EOPNOTSUPP;
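/*
 * Example (illustration only, not part of this listing): how user space is
 * expected to reach bpf_perf_link_attach() above -- a BPF_LINK_CREATE
 * command whose target_fd is a perf event fd. The wrapper name is ours;
 * error handling is omitted.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_link_create(int prog_fd, int perf_event_fd, __u64 cookie)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;          /* loaded kprobe/tracepoint/perf_event prog */
	attr.link_create.target_fd = perf_event_fd;  /* fd returned by perf_event_open() */
	attr.link_create.attach_type = BPF_PERF_EVENT;
	attr.link_create.perf_event.bpf_cookie = cookie;
	/* link_create.flags must stay zero, as enforced above */
	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}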
/* in bpf_raw_tp_link_attach() */
	switch (prog->type) {
	/* ... */
			return -EINVAL;
		if (prog->type == BPF_PROG_TYPE_TRACING &&
		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
			tp_name = prog->aux->attach_func_name;
	/* ... */
		if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
			return -EFAULT;
		buf[sizeof(buf) - 1] = 0;
	/* ... */
		return -EINVAL;
	/* ... */
		return -ENOENT;
	/* ... */
		err = -ENOMEM;
	/* ... */
	bpf_link_init_sleepable(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
				/* ... */
				tracepoint_is_faultable(btp->tp));
	link->btp = btp;
	link->cookie = cookie;

	err = bpf_link_prime(&link->link, &link_primer);
	/* ... */
	err = bpf_probe_register(link->btp, link);
/* in bpf_raw_tracepoint_open() */
		return -EINVAL;

	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
	/* ... */
	tp_name = u64_to_user_ptr(attr->raw_tracepoint.name);
	cookie = attr->raw_tracepoint.cookie;
	fd = bpf_raw_tp_link_attach(prog, tp_name, cookie, prog->expected_attach_type);
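/*
 * Example (illustration only): opening a raw tracepoint link via the
 * BPF_RAW_TRACEPOINT_OPEN command handled above. The tracepoint name is a
 * sample value; error handling is omitted.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int raw_tp_open(int prog_fd, const char *tp_name)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = (__u64)(unsigned long)tp_name;
	attr.raw_tracepoint.prog_fd = prog_fd;
	/* returns a link fd; the program detaches when it is closed */
	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}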
/* in bpf_prog_attach_check_attach_type() */
	switch (prog->type) {
	/* ... */
		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
	/* ... */
		if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
			/* cg-skb progs can be loaded by unpriv user.
			 * check permissions at attach time.
			 */
			return -EPERM;

		if (prog->type != ptype)
			return -EINVAL;

		return prog->enforce_expected_attach_type &&
		       prog->expected_attach_type != attach_type ?
		       -EINVAL : 0;
	/* ... */
		return -EINVAL;
	/* ... */
		return -EINVAL;
	/* ... */
		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
		    attach_type != BPF_TRACE_KPROBE_MULTI)
			return -EINVAL;
		if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION &&
		    attach_type != BPF_TRACE_KPROBE_SESSION)
			return -EINVAL;
		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
		    attach_type != BPF_TRACE_UPROBE_MULTI)
			return -EINVAL;
		if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION &&
		    attach_type != BPF_TRACE_UPROBE_SESSION)
			return -EINVAL;
	/* ... */
		return -EINVAL;
	/* ... */
		return -EINVAL;
	/* ... */
		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
			return -EINVAL;
/* in bpf_prog_attach() */
		return -EINVAL;

	ptype = attach_type_to_prog_type(attr->attach_type);
	/* ... */
		return -EINVAL;
	/* ... */
		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
			return -EINVAL;
	/* ... */
		if (attr->attach_flags & ~(BPF_F_ATTACH_MASK_BASE | BPF_F_ATTACH_MASK_MPROG))
			return -EINVAL;
	/* ... */
		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
			return -EINVAL;
		if (attr->relative_fd ||
		    attr->expected_revision)
			return -EINVAL;
	/* ... */
	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	/* ... */
	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
		/* ... */
		return -EINVAL;
	}
	/* ... */
	if (is_cgroup_prog_type(ptype, prog->expected_attach_type, true)) {
	/* ... */
		if (attr->attach_type == BPF_TCX_INGRESS ||
		    attr->attach_type == BPF_TCX_EGRESS)
	/* ... */
		ret = -EINVAL;
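/*
 * Example (illustration only): the legacy BPF_PROG_ATTACH path checked
 * above, attaching a cgroup-skb program to a cgroup directory fd. Attach
 * type and flags are sample values; error handling is omitted.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int cgroup_attach(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;               /* fd of the cgroup directory */
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags = BPF_F_ALLOW_MULTI;    /* must fit BPF_F_ATTACH_MASK_BASE */
	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}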
/* in bpf_prog_detach() */
		return -EINVAL;

	ptype = attach_type_to_prog_type(attr->attach_type);
	/* ... */
			return -EINVAL;
		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
			return -EINVAL;
		if (attr->attach_bpf_fd) {
			prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	/* ... */
		if (attr->attach_flags || attr->relative_fd)
			return -EINVAL;
	} else if (attr->attach_flags ||
		   attr->relative_fd ||
		   attr->expected_revision) {
		return -EINVAL;
	}
	/* ... */
	if (attr->attach_type == BPF_TCX_INGRESS ||
	    attr->attach_type == BPF_TCX_EGRESS)
	/* ... */
		ret = -EINVAL;
/* in bpf_prog_query() */
		return -EPERM;
	/* ... */
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	/* ... */
		return -EINVAL;
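/*
 * Example (illustration only): listing the programs attached at a cgroup
 * with BPF_PROG_QUERY, including inherited ones via BPF_F_QUERY_EFFECTIVE.
 * prog_cnt is in/out: capacity on entry, number of IDs written on return.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int cgroup_query(int cgroup_fd, __u32 *ids, __u32 *cnt)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = cgroup_fd;
	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
	attr.query.prog_ids = (__u64)(unsigned long)ids;
	attr.query.prog_cnt = *cnt;
	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
	if (!err)
		*cnt = attr.query.prog_cnt;
	return err;
}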
/* in bpf_prog_test_run() */
	int ret = -ENOTSUPP;

	/* ... */
		return -EINVAL;

	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
	    (!attr->test.ctx_size_in && attr->test.ctx_in))
		return -EINVAL;

	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
	    (!attr->test.ctx_size_out && attr->test.ctx_out))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	/* ... */
	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);
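/*
 * Example (illustration only): driving prog->aux->ops->test_run from user
 * space with BPF_PROG_TEST_RUN, here feeding one packet to a networking
 * program. Note the ctx_in/ctx_size_in pairing rule enforced above.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long test_run_once(int prog_fd, void *pkt, __u32 pkt_len,
			  void *out, __u32 out_len, __u32 *retval)
{
	union bpf_attr attr;
	long err;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (__u64)(unsigned long)pkt;
	attr.test.data_size_in = pkt_len;
	attr.test.data_out = (__u64)(unsigned long)out;
	attr.test.data_size_out = out_len;
	attr.test.repeat = 1;
	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (!err)
		*retval = attr.test.retval;   /* the program's return code */
	return err;
}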
/* in bpf_obj_get_next_id() */
	u32 next_id = attr->start_id;
	/* ... */
		return -EINVAL;
	/* ... */
		return -EPERM;
	/* ... */
		err = -ENOENT;
	/* ... */
	err = put_user(next_id, &uattr->next_id);

/* in bpf_prog_by_id() */
		return ERR_PTR(-ENOENT);
	/* ... */
		prog = ERR_PTR(-ENOENT);

/* in bpf_prog_get_fd_by_id() */
	u32 id = attr->prog_id;
	/* ... */
		return -EINVAL;
	/* ... */
		return -EPERM;

/* in bpf_map_get_fd_by_id() */
	u32 id = attr->map_id;
	/* ... */
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;
	/* ... */
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	/* ... */
		map = ERR_PTR(-ENOENT);
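/*
 * Example (illustration only): walking all loaded programs with
 * BPF_PROG_GET_NEXT_ID and turning each ID into an fd with
 * BPF_PROG_GET_FD_BY_ID (CAP_SYS_ADMIN required, as checked above).
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static void for_each_prog_fd(void (*fn)(int fd))
{
	union bpf_attr attr;
	__u32 id = 0;
	int fd;

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.start_id = id;
		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
			break;                /* ENOENT: no more programs */
		id = attr.next_id;

		memset(&attr, 0, sizeof(attr));
		attr.prog_id = id;
		fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
		if (fd < 0)
			continue;             /* program went away in between */
		fn(fd);
		close(fd);
	}
}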
/* in bpf_map_from_imm() */
	mutex_lock(&prog->aux->used_maps_mutex);
	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		/* ... */
		if (!map->ops->map_direct_value_meta)
			continue;
		if (!map->ops->map_direct_value_meta(map, addr, off)) {
	/* ... */
	mutex_unlock(&prog->aux->used_maps_mutex);

/* in bpf_insn_prepare_dump() */
	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	/* ... */
	for (i = 0; i < prog->len; i++) {
		/* ... */
			/* fall-through */
		/* ... */
			insns[i].imm = map->id;
/* in set_info_rec_size() */
	if ((info->nr_func_info || info->func_info_rec_size) &&
	    info->func_info_rec_size != sizeof(struct bpf_func_info))
		return -EINVAL;

	if ((info->nr_line_info || info->line_info_rec_size) &&
	    info->line_info_rec_size != sizeof(struct bpf_line_info))
		return -EINVAL;

	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
	    info->jited_line_info_rec_size != sizeof(__u64))
		return -EINVAL;

	info->func_info_rec_size = sizeof(struct bpf_func_info);
	info->line_info_rec_size = sizeof(struct bpf_line_info);
	info->jited_line_info_rec_size = sizeof(__u64);
/* in bpf_prog_get_info_by_fd() */
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	/* ... */
	u32 info_len = attr->info.info_len;
	/* ... */
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);
	info.gpl_compatible = prog->gpl_compatible;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	mutex_lock(&prog->aux->used_maps_mutex);
	/* ... */
	info.nr_map_ids = prog->aux->used_map_cnt;
	/* ... */
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i])) {
				mutex_unlock(&prog->aux->used_maps_mutex);
				return -EFAULT;
			}
	/* ... */
	mutex_unlock(&prog->aux->used_maps_mutex);
	/* ... */
	info.verified_insns = prog->aux->verified_insns;
	if (prog->aux->btf)
		info.btf_id = btf_obj_id(prog->aux->btf);
	/* ... */
	if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
	/* ... */
		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
		/* ... */
			return -ENOMEM;
		/* ... */
			return -EFAULT;
	/* ... */
	if (bpf_prog_is_offloaded(prog->aux)) {
	/* ... */
	if (prog->aux->func_cnt) {
		/* ... */
		for (i = 0; i < prog->aux->func_cnt; i++)
			info.jited_prog_len += prog->aux->func[i]->jited_len;
	} else {
		info.jited_prog_len = prog->jited_len;
	}
	/* ... */
	if (bpf_dump_raw_ok(file->f_cred)) {
		/* ... */
		/* for multi-function programs, copy the JITed
		 * instructions for all the functions
		 */
		if (prog->aux->func_cnt) {
			/* ... */
			for (i = 0; i < prog->aux->func_cnt; i++) {
				len = prog->aux->func[i]->jited_len;
				/* ... */
				img = (u8 *) prog->aux->func[i]->bpf_func;
				/* ... */
					return -EFAULT;
				/* ... */
				free -= len;
			}
		} else {
			if (copy_to_user(uinsns, prog->bpf_func, ulen))
				return -EFAULT;
		}
	/* ... */
	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
	/* ... */
	if (bpf_dump_raw_ok(file->f_cred)) {
		/* ... */
		if (prog->aux->func_cnt) {
			/* ... */
				ksym_addr = (unsigned long)
					prog->aux->func[i]->bpf_func;
			/* ... */
					return -EFAULT;
		} else {
			ksym_addr = (unsigned long) prog->bpf_func;
			/* ... */
				return -EFAULT;
		}
	/* ... */
	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
	/* ... */
	if (bpf_dump_raw_ok(file->f_cred)) {
		/* ... */
		if (prog->aux->func_cnt) {
			/* ... */
				func_len = prog->aux->func[i]->jited_len;
			/* ... */
					return -EFAULT;
		} else {
			func_len = prog->jited_len;
			/* ... */
				return -EFAULT;
		}
	/* ... */
	info.attach_btf_id = prog->aux->attach_btf_id;
	/* ... */
	info.nr_func_info = prog->aux->func_info_cnt;
	/* ... */
		if (copy_to_user(user_finfo, prog->aux->func_info,
				 info.func_info_rec_size * ulen))
			return -EFAULT;
	/* ... */
	info.nr_line_info = prog->aux->nr_linfo;
	/* ... */
		if (copy_to_user(user_linfo, prog->aux->linfo,
				 info.line_info_rec_size * ulen))
			return -EFAULT;
	/* ... */
	if (prog->aux->jited_linfo)
		info.nr_jited_line_info = prog->aux->nr_linfo;
	/* ... */
	if (bpf_dump_raw_ok(file->f_cred)) {
		/* ... */
			line_addr = (unsigned long)prog->aux->jited_linfo[i];
			/* ... */
				return -EFAULT;
	/* ... */
	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
	/* ... */
		if (prog->aux->func_cnt) {
			/* ... */
				if (copy_to_user(user_prog_tags[i],
						 prog->aux->func[i]->tag,
						 BPF_TAG_SIZE))
					return -EFAULT;
		} else {
			if (copy_to_user(user_prog_tags[0],
					 prog->tag, BPF_TAG_SIZE))
				return -EFAULT;
		}
	/* ... */
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;
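/*
 * Example (illustration only): the usual two-call pattern against
 * bpf_prog_get_info_by_fd() above -- the first call learns the array
 * sizes, the second supplies buffers. Only map_ids is fetched here; the
 * same scheme applies to jited insns, func_info, line_info, etc.
 */
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int print_prog_maps(int prog_fd)
{
	struct bpf_prog_info info;
	union bpf_attr attr;
	__u32 map_ids[16], nr, i;

	memset(&info, 0, sizeof(info));
	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;
	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return -1;

	nr = info.nr_map_ids < 16 ? info.nr_map_ids : 16;
	memset(&info, 0, sizeof(info));    /* request only the map_ids array */
	info.nr_map_ids = nr;
	info.map_ids = (__u64)(unsigned long)map_ids;
	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return -1;

	for (i = 0; i < nr; i++)
		printf("map id %u\n", map_ids[i]);
	return 0;
}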
/* in bpf_map_get_info_by_fd() */
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	/* ... */
	u32 info_len = attr->info.info_len;
	/* ... */
		return -EFAULT;

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	info.map_extra = map->map_extra;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
		info.btf_id = btf_obj_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}
	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
	/* ... */
		if (!map->ops->map_get_hash)
			return -EINVAL;
	/* ... */
			return -EINVAL;

		err = map->ops->map_get_hash(map, SHA256_DIGEST_SIZE, map->sha);
	/* ... */
		if (copy_to_user(uhash, map->sha, SHA256_DIGEST_SIZE) != 0)
			return -EFAULT;
	/* ... */
		return -EINVAL;
	/* ... */
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;
/* in bpf_btf_get_info_by_fd() */
	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;

/* in bpf_link_get_info_by_fd() */
	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	/* ... */
	u32 info_len = attr->info.info_len;
	/* ... */
		return -EFAULT;

	info.type = link->type;
	info.id = link->id;
	if (link->prog)
		info.prog_id = link->prog->aux->id;

	if (link->ops->fill_link_info) {
		err = link->ops->fill_link_info(link, &info);
	/* ... */
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

/* in token_get_info_by_fd() */
	struct bpf_token_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;

/* in bpf_obj_get_info_by_fd() */
		return -EINVAL;

	CLASS(fd, f)(attr->info.bpf_fd);
	/* ... */
		return -EBADFD;

	if (fd_file(f)->f_op == &bpf_prog_fops)
		return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
					       uattr);
	else if (fd_file(f)->f_op == &bpf_map_fops)
		return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
					      uattr);
	else if (fd_file(f)->f_op == &btf_fops)
		return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
	else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll)
		return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
					       attr, uattr);
	else if (fd_file(f)->f_op == &bpf_token_fops)
		return token_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
					    attr, uattr);
	return -EINVAL;
/* in bpf_btf_load() */
		return -EINVAL;

	if (attr->btf_flags & ~BPF_F_TOKEN_FD)
		return -EINVAL;

	if (attr->btf_flags & BPF_F_TOKEN_FD) {
		token = bpf_token_get_from_fd(attr->btf_token_fd);
	/* ... */
		return -EPERM;

/* in bpf_btf_get_fd_by_id() */
		return -EINVAL;

	if (attr->open_flags & ~BPF_F_TOKEN_FD)
		return -EINVAL;

	if (attr->open_flags & BPF_F_TOKEN_FD) {
		token = bpf_token_get_from_fd(attr->fd_by_id_token_fd);
	/* ... */
		return -EPERM;
	/* ... */
	return btf_get_fd_by_id(attr->btf_id);
/* in bpf_task_fd_query_copy() */
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	/* ... */
	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	/* ... */
			return -EFAULT;
	/* ... */
		if (err == -EFAULT)
	/* ... */
	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;
/* in bpf_task_fd_query() */
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	/* ... */
		return -EINVAL;
	/* ... */
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;
	/* ... */
		return -ENOENT;
	/* ... */
		return -EBADF;

	if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) {
		struct bpf_link *link = file->private_data;

		if (link->ops == &bpf_raw_tp_link_lops) {
			/* ... */
			struct bpf_raw_event_map *btp = raw_tp->btp;

			err = bpf_task_fd_query_copy(attr, uattr,
						     raw_tp->link.prog->aux->id,
						     BPF_FD_TYPE_RAW_TRACEPOINT,
						     btp->tp->name, 0, 0);
	/* ... */
	err = -ENOTSUPP;
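/*
 * Example (illustration only): asking the kernel what a perf_event or
 * raw_tracepoint fd held by another task is running, via BPF_TASK_FD_QUERY.
 * buf receives the probed symbol or tracepoint name.
 */
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int query_task_fd(pid_t pid, int target_fd)
{
	union bpf_attr attr;
	char buf[256];

	memset(&attr, 0, sizeof(attr));
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = target_fd;
	attr.task_fd_query.buf = (__u64)(unsigned long)buf;
	attr.task_fd_query.buf_len = sizeof(buf);
	if (syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr)))
		return -1;
	printf("prog id %u fd_type %u name %s\n",
	       attr.task_fd_query.prog_id, attr.task_fd_query.fd_type, buf);
	return 0;
}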
/* in the BPF_DO_BATCH() macro */
			err = -ENOTSUPP;	\

/* in bpf_map_do_batch() */
		return -EINVAL;

	CLASS(fd, f)(attr->batch.map_fd);
	/* ... */
		err = -EPERM;
	/* ... */
		err = -EPERM;
	/* ... */
		BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
	/* ... */
		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
	/* ... */
		BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr);
	/* ... */
		BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
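/*
 * Example (illustration only): one batched lookup against the
 * map_lookup_batch op dispatched above. Keys/values here assume a map with
 * 4-byte keys and 8-byte values; count is in/out. To walk the whole map,
 * feed out_batch back in as in_batch until the call fails with ENOENT.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long lookup_batch_once(int map_fd, __u32 *keys, __u64 *vals,
			      __u32 *count, __u64 *batch_token)
{
	union bpf_attr attr;
	long err;

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = map_fd;
	attr.batch.in_batch = 0;      /* NULL: start from the beginning */
	attr.batch.out_batch = (__u64)(unsigned long)batch_token;
	attr.batch.keys = (__u64)(unsigned long)keys;
	attr.batch.values = (__u64)(unsigned long)vals;
	attr.batch.count = *count;    /* in: buffer capacity */
	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
	*count = attr.batch.count;    /* out: elements actually copied */
	return err;
}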
/* in link_create() */
		return -EINVAL;

	if (attr->link_create.attach_type == BPF_STRUCT_OPS)
	/* ... */
	prog = bpf_prog_get(attr->link_create.prog_fd);
	/* ... */
	ret = bpf_prog_attach_check_attach_type(prog,
						attr->link_create.attach_type);
	/* ... */
	switch (prog->type) {
	/* ... */
		ret = bpf_tracing_prog_attach(prog,
					      attr->link_create.target_fd,
					      attr->link_create.target_btf_id,
					      attr->link_create.tracing.cookie,
					      attr->link_create.attach_type);
	/* ... */
		if (attr->link_create.attach_type != prog->expected_attach_type) {
			ret = -EINVAL;
	/* ... */
		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
			ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie,
						     attr->link_create.attach_type);
		else if (prog->expected_attach_type == BPF_TRACE_ITER)
		/* ... */
		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
		/* ... */
		else
			ret = bpf_tracing_prog_attach(prog,
						      attr->link_create.target_fd,
						      attr->link_create.target_btf_id,
						      attr->link_create.tracing.cookie,
						      attr->link_create.attach_type);
	/* ... */
		if (attr->link_create.attach_type == BPF_TCX_INGRESS ||
		    attr->link_create.attach_type == BPF_TCX_EGRESS)
	/* ... */
		if (attr->link_create.attach_type == BPF_PERF_EVENT)
			ret = bpf_perf_link_attach(attr, prog);
		else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI ||
			 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION)
			ret = bpf_kprobe_multi_link_attach(attr, prog);
		else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI ||
			 attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION)
			ret = bpf_uprobe_multi_link_attach(attr, prog);
		else
			ret = -EINVAL;
/* in link_update_map() */
	new_map = bpf_map_get(attr->link_update.new_map_fd);
	/* ... */
	if (attr->link_update.flags & BPF_F_REPLACE) {
		old_map = bpf_map_get(attr->link_update.old_map_fd);
	/* ... */
	} else if (attr->link_update.old_map_fd) {
		ret = -EINVAL;
	/* ... */
	ret = link->ops->update_map(link, new_map, old_map);

/* in link_update() */
		return -EINVAL;

	flags = attr->link_update.flags;
	/* ... */
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_update.link_fd);
	/* ... */
	if (link->ops->update_map) {
	/* ... */
	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
	/* ... */
		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
	/* ... */
	} else if (attr->link_update.old_prog_fd) {
		ret = -EINVAL;
	/* ... */
	if (link->ops->update_prog)
		ret = link->ops->update_prog(link, new_prog, old_prog);
	else
		ret = -EINVAL;

/* in link_detach() */
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
	/* ... */
	if (link->ops->detach)
		ret = link->ops->detach(link);
	else
		ret = -EOPNOTSUPP;
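/*
 * Example (illustration only): replacing and detaching through the link
 * ops exercised above. BPF_F_REPLACE makes the update fail unless
 * old_prog_fd is still the currently attached program.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int link_replace_prog(int link_fd, int new_prog_fd, int old_prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.old_prog_fd = old_prog_fd;
	attr.link_update.flags = BPF_F_REPLACE;
	return syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
}

static int link_force_detach(int link_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_detach.link_fd = link_fd;
	return syscall(__NR_bpf, BPF_LINK_DETACH, &attr, sizeof(attr));
}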
/* in bpf_link_inc_not_zero() */
	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);

/* in bpf_link_by_id() */
		return ERR_PTR(-ENOENT);
	/* ... */
		if (link->id)
			link = bpf_link_inc_not_zero(link);
		else
			link = ERR_PTR(-EAGAIN);
	/* ... */
		link = ERR_PTR(-ENOENT);

/* in bpf_link_get_fd_by_id() */
	u32 id = attr->link_id;
	/* ... */
		return -EINVAL;
	/* ... */
		return -EPERM;
/* in bpf_enable_runtime_stats() */
		return -EBUSY;
	/* ... */
	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);

/* in bpf_enable_stats() */
		return -EINVAL;
	/* ... */
		return -EPERM;

	switch (attr->enable_stats.type) {
	/* ... */
	return -EINVAL;
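/*
 * Example (illustration only): turning on run-time statistics via
 * BPF_ENABLE_STATS. Accounting stays enabled until the returned
 * "bpf-stats" fd is closed; run_time_ns/run_cnt then show up in
 * bpf_prog_info.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int enable_run_time_stats(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = BPF_STATS_RUN_TIME;
	return syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
}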
/* in bpf_iter_create() */
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
/* in bpf_prog_bind_map() */
		return -EINVAL;

	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	/* ... */
	map = bpf_map_get(attr->prog_bind_map.map_fd);
	/* ... */
	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
	/* ... */
	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	/* ... */
		ret = -ENOMEM;
	/* ... */
	if (prog->sleepable)
		atomic64_inc(&map->sleepable_refcnt);
	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;

	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;
	/* ... */
	mutex_unlock(&prog->aux->used_maps_mutex);
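/*
 * Example (illustration only): BPF_PROG_BIND_MAP as handled above. Binding
 * ties the map's lifetime to the program, which is useful for maps (e.g.
 * ones holding metadata) that the program never references in its
 * instructions.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int prog_bind_map(int prog_fd, int map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}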
/* in token_create() */
		return -EINVAL;

	if (attr->token_create.flags)
		return -EINVAL;

/* in prog_stream_read() */
	char __user *buf = u64_to_user_ptr(attr->prog_stream_read.stream_buf);
	u32 len = attr->prog_stream_read.stream_buf_len;
	/* ... */
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_stream_read.prog_fd);
	/* ... */
	ret = bpf_prog_stream_read(prog, attr->prog_stream_read.stream_id, buf, len);
/* in __sys_bpf() */
		return -EFAULT;
	/* ... */
		err = -EINVAL;

/* in BPF_CALL_3(bpf_sys_bpf, ...) */
		return -EINVAL;

/* To shut up -Wmissing-prototypes. */
/* ... */

/* in kern_sys_bpf() */
	if (attr->test.data_in || attr->test.data_out ||
	    attr->test.ctx_out || attr->test.duration ||
	    attr->test.repeat || attr->test.flags)
		return -EINVAL;

	prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
	/* ... */
	if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
	    attr->test.ctx_size_in > U16_MAX) {
		/* ... */
		return -EINVAL;
	}
	/* ... */
		return -EBUSY;
	/* ... */
	attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
/* in BPF_CALL_1(bpf_sys_close, ...) */
	/* This helper is allowed in the following callchain only:
	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
	 */

/* in BPF_CALL_4(bpf_kallsyms_lookup_name, ...) */
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;
	/* ... */
		return -EPERM;
	/* ... */
	return *res ? 0 : -ENOENT;

/* in syscall_prog_func_proto() */
		return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
		/* ... */

/* in bpf_stats_handler() */
	struct static_key *key = (struct static_key *)table->data;
	/* ... */
	struct ctl_table tmp = {
		/* ... */
		.mode = table->mode,
	};
	/* ... */
		return -EPERM;

/* in bpf_unpriv_handler() */
	int ret, unpriv_enable = *(int *)table->data;
	/* ... */
			return -EPERM;
	/* ... */
			return -EPERM;
		*(int *)table->data = unpriv_enable;