// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
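
/*
 * Illustrative sketch (not part of this file): a userspace caller passing
 * a larger-than-known bpf_attr must keep the tail zeroed for the check
 * above to succeed, which memset()ing the whole union guarantees:
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 1;
 *	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */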

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}
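
/*
 * Example of the sizing rule above (illustrative, not from this file):
 * for a BPF_MAP_TYPE_PERCPU_ARRAY with value_size == 12 on a 4-CPU
 * machine, syscall-side lookup/update must use a buffer of
 * round_up(12, 8) * 4 == 64 bytes, one 8-byte-aligned slot per possible
 * CPU:
 *
 *	__u64 buf[2 * 4];	// round_up(12, 8) == 16 bytes per CPU
 *
 *	err = bpf_map_lookup_elem(map_fd, &key, buf);
 */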

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}
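
/*
 * For reference (UAPI semantics, not defined here): the update "flags"
 * dispatched above are BPF_ANY (0, create or update), BPF_NOEXIST (1,
 * create only) and BPF_EXIST (2, update only). Illustrative userspace
 * usage via libbpf:
 *
 *	err = bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);
 *	if (err && errno == EEXIST)
 *		// entry was already present
 */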

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

/* Please, do not use this function outside from the map creation path
 * (e.g. in map update path) without taking care of setting the active
 * memory cgroup (see at bpf_map_kmalloc_node() for example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}
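
/*
 * The allocator above follows the same shape as kvmalloc(): try the slab
 * for low-order requests, fall back to vmalloc otherwise. A minimal
 * sketch of that pattern (assumptions: no mmapable case, no NUMA
 * placement, default flags):
 *
 *	void *p = NULL;
 *
 *	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
 *		p = kmalloc(size, GFP_USER | __GFP_NOWARN | __GFP_NORETRY);
 *	if (!p)
 *		p = vmalloc(size);
 */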

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  enum btf_field_type type)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & type))
		return NULL;
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & type))
		return NULL;
	return field;
}

void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
			break;
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			btf_put(rec->fields[i].kptr.btf);
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}
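
/*
 * Note that the bsearch() in btf_record_find() above requires
 * rec->fields to be sorted by offset (btf_field_cmp() is the ordering).
 * Illustrative lookup of a timer field at a hypothetical value offset:
 *
 *	const struct btf_field *f;
 *
 *	f = btf_record_find(map->record, 16, BPF_TIMER);
 *	if (f)
 *		bpf_timer_cancel_and_free(value + f->offset);
 */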

struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = offsetof(struct btf_record, fields[rec->cnt]);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
			break;
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
			btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = offsetof(struct btf_record, fields[rec_a->cnt]);
	return !memcmp(rec_a, rec_b, size);
}

void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
			field->kptr.dtor((void *)xchg((unsigned long *)field_ptr, 0));
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	kfree(map->field_offs);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing, map_free callback also does
	 * bpf_map_free_record, if needed.
	 */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}
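
/*
 * The put path below defers the actual free to a workqueue because
 * ops->map_free() may sleep. A minimal sketch of that deferred-free
 * pattern (assumption: a hypothetical object with an embedded
 * work_struct):
 *
 *	static void obj_free_deferred(struct work_struct *work)
 *	{
 *		struct obj *o = container_of(work, struct obj, work);
 *
 *		kvfree(o->big_buffer);	// may sleep, fine in a kworker
 *		kfree(o);
 *	}
 *
 *	if (refcount_dec_and_test(&o->ref)) {
 *		INIT_WORK(&o->work, obj_free_deferred);
 *		queue_work(system_unbound_wq, &o->work);
 *	}
 */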

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		/* Avoid spawning kworkers, since they all might contend
		 * for the same mutex like slab_mutex.
		 */
		queue_work(system_unbound_wq, &map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Provides an approximation of the map's memory footprint.
 * Used only to provide backward compatibility and display
 * a reasonable "memlock" value.
 */
static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
{
	unsigned long size;

	size = round_up(map->key_size + bpf_map_value_size(map), 8);

	return round_up(map->max_entries * size, PAGE_SIZE);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type  = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%lu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_footprint(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif
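
/*
 * Example (illustrative, hypothetical values): the fdinfo emitted above
 * can be inspected from userspace for any bpf map fd, e.g.
 *
 *	$ cat /proc/self/fdinfo/4
 *	map_type:	2
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	1
 *	map_flags:	0x0
 *	map_extra:	0x0
 *	memlock:	4096
 *	map_id:	13
 *	frozen:	0
 */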

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory region (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open = bpf_map_mmap_open,
	.close = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allow user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
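
/*
 * Expansion sketch (illustrative): with BPF_MAP_CREATE_LAST_FIELD
 * defined as map_extra later in this file, CHECK_ATTR(BPF_MAP_CREATE)
 * expands to
 *
 *	memchr_inv((void *) &attr->map_extra + sizeof(attr->map_extra), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_extra) -
 *		   sizeof(attr->map_extra)) != NULL
 *
 * i.e. it is true (an error) iff any byte after the command's last used
 * field is non-zero.
 */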

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type, BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_capable()) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD map_extra
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct btf_field_offs *foffs;
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel
	     * counterpart. Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	foffs = btf_parse_field_offs(map->record);
	if (IS_ERR(foffs)) {
		err = PTR_ERR(foffs);
		goto free_map;
	}
	map->field_offs = foffs;

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map_field_offs;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map_field_offs:
	kfree(map->field_offs);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}
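
/*
 * Illustrative userspace counterpart (via libbpf, not this file):
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "my_map",
 *				sizeof(__u32), sizeof(__u64), 128, NULL);
 *
 * which arrives in map_create() above with attr->map_name "my_map" and
 * the remaining attr fields zeroed.
 */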

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);
	value = kvmemdup_bpfptr(uvalue, value_size);
	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}

	err = bpf_map_update_value(map, f, key, value, attr->flags);

	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}
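
/*
 * Example of the BPF_F_LOCK handling in the element commands above
 * (illustrative): if the map value embeds a struct bpf_spin_lock,
 * userspace can request a locked copy via libbpf:
 *
 *	err = bpf_map_lookup_elem_flags(map_fd, &key, &val, BPF_F_LOCK);
 *
 * The copy is made under the value's spin lock and the lock field itself
 * is returned zeroed (see check_and_init_map_value() above).
 */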

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_dev_bound(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	maybe_wait_bpf_programs(map);
	return err;
}
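
/*
 * Illustrative batched-delete usage (userspace side, hypothetical
 * buffers): uattr->batch.count is written back with the number of keys
 * actually processed, even on partial failure:
 *
 *	__u32 cnt = 128;
 *
 *	err = bpf_map_delete_batch(map_fd, keys, &cnt, NULL);
 *	// on return, cnt <= 128 reflects how many keys were deleted
 */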

int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	int ufd = attr->batch.map_fd;
	void *key, *value;
	struct fd f;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, f, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);
	fdput(f);
	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (attr->flags &&
	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
	     map->map_type == BPF_MAP_TYPE_STACK)) {
		err = -EINVAL;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -ENOTSUPP;
	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		if (!bpf_map_is_dev_bound(map)) {
			bpf_disable_instrumentation();
			rcu_read_lock();
			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
			rcu_read_unlock();
			bpf_enable_instrumentation();
		}
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
		fdput(f);
		return -ENOTSUPP;
	}

	mutex_lock(&map->freeze_mutex);
	if (bpf_map_write_active(map)) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}
	if (!bpf_capable()) {
		err = -EPERM;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}
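
/*
 * Illustrative map_freeze() usage (userspace, raw syscall): freezing
 * makes the map read-only from the syscall side while programs may
 * still write it:
 *
 *	union bpf_attr attr = { .map_fd = map_fd };
 *
 *	err = syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
 */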

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

enum bpf_audit {
	BPF_AUDIT_LOAD,
	BPF_AUDIT_UNLOAD,
	BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
	[BPF_AUDIT_LOAD]   = "LOAD",
	[BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
	struct audit_context *ctx = NULL;
	struct audit_buffer *ab;

	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
		return;
	if (audit_enabled == AUDIT_OFF)
		return;
	if (op == BPF_AUDIT_LOAD)
		ctx = audit_context();
	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "prog-id=%u op=%s",
			 prog->aux->id, bpf_audit_str[op]);
	audit_log_end(ab);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	unsigned long flags;

	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&prog_idr_lock, flags);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&prog_idr_lock, flags);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	kvfree(aux->func_info);
	kfree(aux->func_info_aux);
	free_uid(aux->user);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
{
	bpf_prog_kallsyms_del_all(prog);
	btf_put(prog->aux->btf);
	kvfree(prog->aux->jited_linfo);
	kvfree(prog->aux->linfo);
	kfree(prog->aux->kfunc_tab);
	if (prog->aux->attach_btf)
		btf_put(prog->aux->attach_btf);

	if (deferred) {
		if (prog->aux->sleepable)
			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
		else
			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	} else {
		__bpf_prog_put_rcu(&prog->aux->rcu);
	}
}

static void bpf_prog_put_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	struct bpf_prog *prog;

	aux = container_of(work, struct bpf_prog_aux, work);
	prog = aux->prog;
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
	__bpf_prog_put_noref(prog, true);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	struct bpf_prog_aux *aux = prog->aux;

	if (atomic64_dec_and_test(&aux->refcnt)) {
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);

		if (in_irq() || irqs_disabled()) {
			INIT_WORK(&aux->work, bpf_prog_put_deferred);
			schedule_work(&aux->work);
		} else {
			bpf_prog_put_deferred(&aux->work);
		}
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

struct bpf_prog_kstats {
	u64 nsecs;
	u64 cnt;
	u64 misses;
};

void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	unsigned int flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}
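
/*
 * The per-cpu counters read below are only accumulated while run-time
 * stats are enabled (the kernel.bpf_stats_enabled sysctl or the
 * BPF_ENABLE_STATS command); "misses" counts executions skipped by
 * recursion protection. Illustrative enablement from userspace:
 *
 *	union bpf_attr attr = {
 *		.enable_stats = { .type = BPF_STATS_RUN_TIME },
 *	};
 *
 *	stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 */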

static void bpf_prog_get_stats(const struct bpf_prog *prog,
			       struct bpf_prog_kstats *stats)
{
	u64 nsecs = 0, cnt = 0, misses = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st;
		unsigned int start;
		u64 tnsecs, tcnt, tmisses;

		st = per_cpu_ptr(prog->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&st->syncp);
			tnsecs = u64_stats_read(&st->nsecs);
			tcnt = u64_stats_read(&st->cnt);
			tmisses = u64_stats_read(&st->misses);
		} while (u64_stats_fetch_retry(&st->syncp, start));
		nsecs += tnsecs;
		cnt += tcnt;
		misses += tmisses;
	}
	stats->nsecs = nsecs;
	stats->cnt = cnt;
	stats->misses = misses;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
	struct bpf_prog_kstats stats;

	bpf_prog_get_stats(prog, &stats);
	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n"
		   "prog_id:\t%u\n"
		   "run_time_ns:\t%llu\n"
		   "run_cnt:\t%llu\n"
		   "recursion_misses:\t%llu\n"
		   "verified_insns:\t%u\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT,
		   prog->aux->id,
		   stats.nsecs,
		   stats.cnt,
		   stats.misses,
		   prog->aux->verified_insns);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_prog_add(struct bpf_prog *prog, int i)
{
	atomic64_add(i, &prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_add);
2176 */ 2177 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 2178 } 2179 EXPORT_SYMBOL_GPL(bpf_prog_sub); 2180 2181 void bpf_prog_inc(struct bpf_prog *prog) 2182 { 2183 atomic64_inc(&prog->aux->refcnt); 2184 } 2185 EXPORT_SYMBOL_GPL(bpf_prog_inc); 2186 2187 /* prog_idr_lock should have been held */ 2188 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 2189 { 2190 int refold; 2191 2192 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 2193 2194 if (!refold) 2195 return ERR_PTR(-ENOENT); 2196 2197 return prog; 2198 } 2199 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 2200 2201 bool bpf_prog_get_ok(struct bpf_prog *prog, 2202 enum bpf_prog_type *attach_type, bool attach_drv) 2203 { 2204 /* not an attachment, just a refcount inc, always allow */ 2205 if (!attach_type) 2206 return true; 2207 2208 if (prog->type != *attach_type) 2209 return false; 2210 if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) 2211 return false; 2212 2213 return true; 2214 } 2215 2216 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 2217 bool attach_drv) 2218 { 2219 struct fd f = fdget(ufd); 2220 struct bpf_prog *prog; 2221 2222 prog = ____bpf_prog_get(f); 2223 if (IS_ERR(prog)) 2224 return prog; 2225 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 2226 prog = ERR_PTR(-EINVAL); 2227 goto out; 2228 } 2229 2230 bpf_prog_inc(prog); 2231 out: 2232 fdput(f); 2233 return prog; 2234 } 2235 2236 struct bpf_prog *bpf_prog_get(u32 ufd) 2237 { 2238 return __bpf_prog_get(ufd, NULL, false); 2239 } 2240 2241 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2242 bool attach_drv) 2243 { 2244 return __bpf_prog_get(ufd, &type, attach_drv); 2245 } 2246 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 2247 2248 /* Initially all BPF programs could be loaded w/o specifying 2249 * expected_attach_type. Later for some of them specifying expected_attach_type 2250 * at load time became required so that program could be validated properly. 2251 * Programs of types that are allowed to be loaded both w/ and w/o (for 2252 * backward compatibility) expected_attach_type, should have the default attach 2253 * type assigned to expected_attach_type for the latter case, so that it can be 2254 * validated later at attach time. 2255 * 2256 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 2257 * prog type requires it but has some attach types that have to be backward 2258 * compatible. 2259 */ 2260 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 2261 { 2262 switch (attr->prog_type) { 2263 case BPF_PROG_TYPE_CGROUP_SOCK: 2264 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 2265 * exist so checking for non-zero is the way to go here. 
2266 */ 2267 if (!attr->expected_attach_type) 2268 attr->expected_attach_type = 2269 BPF_CGROUP_INET_SOCK_CREATE; 2270 break; 2271 case BPF_PROG_TYPE_SK_REUSEPORT: 2272 if (!attr->expected_attach_type) 2273 attr->expected_attach_type = 2274 BPF_SK_REUSEPORT_SELECT; 2275 break; 2276 } 2277 } 2278 2279 static int 2280 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 2281 enum bpf_attach_type expected_attach_type, 2282 struct btf *attach_btf, u32 btf_id, 2283 struct bpf_prog *dst_prog) 2284 { 2285 if (btf_id) { 2286 if (btf_id > BTF_MAX_TYPE) 2287 return -EINVAL; 2288 2289 if (!attach_btf && !dst_prog) 2290 return -EINVAL; 2291 2292 switch (prog_type) { 2293 case BPF_PROG_TYPE_TRACING: 2294 case BPF_PROG_TYPE_LSM: 2295 case BPF_PROG_TYPE_STRUCT_OPS: 2296 case BPF_PROG_TYPE_EXT: 2297 break; 2298 default: 2299 return -EINVAL; 2300 } 2301 } 2302 2303 if (attach_btf && (!btf_id || dst_prog)) 2304 return -EINVAL; 2305 2306 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING && 2307 prog_type != BPF_PROG_TYPE_EXT) 2308 return -EINVAL; 2309 2310 switch (prog_type) { 2311 case BPF_PROG_TYPE_CGROUP_SOCK: 2312 switch (expected_attach_type) { 2313 case BPF_CGROUP_INET_SOCK_CREATE: 2314 case BPF_CGROUP_INET_SOCK_RELEASE: 2315 case BPF_CGROUP_INET4_POST_BIND: 2316 case BPF_CGROUP_INET6_POST_BIND: 2317 return 0; 2318 default: 2319 return -EINVAL; 2320 } 2321 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2322 switch (expected_attach_type) { 2323 case BPF_CGROUP_INET4_BIND: 2324 case BPF_CGROUP_INET6_BIND: 2325 case BPF_CGROUP_INET4_CONNECT: 2326 case BPF_CGROUP_INET6_CONNECT: 2327 case BPF_CGROUP_INET4_GETPEERNAME: 2328 case BPF_CGROUP_INET6_GETPEERNAME: 2329 case BPF_CGROUP_INET4_GETSOCKNAME: 2330 case BPF_CGROUP_INET6_GETSOCKNAME: 2331 case BPF_CGROUP_UDP4_SENDMSG: 2332 case BPF_CGROUP_UDP6_SENDMSG: 2333 case BPF_CGROUP_UDP4_RECVMSG: 2334 case BPF_CGROUP_UDP6_RECVMSG: 2335 return 0; 2336 default: 2337 return -EINVAL; 2338 } 2339 case BPF_PROG_TYPE_CGROUP_SKB: 2340 switch (expected_attach_type) { 2341 case BPF_CGROUP_INET_INGRESS: 2342 case BPF_CGROUP_INET_EGRESS: 2343 return 0; 2344 default: 2345 return -EINVAL; 2346 } 2347 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2348 switch (expected_attach_type) { 2349 case BPF_CGROUP_SETSOCKOPT: 2350 case BPF_CGROUP_GETSOCKOPT: 2351 return 0; 2352 default: 2353 return -EINVAL; 2354 } 2355 case BPF_PROG_TYPE_SK_LOOKUP: 2356 if (expected_attach_type == BPF_SK_LOOKUP) 2357 return 0; 2358 return -EINVAL; 2359 case BPF_PROG_TYPE_SK_REUSEPORT: 2360 switch (expected_attach_type) { 2361 case BPF_SK_REUSEPORT_SELECT: 2362 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2363 return 0; 2364 default: 2365 return -EINVAL; 2366 } 2367 case BPF_PROG_TYPE_SYSCALL: 2368 case BPF_PROG_TYPE_EXT: 2369 if (expected_attach_type) 2370 return -EINVAL; 2371 fallthrough; 2372 default: 2373 return 0; 2374 } 2375 } 2376 2377 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2378 { 2379 switch (prog_type) { 2380 case BPF_PROG_TYPE_SCHED_CLS: 2381 case BPF_PROG_TYPE_SCHED_ACT: 2382 case BPF_PROG_TYPE_XDP: 2383 case BPF_PROG_TYPE_LWT_IN: 2384 case BPF_PROG_TYPE_LWT_OUT: 2385 case BPF_PROG_TYPE_LWT_XMIT: 2386 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2387 case BPF_PROG_TYPE_SK_SKB: 2388 case BPF_PROG_TYPE_SK_MSG: 2389 case BPF_PROG_TYPE_LIRC_MODE2: 2390 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2391 case BPF_PROG_TYPE_CGROUP_DEVICE: 2392 case BPF_PROG_TYPE_CGROUP_SOCK: 2393 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2394 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2395 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2396 case 
BPF_PROG_TYPE_SOCK_OPS: 2397 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2398 return true; 2399 case BPF_PROG_TYPE_CGROUP_SKB: 2400 /* always unpriv */ 2401 case BPF_PROG_TYPE_SK_REUSEPORT: 2402 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2403 default: 2404 return false; 2405 } 2406 } 2407 2408 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2409 { 2410 switch (prog_type) { 2411 case BPF_PROG_TYPE_KPROBE: 2412 case BPF_PROG_TYPE_TRACEPOINT: 2413 case BPF_PROG_TYPE_PERF_EVENT: 2414 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2415 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2416 case BPF_PROG_TYPE_TRACING: 2417 case BPF_PROG_TYPE_LSM: 2418 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2419 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2420 return true; 2421 default: 2422 return false; 2423 } 2424 } 2425 2426 /* last field in 'union bpf_attr' used by this command */ 2427 #define BPF_PROG_LOAD_LAST_FIELD core_relo_rec_size 2428 2429 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) 2430 { 2431 enum bpf_prog_type type = attr->prog_type; 2432 struct bpf_prog *prog, *dst_prog = NULL; 2433 struct btf *attach_btf = NULL; 2434 int err; 2435 char license[128]; 2436 bool is_gpl; 2437 2438 if (CHECK_ATTR(BPF_PROG_LOAD)) 2439 return -EINVAL; 2440 2441 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2442 BPF_F_ANY_ALIGNMENT | 2443 BPF_F_TEST_STATE_FREQ | 2444 BPF_F_SLEEPABLE | 2445 BPF_F_TEST_RND_HI32 | 2446 BPF_F_XDP_HAS_FRAGS)) 2447 return -EINVAL; 2448 2449 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2450 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2451 !bpf_capable()) 2452 return -EPERM; 2453 2454 /* copy eBPF program license from user space */ 2455 if (strncpy_from_bpfptr(license, 2456 make_bpfptr(attr->license, uattr.is_kernel), 2457 sizeof(license) - 1) < 0) 2458 return -EFAULT; 2459 license[sizeof(license) - 1] = 0; 2460 2461 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2462 is_gpl = license_is_gpl_compatible(license); 2463 2464 if (attr->insn_cnt == 0 || 2465 attr->insn_cnt > (bpf_capable() ? 
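/* Example (illustrative user-space sketch, not part of this file): a minimal
 * BPF_PROG_LOAD request exercising the license and insn_cnt checks around
 * this point — a two-instruction "return 0" socket filter; assumes
 * <linux/bpf.h> and <sys/syscall.h>:
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,		// r0 = 0
 *		  .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },			// return r0
 *	};
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns = (__u64)(unsigned long)insns;
 *	attr.insn_cnt = 2;
 *	attr.license = (__u64)(unsigned long)"GPL";	// GPL-compatible string,
 *							// see is_gpl above
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */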
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) 2466 return -E2BIG; 2467 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2468 type != BPF_PROG_TYPE_CGROUP_SKB && 2469 !bpf_capable()) 2470 return -EPERM; 2471 2472 if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN)) 2473 return -EPERM; 2474 if (is_perfmon_prog_type(type) && !perfmon_capable()) 2475 return -EPERM; 2476 2477 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog 2478 * or btf, we need to check which one it is 2479 */ 2480 if (attr->attach_prog_fd) { 2481 dst_prog = bpf_prog_get(attr->attach_prog_fd); 2482 if (IS_ERR(dst_prog)) { 2483 dst_prog = NULL; 2484 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); 2485 if (IS_ERR(attach_btf)) 2486 return -EINVAL; 2487 if (!btf_is_kernel(attach_btf)) { 2488 /* attaching through specifying bpf_prog's BTF 2489 * objects directly might be supported eventually 2490 */ 2491 btf_put(attach_btf); 2492 return -ENOTSUPP; 2493 } 2494 } 2495 } else if (attr->attach_btf_id) { 2496 /* fall back to vmlinux BTF, if BTF type ID is specified */ 2497 attach_btf = bpf_get_btf_vmlinux(); 2498 if (IS_ERR(attach_btf)) 2499 return PTR_ERR(attach_btf); 2500 if (!attach_btf) 2501 return -EINVAL; 2502 btf_get(attach_btf); 2503 } 2504 2505 bpf_prog_load_fixup_attach_type(attr); 2506 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2507 attach_btf, attr->attach_btf_id, 2508 dst_prog)) { 2509 if (dst_prog) 2510 bpf_prog_put(dst_prog); 2511 if (attach_btf) 2512 btf_put(attach_btf); 2513 return -EINVAL; 2514 } 2515 2516 /* plain bpf_prog allocation */ 2517 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2518 if (!prog) { 2519 if (dst_prog) 2520 bpf_prog_put(dst_prog); 2521 if (attach_btf) 2522 btf_put(attach_btf); 2523 return -ENOMEM; 2524 } 2525 2526 prog->expected_attach_type = attr->expected_attach_type; 2527 prog->aux->attach_btf = attach_btf; 2528 prog->aux->attach_btf_id = attr->attach_btf_id; 2529 prog->aux->dst_prog = dst_prog; 2530 prog->aux->offload_requested = !!attr->prog_ifindex; 2531 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; 2532 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; 2533 2534 err = security_bpf_prog_alloc(prog->aux); 2535 if (err) 2536 goto free_prog; 2537 2538 prog->aux->user = get_current_user(); 2539 prog->len = attr->insn_cnt; 2540 2541 err = -EFAULT; 2542 if (copy_from_bpfptr(prog->insns, 2543 make_bpfptr(attr->insns, uattr.is_kernel), 2544 bpf_prog_insn_size(prog)) != 0) 2545 goto free_prog_sec; 2546 2547 prog->orig_prog = NULL; 2548 prog->jited = 0; 2549 2550 atomic64_set(&prog->aux->refcnt, 1); 2551 prog->gpl_compatible = is_gpl ? 
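/* Example (illustrative sketch): the attach_btf_id/attach_prog_fd handling
 * above is how tracing programs name their target at load time. For an
 * fentry program attaching to a kernel function, user space would resolve
 * the function's type ID in vmlinux BTF (assumed here to be target_btf_id)
 * and load with:
 *
 *	attr.prog_type = BPF_PROG_TYPE_TRACING;
 *	attr.expected_attach_type = BPF_TRACE_FENTRY;
 *	attr.attach_btf_id = target_btf_id;
 *
 * With neither attach_prog_fd nor attach_btf_obj_fd set, the code above
 * falls back to vmlinux BTF for that lookup.
 */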
1 : 0; 2552 2553 if (bpf_prog_is_dev_bound(prog->aux)) { 2554 err = bpf_prog_offload_init(prog, attr); 2555 if (err) 2556 goto free_prog_sec; 2557 } 2558 2559 /* find program type: socket_filter vs tracing_filter */ 2560 err = find_prog_type(type, prog); 2561 if (err < 0) 2562 goto free_prog_sec; 2563 2564 prog->aux->load_time = ktime_get_boottime_ns(); 2565 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2566 sizeof(attr->prog_name)); 2567 if (err < 0) 2568 goto free_prog_sec; 2569 2570 /* run eBPF verifier */ 2571 err = bpf_check(&prog, attr, uattr); 2572 if (err < 0) 2573 goto free_used_maps; 2574 2575 prog = bpf_prog_select_runtime(prog, &err); 2576 if (err < 0) 2577 goto free_used_maps; 2578 2579 err = bpf_prog_alloc_id(prog); 2580 if (err) 2581 goto free_used_maps; 2582 2583 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2584 * effectively publicly exposed. However, retrieving via 2585 * bpf_prog_get_fd_by_id() will take another reference, 2586 * therefore it cannot be gone underneath us. 2587 * 2588 * Only for the time /after/ successful bpf_prog_new_fd() 2589 * and before returning to userspace, we might just hold 2590 * one reference and any parallel close on that fd could 2591 * rip everything out. Hence, the notifications below must 2592 * happen before bpf_prog_new_fd(). 2593 * 2594 * Also, any failure handling from this point onwards must 2595 * be using bpf_prog_put() given the program is exposed. 2596 */ 2597 bpf_prog_kallsyms_add(prog); 2598 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 2599 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 2600 2601 err = bpf_prog_new_fd(prog); 2602 if (err < 0) 2603 bpf_prog_put(prog); 2604 return err; 2605 2606 free_used_maps: 2607 /* In case we have subprogs, we need to wait for a grace 2608 * period before we can tear down JIT memory since symbols 2609 * are already exposed under kallsyms. 2610 */ 2611 __bpf_prog_put_noref(prog, prog->aux->func_cnt); 2612 return err; 2613 free_prog_sec: 2614 free_uid(prog->aux->user); 2615 security_bpf_prog_free(prog->aux); 2616 free_prog: 2617 if (prog->aux->attach_btf) 2618 btf_put(prog->aux->attach_btf); 2619 bpf_prog_free(prog); 2620 return err; 2621 } 2622 2623 #define BPF_OBJ_LAST_FIELD file_flags 2624 2625 static int bpf_obj_pin(const union bpf_attr *attr) 2626 { 2627 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0) 2628 return -EINVAL; 2629 2630 return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); 2631 } 2632 2633 static int bpf_obj_get(const union bpf_attr *attr) 2634 { 2635 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 2636 attr->file_flags & ~BPF_OBJ_FLAG_MASK) 2637 return -EINVAL; 2638 2639 return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), 2640 attr->file_flags); 2641 } 2642 2643 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2644 const struct bpf_link_ops *ops, struct bpf_prog *prog) 2645 { 2646 atomic64_set(&link->refcnt, 1); 2647 link->type = type; 2648 link->id = 0; 2649 link->ops = ops; 2650 link->prog = prog; 2651 } 2652 2653 static void bpf_link_free_id(int id) 2654 { 2655 if (!id) 2656 return; 2657 2658 spin_lock_bh(&link_idr_lock); 2659 idr_remove(&link_idr, id); 2660 spin_unlock_bh(&link_idr_lock); 2661 } 2662 2663 /* Clean up bpf_link and corresponding anon_inode file and FD. After 2664 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 2665 * anon_inode's release() call. This helper marks bpf_link as 2666 * defunct, releases anon_inode file and puts reserved FD.
bpf_prog's refcnt 2667 * is not decremented; releasing it is the responsibility of the calling code 2668 * that failed to complete bpf_link initialization. 2669 */ 2670 void bpf_link_cleanup(struct bpf_link_primer *primer) 2671 { 2672 primer->link->prog = NULL; 2673 bpf_link_free_id(primer->id); 2674 fput(primer->file); 2675 put_unused_fd(primer->fd); 2676 } 2677 2678 void bpf_link_inc(struct bpf_link *link) 2679 { 2680 atomic64_inc(&link->refcnt); 2681 } 2682 2683 /* bpf_link_free is guaranteed to be called from process context */ 2684 static void bpf_link_free(struct bpf_link *link) 2685 { 2686 bpf_link_free_id(link->id); 2687 if (link->prog) { 2688 /* detach BPF program, clean up used resources */ 2689 link->ops->release(link); 2690 bpf_prog_put(link->prog); 2691 } 2692 /* free bpf_link and its containing memory */ 2693 link->ops->dealloc(link); 2694 } 2695 2696 static void bpf_link_put_deferred(struct work_struct *work) 2697 { 2698 struct bpf_link *link = container_of(work, struct bpf_link, work); 2699 2700 bpf_link_free(link); 2701 } 2702 2703 /* bpf_link_put can be called from atomic context, but ensures that resources 2704 * are freed from process context 2705 */ 2706 void bpf_link_put(struct bpf_link *link) 2707 { 2708 if (!atomic64_dec_and_test(&link->refcnt)) 2709 return; 2710 2711 if (in_atomic()) { 2712 INIT_WORK(&link->work, bpf_link_put_deferred); 2713 schedule_work(&link->work); 2714 } else { 2715 bpf_link_free(link); 2716 } 2717 } 2718 EXPORT_SYMBOL(bpf_link_put); 2719 2720 static int bpf_link_release(struct inode *inode, struct file *filp) 2721 { 2722 struct bpf_link *link = filp->private_data; 2723 2724 bpf_link_put(link); 2725 return 0; 2726 } 2727 2728 #ifdef CONFIG_PROC_FS 2729 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 2730 #define BPF_MAP_TYPE(_id, _ops) 2731 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 2732 static const char *bpf_link_type_strs[] = { 2733 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 2734 #include <linux/bpf_types.h> 2735 }; 2736 #undef BPF_PROG_TYPE 2737 #undef BPF_MAP_TYPE 2738 #undef BPF_LINK_TYPE 2739 2740 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 2741 { 2742 const struct bpf_link *link = filp->private_data; 2743 const struct bpf_prog *prog = link->prog; 2744 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2745 2746 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2747 seq_printf(m, 2748 "link_type:\t%s\n" 2749 "link_id:\t%u\n" 2750 "prog_tag:\t%s\n" 2751 "prog_id:\t%u\n", 2752 bpf_link_type_strs[link->type], 2753 link->id, 2754 prog_tag, 2755 prog->aux->id); 2756 if (link->ops->show_fdinfo) 2757 link->ops->show_fdinfo(link, m); 2758 } 2759 #endif 2760 2761 static const struct file_operations bpf_link_fops = { 2762 #ifdef CONFIG_PROC_FS 2763 .show_fdinfo = bpf_link_show_fdinfo, 2764 #endif 2765 .release = bpf_link_release, 2766 .read = bpf_dummy_read, 2767 .write = bpf_dummy_write, 2768 }; 2769 2770 static int bpf_link_alloc_id(struct bpf_link *link) 2771 { 2772 int id; 2773 2774 idr_preload(GFP_KERNEL); 2775 spin_lock_bh(&link_idr_lock); 2776 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 2777 spin_unlock_bh(&link_idr_lock); 2778 idr_preload_end(); 2779 2780 return id; 2781 } 2782 2783 /* Prepare bpf_link to be exposed to user-space by allocating an anon_inode 2784 * file, reserving an unused FD and allocating an ID from link_idr. This is to 2785 * be paired with bpf_link_settle() to install FD and ID and expose bpf_link to 2786 * user-space, if bpf_link is successfully attached.
If not, bpf_link and 2787 * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All 2788 * the transient state is passed around in struct bpf_link_primer. 2789 * This is the preferred way to create and initialize bpf_link, especially when 2790 * there are complicated and expensive operations in between creating bpf_link 2791 * itself and attaching it to a BPF hook. By using bpf_link_prime() and 2792 * bpf_link_settle(), kernel code using bpf_link doesn't have to perform 2793 * expensive (and potentially failing) rollback operations in the rare case 2794 * that the file, FD, or ID can't be allocated. 2795 */ 2796 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 2797 { 2798 struct file *file; 2799 int fd, id; 2800 2801 fd = get_unused_fd_flags(O_CLOEXEC); 2802 if (fd < 0) 2803 return fd; 2804 2805 2806 id = bpf_link_alloc_id(link); 2807 if (id < 0) { 2808 put_unused_fd(fd); 2809 return id; 2810 } 2811 2812 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC); 2813 if (IS_ERR(file)) { 2814 bpf_link_free_id(id); 2815 put_unused_fd(fd); 2816 return PTR_ERR(file); 2817 } 2818 2819 primer->link = link; 2820 primer->file = file; 2821 primer->fd = fd; 2822 primer->id = id; 2823 return 0; 2824 } 2825 2826 int bpf_link_settle(struct bpf_link_primer *primer) 2827 { 2828 /* make bpf_link fetchable by ID */ 2829 spin_lock_bh(&link_idr_lock); 2830 primer->link->id = primer->id; 2831 spin_unlock_bh(&link_idr_lock); 2832 /* make bpf_link fetchable by FD */ 2833 fd_install(primer->fd, primer->file); 2834 /* pass through installed FD */ 2835 return primer->fd; 2836 } 2837 2838 int bpf_link_new_fd(struct bpf_link *link) 2839 { 2840 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); 2841 } 2842 2843 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 2844 { 2845 struct fd f = fdget(ufd); 2846 struct bpf_link *link; 2847 2848 if (!f.file) 2849 return ERR_PTR(-EBADF); 2850 if (f.file->f_op != &bpf_link_fops) { 2851 fdput(f); 2852 return ERR_PTR(-EINVAL); 2853 } 2854 2855 link = f.file->private_data; 2856 bpf_link_inc(link); 2857 fdput(f); 2858 2859 return link; 2860 } 2861 EXPORT_SYMBOL(bpf_link_get_from_fd); 2862 2863 static void bpf_tracing_link_release(struct bpf_link *link) 2864 { 2865 struct bpf_tracing_link *tr_link = 2866 container_of(link, struct bpf_tracing_link, link.link); 2867 2868 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, 2869 tr_link->trampoline)); 2870 2871 bpf_trampoline_put(tr_link->trampoline); 2872 2873 /* tgt_prog is NULL if target is a kernel function */ 2874 if (tr_link->tgt_prog) 2875 bpf_prog_put(tr_link->tgt_prog); 2876 } 2877 2878 static void bpf_tracing_link_dealloc(struct bpf_link *link) 2879 { 2880 struct bpf_tracing_link *tr_link = 2881 container_of(link, struct bpf_tracing_link, link.link); 2882 2883 kfree(tr_link); 2884 } 2885 2886 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 2887 struct seq_file *seq) 2888 { 2889 struct bpf_tracing_link *tr_link = 2890 container_of(link, struct bpf_tracing_link, link.link); 2891 2892 seq_printf(seq, 2893 "attach_type:\t%d\n", 2894 tr_link->attach_type); 2895 } 2896 2897 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 2898 struct bpf_link_info *info) 2899 { 2900 struct bpf_tracing_link *tr_link = 2901 container_of(link, struct bpf_tracing_link, link.link); 2902 2903 info->tracing.attach_type = tr_link->attach_type; 2904 bpf_trampoline_unpack_key(tr_link->trampoline->key, 2905 &info->tracing.target_obj_id, 2906
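/* Typical use of the primer API above by a link implementation (a sketch,
 * assuming a hypothetical bpf_foo_link embedding struct bpf_link, and a
 * hypothetical foo_hook_attach() step that may fail):
 *
 *	err = bpf_link_prime(&foo->link, &primer);
 *	if (err) {
 *		kfree(foo);		// not yet primed, plain kfree is fine
 *		return err;
 *	}
 *	err = foo_hook_attach(foo);
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// frees file/FD/ID and,
 *						// eventually, the link itself
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// exposes FD and ID
 */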
&info->tracing.target_btf_id); 2907 2908 return 0; 2909 } 2910 2911 static const struct bpf_link_ops bpf_tracing_link_lops = { 2912 .release = bpf_tracing_link_release, 2913 .dealloc = bpf_tracing_link_dealloc, 2914 .show_fdinfo = bpf_tracing_link_show_fdinfo, 2915 .fill_link_info = bpf_tracing_link_fill_link_info, 2916 }; 2917 2918 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 2919 int tgt_prog_fd, 2920 u32 btf_id, 2921 u64 bpf_cookie) 2922 { 2923 struct bpf_link_primer link_primer; 2924 struct bpf_prog *tgt_prog = NULL; 2925 struct bpf_trampoline *tr = NULL; 2926 struct bpf_tracing_link *link; 2927 u64 key = 0; 2928 int err; 2929 2930 switch (prog->type) { 2931 case BPF_PROG_TYPE_TRACING: 2932 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 2933 prog->expected_attach_type != BPF_TRACE_FEXIT && 2934 prog->expected_attach_type != BPF_MODIFY_RETURN) { 2935 err = -EINVAL; 2936 goto out_put_prog; 2937 } 2938 break; 2939 case BPF_PROG_TYPE_EXT: 2940 if (prog->expected_attach_type != 0) { 2941 err = -EINVAL; 2942 goto out_put_prog; 2943 } 2944 break; 2945 case BPF_PROG_TYPE_LSM: 2946 if (prog->expected_attach_type != BPF_LSM_MAC) { 2947 err = -EINVAL; 2948 goto out_put_prog; 2949 } 2950 break; 2951 default: 2952 err = -EINVAL; 2953 goto out_put_prog; 2954 } 2955 2956 if (!!tgt_prog_fd != !!btf_id) { 2957 err = -EINVAL; 2958 goto out_put_prog; 2959 } 2960 2961 if (tgt_prog_fd) { 2962 /* For now we only allow new targets for BPF_PROG_TYPE_EXT */ 2963 if (prog->type != BPF_PROG_TYPE_EXT) { 2964 err = -EINVAL; 2965 goto out_put_prog; 2966 } 2967 2968 tgt_prog = bpf_prog_get(tgt_prog_fd); 2969 if (IS_ERR(tgt_prog)) { 2970 err = PTR_ERR(tgt_prog); 2971 tgt_prog = NULL; 2972 goto out_put_prog; 2973 } 2974 2975 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 2976 } 2977 2978 link = kzalloc(sizeof(*link), GFP_USER); 2979 if (!link) { 2980 err = -ENOMEM; 2981 goto out_put_prog; 2982 } 2983 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, 2984 &bpf_tracing_link_lops, prog); 2985 link->attach_type = prog->expected_attach_type; 2986 link->link.cookie = bpf_cookie; 2987 2988 mutex_lock(&prog->aux->dst_mutex); 2989 2990 /* There are a few possible cases here: 2991 * 2992 * - if prog->aux->dst_trampoline is set, the program was just loaded 2993 * and not yet attached to anything, so we can use the values stored 2994 * in prog->aux 2995 * 2996 * - if prog->aux->dst_trampoline is NULL, the program has already been 2997 * attached to a target and its initial target was cleared (below) 2998 * 2999 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 3000 * target_btf_id using the link_create API. 3001 * 3002 * - if tgt_prog == NULL when this function was called using the old 3003 * raw_tracepoint_open API, and we need a target from prog->aux 3004 * 3005 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 3006 * was detached and is going for re-attachment. 3007 */ 3008 if (!prog->aux->dst_trampoline && !tgt_prog) { 3009 /* 3010 * Allow re-attach for TRACING and LSM programs. If it's 3011 * currently linked, bpf_trampoline_link_prog will fail. 3012 * EXT programs need to specify tgt_prog_fd, so they 3013 * re-attach in separate code path. 
3014 */ 3015 if (prog->type != BPF_PROG_TYPE_TRACING && 3016 prog->type != BPF_PROG_TYPE_LSM) { 3017 err = -EINVAL; 3018 goto out_unlock; 3019 } 3020 btf_id = prog->aux->attach_btf_id; 3021 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 3022 } 3023 3024 if (!prog->aux->dst_trampoline || 3025 (key && key != prog->aux->dst_trampoline->key)) { 3026 /* If there is no saved target, or the specified target is 3027 * different from the destination specified at load time, we 3028 * need a new trampoline and a check for compatibility 3029 */ 3030 struct bpf_attach_target_info tgt_info = {}; 3031 3032 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 3033 &tgt_info); 3034 if (err) 3035 goto out_unlock; 3036 3037 tr = bpf_trampoline_get(key, &tgt_info); 3038 if (!tr) { 3039 err = -ENOMEM; 3040 goto out_unlock; 3041 } 3042 } else { 3043 /* The caller didn't specify a target, or the target was the 3044 * same as the destination supplied during program load. This 3045 * means we can reuse the trampoline and reference from program 3046 * load time, and there is no need to allocate a new one. This 3047 * can only happen once for any program, as the saved values in 3048 * prog->aux are cleared below. 3049 */ 3050 tr = prog->aux->dst_trampoline; 3051 tgt_prog = prog->aux->dst_prog; 3052 } 3053 3054 err = bpf_link_prime(&link->link.link, &link_primer); 3055 if (err) 3056 goto out_unlock; 3057 3058 err = bpf_trampoline_link_prog(&link->link, tr); 3059 if (err) { 3060 bpf_link_cleanup(&link_primer); 3061 link = NULL; 3062 goto out_unlock; 3063 } 3064 3065 link->tgt_prog = tgt_prog; 3066 link->trampoline = tr; 3067 3068 /* Always clear the trampoline and target prog from prog->aux to make 3069 * sure the original attach destination is not kept alive after a 3070 * program is (re-)attached to another target. 
3071 */ 3072 if (prog->aux->dst_prog && 3073 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 3074 /* got extra prog ref from syscall, or attaching to different prog */ 3075 bpf_prog_put(prog->aux->dst_prog); 3076 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 3077 /* we allocated a new trampoline, so free the old one */ 3078 bpf_trampoline_put(prog->aux->dst_trampoline); 3079 3080 prog->aux->dst_prog = NULL; 3081 prog->aux->dst_trampoline = NULL; 3082 mutex_unlock(&prog->aux->dst_mutex); 3083 3084 return bpf_link_settle(&link_primer); 3085 out_unlock: 3086 if (tr && tr != prog->aux->dst_trampoline) 3087 bpf_trampoline_put(tr); 3088 mutex_unlock(&prog->aux->dst_mutex); 3089 kfree(link); 3090 out_put_prog: 3091 if (tgt_prog_fd && tgt_prog) 3092 bpf_prog_put(tgt_prog); 3093 return err; 3094 } 3095 3096 struct bpf_raw_tp_link { 3097 struct bpf_link link; 3098 struct bpf_raw_event_map *btp; 3099 }; 3100 3101 static void bpf_raw_tp_link_release(struct bpf_link *link) 3102 { 3103 struct bpf_raw_tp_link *raw_tp = 3104 container_of(link, struct bpf_raw_tp_link, link); 3105 3106 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); 3107 bpf_put_raw_tracepoint(raw_tp->btp); 3108 } 3109 3110 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 3111 { 3112 struct bpf_raw_tp_link *raw_tp = 3113 container_of(link, struct bpf_raw_tp_link, link); 3114 3115 kfree(raw_tp); 3116 } 3117 3118 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 3119 struct seq_file *seq) 3120 { 3121 struct bpf_raw_tp_link *raw_tp_link = 3122 container_of(link, struct bpf_raw_tp_link, link); 3123 3124 seq_printf(seq, 3125 "tp_name:\t%s\n", 3126 raw_tp_link->btp->tp->name); 3127 } 3128 3129 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 3130 struct bpf_link_info *info) 3131 { 3132 struct bpf_raw_tp_link *raw_tp_link = 3133 container_of(link, struct bpf_raw_tp_link, link); 3134 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 3135 const char *tp_name = raw_tp_link->btp->tp->name; 3136 u32 ulen = info->raw_tracepoint.tp_name_len; 3137 size_t tp_len = strlen(tp_name); 3138 3139 if (!ulen ^ !ubuf) 3140 return -EINVAL; 3141 3142 info->raw_tracepoint.tp_name_len = tp_len + 1; 3143 3144 if (!ubuf) 3145 return 0; 3146 3147 if (ulen >= tp_len + 1) { 3148 if (copy_to_user(ubuf, tp_name, tp_len + 1)) 3149 return -EFAULT; 3150 } else { 3151 char zero = '\0'; 3152 3153 if (copy_to_user(ubuf, tp_name, ulen - 1)) 3154 return -EFAULT; 3155 if (put_user(zero, ubuf + ulen - 1)) 3156 return -EFAULT; 3157 return -ENOSPC; 3158 } 3159 3160 return 0; 3161 } 3162 3163 static const struct bpf_link_ops bpf_raw_tp_link_lops = { 3164 .release = bpf_raw_tp_link_release, 3165 .dealloc = bpf_raw_tp_link_dealloc, 3166 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 3167 .fill_link_info = bpf_raw_tp_link_fill_link_info, 3168 }; 3169 3170 #ifdef CONFIG_PERF_EVENTS 3171 struct bpf_perf_link { 3172 struct bpf_link link; 3173 struct file *perf_file; 3174 }; 3175 3176 static void bpf_perf_link_release(struct bpf_link *link) 3177 { 3178 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3179 struct perf_event *event = perf_link->perf_file->private_data; 3180 3181 perf_event_free_bpf_prog(event); 3182 fput(perf_link->perf_file); 3183 } 3184 3185 static void bpf_perf_link_dealloc(struct bpf_link *link) 3186 { 3187 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3188 3189 kfree(perf_link); 3190 } 3191 3192 static const struct 
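/* Example (illustrative, not part of this file): reading the tracepoint name
 * back through the fill_link_info handler above. On a too-small buffer the
 * kernel copies a truncated, NUL-terminated name, reports the required size
 * in tp_name_len and returns -ENOSPC:
 *
 *	struct bpf_link_info info = {};
 *	char name[128];
 *	union bpf_attr attr = {};
 *
 *	info.raw_tracepoint.tp_name = (__u64)(unsigned long)name;
 *	info.raw_tracepoint.tp_name_len = sizeof(name);
 *	attr.info.bpf_fd = link_fd;	// fd from BPF_RAW_TRACEPOINT_OPEN
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 */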
bpf_link_ops bpf_perf_link_lops = { 3193 .release = bpf_perf_link_release, 3194 .dealloc = bpf_perf_link_dealloc, 3195 }; 3196 3197 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3198 { 3199 struct bpf_link_primer link_primer; 3200 struct bpf_perf_link *link; 3201 struct perf_event *event; 3202 struct file *perf_file; 3203 int err; 3204 3205 if (attr->link_create.flags) 3206 return -EINVAL; 3207 3208 perf_file = perf_event_get(attr->link_create.target_fd); 3209 if (IS_ERR(perf_file)) 3210 return PTR_ERR(perf_file); 3211 3212 link = kzalloc(sizeof(*link), GFP_USER); 3213 if (!link) { 3214 err = -ENOMEM; 3215 goto out_put_file; 3216 } 3217 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 3218 link->perf_file = perf_file; 3219 3220 err = bpf_link_prime(&link->link, &link_primer); 3221 if (err) { 3222 kfree(link); 3223 goto out_put_file; 3224 } 3225 3226 event = perf_file->private_data; 3227 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 3228 if (err) { 3229 bpf_link_cleanup(&link_primer); 3230 goto out_put_file; 3231 } 3232 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 3233 bpf_prog_inc(prog); 3234 3235 return bpf_link_settle(&link_primer); 3236 3237 out_put_file: 3238 fput(perf_file); 3239 return err; 3240 } 3241 #else 3242 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3243 { 3244 return -EOPNOTSUPP; 3245 } 3246 #endif /* CONFIG_PERF_EVENTS */ 3247 3248 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 3249 const char __user *user_tp_name) 3250 { 3251 struct bpf_link_primer link_primer; 3252 struct bpf_raw_tp_link *link; 3253 struct bpf_raw_event_map *btp; 3254 const char *tp_name; 3255 char buf[128]; 3256 int err; 3257 3258 switch (prog->type) { 3259 case BPF_PROG_TYPE_TRACING: 3260 case BPF_PROG_TYPE_EXT: 3261 case BPF_PROG_TYPE_LSM: 3262 if (user_tp_name) 3263 /* The attach point for this category of programs 3264 * should be specified via btf_id during program load. 
3265 */ 3266 return -EINVAL; 3267 if (prog->type == BPF_PROG_TYPE_TRACING && 3268 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 3269 tp_name = prog->aux->attach_func_name; 3270 break; 3271 } 3272 return bpf_tracing_prog_attach(prog, 0, 0, 0); 3273 case BPF_PROG_TYPE_RAW_TRACEPOINT: 3274 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 3275 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) 3276 return -EFAULT; 3277 buf[sizeof(buf) - 1] = 0; 3278 tp_name = buf; 3279 break; 3280 default: 3281 return -EINVAL; 3282 } 3283 3284 btp = bpf_get_raw_tracepoint(tp_name); 3285 if (!btp) 3286 return -ENOENT; 3287 3288 link = kzalloc(sizeof(*link), GFP_USER); 3289 if (!link) { 3290 err = -ENOMEM; 3291 goto out_put_btp; 3292 } 3293 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 3294 &bpf_raw_tp_link_lops, prog); 3295 link->btp = btp; 3296 3297 err = bpf_link_prime(&link->link, &link_primer); 3298 if (err) { 3299 kfree(link); 3300 goto out_put_btp; 3301 } 3302 3303 err = bpf_probe_register(link->btp, prog); 3304 if (err) { 3305 bpf_link_cleanup(&link_primer); 3306 goto out_put_btp; 3307 } 3308 3309 return bpf_link_settle(&link_primer); 3310 3311 out_put_btp: 3312 bpf_put_raw_tracepoint(btp); 3313 return err; 3314 } 3315 3316 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 3317 3318 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 3319 { 3320 struct bpf_prog *prog; 3321 int fd; 3322 3323 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 3324 return -EINVAL; 3325 3326 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 3327 if (IS_ERR(prog)) 3328 return PTR_ERR(prog); 3329 3330 fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name)); 3331 if (fd < 0) 3332 bpf_prog_put(prog); 3333 return fd; 3334 } 3335 3336 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 3337 enum bpf_attach_type attach_type) 3338 { 3339 switch (prog->type) { 3340 case BPF_PROG_TYPE_CGROUP_SOCK: 3341 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3342 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3343 case BPF_PROG_TYPE_SK_LOOKUP: 3344 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 3345 case BPF_PROG_TYPE_CGROUP_SKB: 3346 if (!capable(CAP_NET_ADMIN)) 3347 /* cg-skb progs can be loaded by unpriv user. 3348 * check permissions at attach time. 3349 */ 3350 return -EPERM; 3351 return prog->enforce_expected_attach_type && 3352 prog->expected_attach_type != attach_type ? 
3353 -EINVAL : 0; 3354 default: 3355 return 0; 3356 } 3357 } 3358 3359 static enum bpf_prog_type 3360 attach_type_to_prog_type(enum bpf_attach_type attach_type) 3361 { 3362 switch (attach_type) { 3363 case BPF_CGROUP_INET_INGRESS: 3364 case BPF_CGROUP_INET_EGRESS: 3365 return BPF_PROG_TYPE_CGROUP_SKB; 3366 case BPF_CGROUP_INET_SOCK_CREATE: 3367 case BPF_CGROUP_INET_SOCK_RELEASE: 3368 case BPF_CGROUP_INET4_POST_BIND: 3369 case BPF_CGROUP_INET6_POST_BIND: 3370 return BPF_PROG_TYPE_CGROUP_SOCK; 3371 case BPF_CGROUP_INET4_BIND: 3372 case BPF_CGROUP_INET6_BIND: 3373 case BPF_CGROUP_INET4_CONNECT: 3374 case BPF_CGROUP_INET6_CONNECT: 3375 case BPF_CGROUP_INET4_GETPEERNAME: 3376 case BPF_CGROUP_INET6_GETPEERNAME: 3377 case BPF_CGROUP_INET4_GETSOCKNAME: 3378 case BPF_CGROUP_INET6_GETSOCKNAME: 3379 case BPF_CGROUP_UDP4_SENDMSG: 3380 case BPF_CGROUP_UDP6_SENDMSG: 3381 case BPF_CGROUP_UDP4_RECVMSG: 3382 case BPF_CGROUP_UDP6_RECVMSG: 3383 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 3384 case BPF_CGROUP_SOCK_OPS: 3385 return BPF_PROG_TYPE_SOCK_OPS; 3386 case BPF_CGROUP_DEVICE: 3387 return BPF_PROG_TYPE_CGROUP_DEVICE; 3388 case BPF_SK_MSG_VERDICT: 3389 return BPF_PROG_TYPE_SK_MSG; 3390 case BPF_SK_SKB_STREAM_PARSER: 3391 case BPF_SK_SKB_STREAM_VERDICT: 3392 case BPF_SK_SKB_VERDICT: 3393 return BPF_PROG_TYPE_SK_SKB; 3394 case BPF_LIRC_MODE2: 3395 return BPF_PROG_TYPE_LIRC_MODE2; 3396 case BPF_FLOW_DISSECTOR: 3397 return BPF_PROG_TYPE_FLOW_DISSECTOR; 3398 case BPF_CGROUP_SYSCTL: 3399 return BPF_PROG_TYPE_CGROUP_SYSCTL; 3400 case BPF_CGROUP_GETSOCKOPT: 3401 case BPF_CGROUP_SETSOCKOPT: 3402 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 3403 case BPF_TRACE_ITER: 3404 case BPF_TRACE_RAW_TP: 3405 case BPF_TRACE_FENTRY: 3406 case BPF_TRACE_FEXIT: 3407 case BPF_MODIFY_RETURN: 3408 return BPF_PROG_TYPE_TRACING; 3409 case BPF_LSM_MAC: 3410 return BPF_PROG_TYPE_LSM; 3411 case BPF_SK_LOOKUP: 3412 return BPF_PROG_TYPE_SK_LOOKUP; 3413 case BPF_XDP: 3414 return BPF_PROG_TYPE_XDP; 3415 case BPF_LSM_CGROUP: 3416 return BPF_PROG_TYPE_LSM; 3417 default: 3418 return BPF_PROG_TYPE_UNSPEC; 3419 } 3420 } 3421 3422 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd 3423 3424 #define BPF_F_ATTACH_MASK \ 3425 (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE) 3426 3427 static int bpf_prog_attach(const union bpf_attr *attr) 3428 { 3429 enum bpf_prog_type ptype; 3430 struct bpf_prog *prog; 3431 int ret; 3432 3433 if (CHECK_ATTR(BPF_PROG_ATTACH)) 3434 return -EINVAL; 3435 3436 if (attr->attach_flags & ~BPF_F_ATTACH_MASK) 3437 return -EINVAL; 3438 3439 ptype = attach_type_to_prog_type(attr->attach_type); 3440 if (ptype == BPF_PROG_TYPE_UNSPEC) 3441 return -EINVAL; 3442 3443 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 3444 if (IS_ERR(prog)) 3445 return PTR_ERR(prog); 3446 3447 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 3448 bpf_prog_put(prog); 3449 return -EINVAL; 3450 } 3451 3452 switch (ptype) { 3453 case BPF_PROG_TYPE_SK_SKB: 3454 case BPF_PROG_TYPE_SK_MSG: 3455 ret = sock_map_get_from_fd(attr, prog); 3456 break; 3457 case BPF_PROG_TYPE_LIRC_MODE2: 3458 ret = lirc_prog_attach(attr, prog); 3459 break; 3460 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3461 ret = netns_bpf_prog_attach(attr, prog); 3462 break; 3463 case BPF_PROG_TYPE_CGROUP_DEVICE: 3464 case BPF_PROG_TYPE_CGROUP_SKB: 3465 case BPF_PROG_TYPE_CGROUP_SOCK: 3466 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3467 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3468 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3469 case BPF_PROG_TYPE_SOCK_OPS: 3470 case BPF_PROG_TYPE_LSM: 3471 if (ptype == 
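/* Example (illustrative sketch): attaching a cgroup skb program through the
 * BPF_PROG_ATTACH command handled here; cgroup_fd is assumed to come from
 * open() on a cgroup v2 directory:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;		// BPF_PROG_TYPE_CGROUP_SKB
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */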
BPF_PROG_TYPE_LSM && 3472 prog->expected_attach_type != BPF_LSM_CGROUP) 3473 ret = -EINVAL; 3474 else 3475 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 3476 break; 3477 default: 3478 ret = -EINVAL; 3479 } 3480 3481 if (ret) 3482 bpf_prog_put(prog); 3483 return ret; 3484 } 3485 3486 #define BPF_PROG_DETACH_LAST_FIELD attach_type 3487 3488 static int bpf_prog_detach(const union bpf_attr *attr) 3489 { 3490 enum bpf_prog_type ptype; 3491 3492 if (CHECK_ATTR(BPF_PROG_DETACH)) 3493 return -EINVAL; 3494 3495 ptype = attach_type_to_prog_type(attr->attach_type); 3496 3497 switch (ptype) { 3498 case BPF_PROG_TYPE_SK_MSG: 3499 case BPF_PROG_TYPE_SK_SKB: 3500 return sock_map_prog_detach(attr, ptype); 3501 case BPF_PROG_TYPE_LIRC_MODE2: 3502 return lirc_prog_detach(attr); 3503 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3504 return netns_bpf_prog_detach(attr, ptype); 3505 case BPF_PROG_TYPE_CGROUP_DEVICE: 3506 case BPF_PROG_TYPE_CGROUP_SKB: 3507 case BPF_PROG_TYPE_CGROUP_SOCK: 3508 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3509 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3510 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3511 case BPF_PROG_TYPE_SOCK_OPS: 3512 case BPF_PROG_TYPE_LSM: 3513 return cgroup_bpf_prog_detach(attr, ptype); 3514 default: 3515 return -EINVAL; 3516 } 3517 } 3518 3519 #define BPF_PROG_QUERY_LAST_FIELD query.prog_attach_flags 3520 3521 static int bpf_prog_query(const union bpf_attr *attr, 3522 union bpf_attr __user *uattr) 3523 { 3524 if (!capable(CAP_NET_ADMIN)) 3525 return -EPERM; 3526 if (CHECK_ATTR(BPF_PROG_QUERY)) 3527 return -EINVAL; 3528 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 3529 return -EINVAL; 3530 3531 switch (attr->query.attach_type) { 3532 case BPF_CGROUP_INET_INGRESS: 3533 case BPF_CGROUP_INET_EGRESS: 3534 case BPF_CGROUP_INET_SOCK_CREATE: 3535 case BPF_CGROUP_INET_SOCK_RELEASE: 3536 case BPF_CGROUP_INET4_BIND: 3537 case BPF_CGROUP_INET6_BIND: 3538 case BPF_CGROUP_INET4_POST_BIND: 3539 case BPF_CGROUP_INET6_POST_BIND: 3540 case BPF_CGROUP_INET4_CONNECT: 3541 case BPF_CGROUP_INET6_CONNECT: 3542 case BPF_CGROUP_INET4_GETPEERNAME: 3543 case BPF_CGROUP_INET6_GETPEERNAME: 3544 case BPF_CGROUP_INET4_GETSOCKNAME: 3545 case BPF_CGROUP_INET6_GETSOCKNAME: 3546 case BPF_CGROUP_UDP4_SENDMSG: 3547 case BPF_CGROUP_UDP6_SENDMSG: 3548 case BPF_CGROUP_UDP4_RECVMSG: 3549 case BPF_CGROUP_UDP6_RECVMSG: 3550 case BPF_CGROUP_SOCK_OPS: 3551 case BPF_CGROUP_DEVICE: 3552 case BPF_CGROUP_SYSCTL: 3553 case BPF_CGROUP_GETSOCKOPT: 3554 case BPF_CGROUP_SETSOCKOPT: 3555 case BPF_LSM_CGROUP: 3556 return cgroup_bpf_prog_query(attr, uattr); 3557 case BPF_LIRC_MODE2: 3558 return lirc_prog_query(attr, uattr); 3559 case BPF_FLOW_DISSECTOR: 3560 case BPF_SK_LOOKUP: 3561 return netns_bpf_prog_query(attr, uattr); 3562 case BPF_SK_SKB_STREAM_PARSER: 3563 case BPF_SK_SKB_STREAM_VERDICT: 3564 case BPF_SK_MSG_VERDICT: 3565 case BPF_SK_SKB_VERDICT: 3566 return sock_map_bpf_prog_query(attr, uattr); 3567 default: 3568 return -EINVAL; 3569 } 3570 } 3571 3572 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size 3573 3574 static int bpf_prog_test_run(const union bpf_attr *attr, 3575 union bpf_attr __user *uattr) 3576 { 3577 struct bpf_prog *prog; 3578 int ret = -ENOTSUPP; 3579 3580 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 3581 return -EINVAL; 3582 3583 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 3584 (!attr->test.ctx_size_in && attr->test.ctx_in)) 3585 return -EINVAL; 3586 3587 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 3588 (!attr->test.ctx_size_out && attr->test.ctx_out)) 3589 return -EINVAL; 3590 3591 prog =
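/* Example (illustrative sketch): exercising the test_run command implemented
 * here with one packet-like input buffer. Program types without a
 * ->test_run callback report -ENOTSUPP:
 *
 *	char data[64] = {};			// input packet bytes
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)data;
 *	attr.test.data_size_in = sizeof(data);
 *	attr.test.repeat = 1;
 *	if (!syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
 *		printf("retval=%u duration=%uns\n",
 *		       attr.test.retval, attr.test.duration);
 */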
bpf_prog_get(attr->test.prog_fd); 3592 if (IS_ERR(prog)) 3593 return PTR_ERR(prog); 3594 3595 if (prog->aux->ops->test_run) 3596 ret = prog->aux->ops->test_run(prog, attr, uattr); 3597 3598 bpf_prog_put(prog); 3599 return ret; 3600 } 3601 3602 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 3603 3604 static int bpf_obj_get_next_id(const union bpf_attr *attr, 3605 union bpf_attr __user *uattr, 3606 struct idr *idr, 3607 spinlock_t *lock) 3608 { 3609 u32 next_id = attr->start_id; 3610 int err = 0; 3611 3612 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 3613 return -EINVAL; 3614 3615 if (!capable(CAP_SYS_ADMIN)) 3616 return -EPERM; 3617 3618 next_id++; 3619 spin_lock_bh(lock); 3620 if (!idr_get_next(idr, &next_id)) 3621 err = -ENOENT; 3622 spin_unlock_bh(lock); 3623 3624 if (!err) 3625 err = put_user(next_id, &uattr->next_id); 3626 3627 return err; 3628 } 3629 3630 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 3631 { 3632 struct bpf_map *map; 3633 3634 spin_lock_bh(&map_idr_lock); 3635 again: 3636 map = idr_get_next(&map_idr, id); 3637 if (map) { 3638 map = __bpf_map_inc_not_zero(map, false); 3639 if (IS_ERR(map)) { 3640 (*id)++; 3641 goto again; 3642 } 3643 } 3644 spin_unlock_bh(&map_idr_lock); 3645 3646 return map; 3647 } 3648 3649 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 3650 { 3651 struct bpf_prog *prog; 3652 3653 spin_lock_bh(&prog_idr_lock); 3654 again: 3655 prog = idr_get_next(&prog_idr, id); 3656 if (prog) { 3657 prog = bpf_prog_inc_not_zero(prog); 3658 if (IS_ERR(prog)) { 3659 (*id)++; 3660 goto again; 3661 } 3662 } 3663 spin_unlock_bh(&prog_idr_lock); 3664 3665 return prog; 3666 } 3667 3668 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 3669 3670 struct bpf_prog *bpf_prog_by_id(u32 id) 3671 { 3672 struct bpf_prog *prog; 3673 3674 if (!id) 3675 return ERR_PTR(-ENOENT); 3676 3677 spin_lock_bh(&prog_idr_lock); 3678 prog = idr_find(&prog_idr, id); 3679 if (prog) 3680 prog = bpf_prog_inc_not_zero(prog); 3681 else 3682 prog = ERR_PTR(-ENOENT); 3683 spin_unlock_bh(&prog_idr_lock); 3684 return prog; 3685 } 3686 3687 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 3688 { 3689 struct bpf_prog *prog; 3690 u32 id = attr->prog_id; 3691 int fd; 3692 3693 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 3694 return -EINVAL; 3695 3696 if (!capable(CAP_SYS_ADMIN)) 3697 return -EPERM; 3698 3699 prog = bpf_prog_by_id(id); 3700 if (IS_ERR(prog)) 3701 return PTR_ERR(prog); 3702 3703 fd = bpf_prog_new_fd(prog); 3704 if (fd < 0) 3705 bpf_prog_put(prog); 3706 3707 return fd; 3708 } 3709 3710 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 3711 3712 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 3713 { 3714 struct bpf_map *map; 3715 u32 id = attr->map_id; 3716 int f_flags; 3717 int fd; 3718 3719 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 3720 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 3721 return -EINVAL; 3722 3723 if (!capable(CAP_SYS_ADMIN)) 3724 return -EPERM; 3725 3726 f_flags = bpf_get_file_flag(attr->open_flags); 3727 if (f_flags < 0) 3728 return f_flags; 3729 3730 spin_lock_bh(&map_idr_lock); 3731 map = idr_find(&map_idr, id); 3732 if (map) 3733 map = __bpf_map_inc_not_zero(map, true); 3734 else 3735 map = ERR_PTR(-ENOENT); 3736 spin_unlock_bh(&map_idr_lock); 3737 3738 if (IS_ERR(map)) 3739 return PTR_ERR(map); 3740 3741 fd = bpf_map_new_fd(map, f_flags); 3742 if (fd < 0) 3743 bpf_map_put_with_uref(map); 3744 3745 return fd; 3746 } 3747 3748 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 3749 unsigned long addr, u32 *off, 3750 u32 
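/* Example (illustrative sketch): walking every loaded program with the ID
 * interfaces above (CAP_SYS_ADMIN required). Note that prog_id aliases
 * start_id in the attr union, and next_id must be zeroed again before
 * BPF_PROG_GET_FD_BY_ID, or CHECK_ATTR will reject the command:
 *
 *	__u32 id = 0;
 *
 *	for (;;) {
 *		union bpf_attr attr = {};
 *
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID,
 *			    &attr, sizeof(attr)))
 *			break;			// -ENOENT: no higher ID in use
 *		id = attr.next_id;
 *		attr.next_id = 0;
 *		attr.prog_id = id;
 *		int fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID,
 *				 &attr, sizeof(attr));
 *		if (fd >= 0)
 *			close(fd);	// or inspect via GET_INFO_BY_FD
 *	}
 */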
*type) 3751 { 3752 const struct bpf_map *map; 3753 int i; 3754 3755 mutex_lock(&prog->aux->used_maps_mutex); 3756 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 3757 map = prog->aux->used_maps[i]; 3758 if (map == (void *)addr) { 3759 *type = BPF_PSEUDO_MAP_FD; 3760 goto out; 3761 } 3762 if (!map->ops->map_direct_value_meta) 3763 continue; 3764 if (!map->ops->map_direct_value_meta(map, addr, off)) { 3765 *type = BPF_PSEUDO_MAP_VALUE; 3766 goto out; 3767 } 3768 } 3769 map = NULL; 3770 3771 out: 3772 mutex_unlock(&prog->aux->used_maps_mutex); 3773 return map; 3774 } 3775 3776 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 3777 const struct cred *f_cred) 3778 { 3779 const struct bpf_map *map; 3780 struct bpf_insn *insns; 3781 u32 off, type; 3782 u64 imm; 3783 u8 code; 3784 int i; 3785 3786 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 3787 GFP_USER); 3788 if (!insns) 3789 return insns; 3790 3791 for (i = 0; i < prog->len; i++) { 3792 code = insns[i].code; 3793 3794 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 3795 insns[i].code = BPF_JMP | BPF_CALL; 3796 insns[i].imm = BPF_FUNC_tail_call; 3797 /* fall-through */ 3798 } 3799 if (code == (BPF_JMP | BPF_CALL) || 3800 code == (BPF_JMP | BPF_CALL_ARGS)) { 3801 if (code == (BPF_JMP | BPF_CALL_ARGS)) 3802 insns[i].code = BPF_JMP | BPF_CALL; 3803 if (!bpf_dump_raw_ok(f_cred)) 3804 insns[i].imm = 0; 3805 continue; 3806 } 3807 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 3808 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 3809 continue; 3810 } 3811 3812 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 3813 continue; 3814 3815 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 3816 map = bpf_map_from_imm(prog, imm, &off, &type); 3817 if (map) { 3818 insns[i].src_reg = type; 3819 insns[i].imm = map->id; 3820 insns[i + 1].imm = off; 3821 continue; 3822 } 3823 } 3824 3825 return insns; 3826 } 3827 3828 static int set_info_rec_size(struct bpf_prog_info *info) 3829 { 3830 /* 3831 * Ensure info.*_rec_size is the same as kernel expected size 3832 * 3833 * or 3834 * 3835 * Only allow zero *_rec_size if both _rec_size and _cnt are 3836 * zero. In this case, the kernel will set the expected 3837 * _rec_size back to the info. 
3838 */ 3839 3840 if ((info->nr_func_info || info->func_info_rec_size) && 3841 info->func_info_rec_size != sizeof(struct bpf_func_info)) 3842 return -EINVAL; 3843 3844 if ((info->nr_line_info || info->line_info_rec_size) && 3845 info->line_info_rec_size != sizeof(struct bpf_line_info)) 3846 return -EINVAL; 3847 3848 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 3849 info->jited_line_info_rec_size != sizeof(__u64)) 3850 return -EINVAL; 3851 3852 info->func_info_rec_size = sizeof(struct bpf_func_info); 3853 info->line_info_rec_size = sizeof(struct bpf_line_info); 3854 info->jited_line_info_rec_size = sizeof(__u64); 3855 3856 return 0; 3857 } 3858 3859 static int bpf_prog_get_info_by_fd(struct file *file, 3860 struct bpf_prog *prog, 3861 const union bpf_attr *attr, 3862 union bpf_attr __user *uattr) 3863 { 3864 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3865 struct btf *attach_btf = bpf_prog_get_target_btf(prog); 3866 struct bpf_prog_info info; 3867 u32 info_len = attr->info.info_len; 3868 struct bpf_prog_kstats stats; 3869 char __user *uinsns; 3870 u32 ulen; 3871 int err; 3872 3873 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 3874 if (err) 3875 return err; 3876 info_len = min_t(u32, sizeof(info), info_len); 3877 3878 memset(&info, 0, sizeof(info)); 3879 if (copy_from_user(&info, uinfo, info_len)) 3880 return -EFAULT; 3881 3882 info.type = prog->type; 3883 info.id = prog->aux->id; 3884 info.load_time = prog->aux->load_time; 3885 info.created_by_uid = from_kuid_munged(current_user_ns(), 3886 prog->aux->user->uid); 3887 info.gpl_compatible = prog->gpl_compatible; 3888 3889 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 3890 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 3891 3892 mutex_lock(&prog->aux->used_maps_mutex); 3893 ulen = info.nr_map_ids; 3894 info.nr_map_ids = prog->aux->used_map_cnt; 3895 ulen = min_t(u32, info.nr_map_ids, ulen); 3896 if (ulen) { 3897 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 3898 u32 i; 3899 3900 for (i = 0; i < ulen; i++) 3901 if (put_user(prog->aux->used_maps[i]->id, 3902 &user_map_ids[i])) { 3903 mutex_unlock(&prog->aux->used_maps_mutex); 3904 return -EFAULT; 3905 } 3906 } 3907 mutex_unlock(&prog->aux->used_maps_mutex); 3908 3909 err = set_info_rec_size(&info); 3910 if (err) 3911 return err; 3912 3913 bpf_prog_get_stats(prog, &stats); 3914 info.run_time_ns = stats.nsecs; 3915 info.run_cnt = stats.cnt; 3916 info.recursion_misses = stats.misses; 3917 3918 info.verified_insns = prog->aux->verified_insns; 3919 3920 if (!bpf_capable()) { 3921 info.jited_prog_len = 0; 3922 info.xlated_prog_len = 0; 3923 info.nr_jited_ksyms = 0; 3924 info.nr_jited_func_lens = 0; 3925 info.nr_func_info = 0; 3926 info.nr_line_info = 0; 3927 info.nr_jited_line_info = 0; 3928 goto done; 3929 } 3930 3931 ulen = info.xlated_prog_len; 3932 info.xlated_prog_len = bpf_prog_insn_size(prog); 3933 if (info.xlated_prog_len && ulen) { 3934 struct bpf_insn *insns_sanitized; 3935 bool fault; 3936 3937 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 3938 info.xlated_prog_insns = 0; 3939 goto done; 3940 } 3941 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 3942 if (!insns_sanitized) 3943 return -ENOMEM; 3944 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 3945 ulen = min_t(u32, info.xlated_prog_len, ulen); 3946 fault = copy_to_user(uinsns, insns_sanitized, ulen); 3947 kfree(insns_sanitized); 3948 if (fault) 3949 return -EFAULT; 3950 } 3951 3952 if 
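/* The variable-length arrays filled in above and below follow a two-call
 * convention (illustrative sketch): call once with zeroed buffer pointers so
 * only the counts/lengths are set, then allocate and call again. E.g. for
 * the xlated instruction dump:
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *	__u32 len = info.xlated_prog_len;	// required size in bytes
 *
 *	memset(&info, 0, sizeof(info));
 *	info.xlated_prog_len = len;
 *	info.xlated_prog_insns = (__u64)(unsigned long)malloc(len);
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *	// second call copies min(kernel_len, len) bytes of insns
 */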
(bpf_prog_is_dev_bound(prog->aux)) { 3953 err = bpf_prog_offload_info_fill(&info, prog); 3954 if (err) 3955 return err; 3956 goto done; 3957 } 3958 3959 /* NOTE: the following code is supposed to be skipped for offload. 3960 * bpf_prog_offload_info_fill() is the place to fill similar fields 3961 * for offload. 3962 */ 3963 ulen = info.jited_prog_len; 3964 if (prog->aux->func_cnt) { 3965 u32 i; 3966 3967 info.jited_prog_len = 0; 3968 for (i = 0; i < prog->aux->func_cnt; i++) 3969 info.jited_prog_len += prog->aux->func[i]->jited_len; 3970 } else { 3971 info.jited_prog_len = prog->jited_len; 3972 } 3973 3974 if (info.jited_prog_len && ulen) { 3975 if (bpf_dump_raw_ok(file->f_cred)) { 3976 uinsns = u64_to_user_ptr(info.jited_prog_insns); 3977 ulen = min_t(u32, info.jited_prog_len, ulen); 3978 3979 /* for multi-function programs, copy the JITed 3980 * instructions for all the functions 3981 */ 3982 if (prog->aux->func_cnt) { 3983 u32 len, free, i; 3984 u8 *img; 3985 3986 free = ulen; 3987 for (i = 0; i < prog->aux->func_cnt; i++) { 3988 len = prog->aux->func[i]->jited_len; 3989 len = min_t(u32, len, free); 3990 img = (u8 *) prog->aux->func[i]->bpf_func; 3991 if (copy_to_user(uinsns, img, len)) 3992 return -EFAULT; 3993 uinsns += len; 3994 free -= len; 3995 if (!free) 3996 break; 3997 } 3998 } else { 3999 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 4000 return -EFAULT; 4001 } 4002 } else { 4003 info.jited_prog_insns = 0; 4004 } 4005 } 4006 4007 ulen = info.nr_jited_ksyms; 4008 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 4009 if (ulen) { 4010 if (bpf_dump_raw_ok(file->f_cred)) { 4011 unsigned long ksym_addr; 4012 u64 __user *user_ksyms; 4013 u32 i; 4014 4015 /* copy the address of the kernel symbol 4016 * corresponding to each function 4017 */ 4018 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 4019 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 4020 if (prog->aux->func_cnt) { 4021 for (i = 0; i < ulen; i++) { 4022 ksym_addr = (unsigned long) 4023 prog->aux->func[i]->bpf_func; 4024 if (put_user((u64) ksym_addr, 4025 &user_ksyms[i])) 4026 return -EFAULT; 4027 } 4028 } else { 4029 ksym_addr = (unsigned long) prog->bpf_func; 4030 if (put_user((u64) ksym_addr, &user_ksyms[0])) 4031 return -EFAULT; 4032 } 4033 } else { 4034 info.jited_ksyms = 0; 4035 } 4036 } 4037 4038 ulen = info.nr_jited_func_lens; 4039 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 4040 if (ulen) { 4041 if (bpf_dump_raw_ok(file->f_cred)) { 4042 u32 __user *user_lens; 4043 u32 func_len, i; 4044 4045 /* copy the JITed image lengths for each function */ 4046 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 4047 user_lens = u64_to_user_ptr(info.jited_func_lens); 4048 if (prog->aux->func_cnt) { 4049 for (i = 0; i < ulen; i++) { 4050 func_len = 4051 prog->aux->func[i]->jited_len; 4052 if (put_user(func_len, &user_lens[i])) 4053 return -EFAULT; 4054 } 4055 } else { 4056 func_len = prog->jited_len; 4057 if (put_user(func_len, &user_lens[0])) 4058 return -EFAULT; 4059 } 4060 } else { 4061 info.jited_func_lens = 0; 4062 } 4063 } 4064 4065 if (prog->aux->btf) 4066 info.btf_id = btf_obj_id(prog->aux->btf); 4067 info.attach_btf_id = prog->aux->attach_btf_id; 4068 if (attach_btf) 4069 info.attach_btf_obj_id = btf_obj_id(attach_btf); 4070 4071 ulen = info.nr_func_info; 4072 info.nr_func_info = prog->aux->func_info_cnt; 4073 if (info.nr_func_info && ulen) { 4074 char __user *user_finfo; 4075 4076 user_finfo = u64_to_user_ptr(info.func_info); 4077 ulen = min_t(u32, info.nr_func_info, ulen); 4078 if (copy_to_user(user_finfo, prog->aux->func_info, 4079 info.func_info_rec_size * ulen)) 4080 return -EFAULT; 4081 } 4082 4083 ulen = info.nr_line_info; 4084 info.nr_line_info = prog->aux->nr_linfo; 4085 if (info.nr_line_info && ulen) { 4086 __u8 __user *user_linfo; 4087 4088 user_linfo = u64_to_user_ptr(info.line_info); 4089 ulen = min_t(u32, info.nr_line_info, ulen); 4090 if (copy_to_user(user_linfo, prog->aux->linfo, 4091 info.line_info_rec_size * ulen)) 4092 return -EFAULT; 4093 } 4094 4095 ulen = info.nr_jited_line_info; 4096 if (prog->aux->jited_linfo) 4097 info.nr_jited_line_info = prog->aux->nr_linfo; 4098 else 4099 info.nr_jited_line_info = 0; 4100 if (info.nr_jited_line_info && ulen) { 4101 if (bpf_dump_raw_ok(file->f_cred)) { 4102 unsigned long line_addr; 4103 __u64 __user *user_linfo; 4104 u32 i; 4105 4106 user_linfo = u64_to_user_ptr(info.jited_line_info); 4107 ulen = min_t(u32, info.nr_jited_line_info, ulen); 4108 for (i = 0; i < ulen; i++) { 4109 line_addr = (unsigned long)prog->aux->jited_linfo[i]; 4110 if (put_user((__u64)line_addr, &user_linfo[i])) 4111 return -EFAULT; 4112 } 4113 } else { 4114 info.jited_line_info = 0; 4115 } 4116 } 4117 4118 ulen = info.nr_prog_tags; 4119 info.nr_prog_tags = prog->aux->func_cnt ? 
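/* Note on the idiom being completed here (also used for nr_jited_ksyms and
 * nr_jited_func_lens above): "x ? : 1" is the GNU "elvis" extension,
 * equivalent to "x ? x : 1" — one record per JITed subprogram if the
 * verifier split the program, else a single record for the main image.
 * A reader sizing its tag buffer accordingly (sketch, reusing the two-call
 * convention shown earlier):
 *
 *	__u8 (*tags)[BPF_TAG_SIZE];
 *
 *	tags = calloc(info.nr_prog_tags, BPF_TAG_SIZE);
 *	info.prog_tags = (__u64)(unsigned long)tags;
 *	// second GET_INFO_BY_FD call copies one tag per subprogram
 */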
: 1; 4120 if (ulen) { 4121 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 4122 u32 i; 4123 4124 user_prog_tags = u64_to_user_ptr(info.prog_tags); 4125 ulen = min_t(u32, info.nr_prog_tags, ulen); 4126 if (prog->aux->func_cnt) { 4127 for (i = 0; i < ulen; i++) { 4128 if (copy_to_user(user_prog_tags[i], 4129 prog->aux->func[i]->tag, 4130 BPF_TAG_SIZE)) 4131 return -EFAULT; 4132 } 4133 } else { 4134 if (copy_to_user(user_prog_tags[0], 4135 prog->tag, BPF_TAG_SIZE)) 4136 return -EFAULT; 4137 } 4138 } 4139 4140 done: 4141 if (copy_to_user(uinfo, &info, info_len) || 4142 put_user(info_len, &uattr->info.info_len)) 4143 return -EFAULT; 4144 4145 return 0; 4146 } 4147 4148 static int bpf_map_get_info_by_fd(struct file *file, 4149 struct bpf_map *map, 4150 const union bpf_attr *attr, 4151 union bpf_attr __user *uattr) 4152 { 4153 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4154 struct bpf_map_info info; 4155 u32 info_len = attr->info.info_len; 4156 int err; 4157 4158 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4159 if (err) 4160 return err; 4161 info_len = min_t(u32, sizeof(info), info_len); 4162 4163 memset(&info, 0, sizeof(info)); 4164 info.type = map->map_type; 4165 info.id = map->id; 4166 info.key_size = map->key_size; 4167 info.value_size = map->value_size; 4168 info.max_entries = map->max_entries; 4169 info.map_flags = map->map_flags; 4170 info.map_extra = map->map_extra; 4171 memcpy(info.name, map->name, sizeof(map->name)); 4172 4173 if (map->btf) { 4174 info.btf_id = btf_obj_id(map->btf); 4175 info.btf_key_type_id = map->btf_key_type_id; 4176 info.btf_value_type_id = map->btf_value_type_id; 4177 } 4178 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 4179 4180 if (bpf_map_is_dev_bound(map)) { 4181 err = bpf_map_offload_info_fill(&info, map); 4182 if (err) 4183 return err; 4184 } 4185 4186 if (copy_to_user(uinfo, &info, info_len) || 4187 put_user(info_len, &uattr->info.info_len)) 4188 return -EFAULT; 4189 4190 return 0; 4191 } 4192 4193 static int bpf_btf_get_info_by_fd(struct file *file, 4194 struct btf *btf, 4195 const union bpf_attr *attr, 4196 union bpf_attr __user *uattr) 4197 { 4198 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4199 u32 info_len = attr->info.info_len; 4200 int err; 4201 4202 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); 4203 if (err) 4204 return err; 4205 4206 return btf_get_info_by_fd(btf, attr, uattr); 4207 } 4208 4209 static int bpf_link_get_info_by_fd(struct file *file, 4210 struct bpf_link *link, 4211 const union bpf_attr *attr, 4212 union bpf_attr __user *uattr) 4213 { 4214 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4215 struct bpf_link_info info; 4216 u32 info_len = attr->info.info_len; 4217 int err; 4218 4219 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4220 if (err) 4221 return err; 4222 info_len = min_t(u32, sizeof(info), info_len); 4223 4224 memset(&info, 0, sizeof(info)); 4225 if (copy_from_user(&info, uinfo, info_len)) 4226 return -EFAULT; 4227 4228 info.type = link->type; 4229 info.id = link->id; 4230 info.prog_id = link->prog->aux->id; 4231 4232 if (link->ops->fill_link_info) { 4233 err = link->ops->fill_link_info(link, &info); 4234 if (err) 4235 return err; 4236 } 4237 4238 if (copy_to_user(uinfo, &info, info_len) || 4239 put_user(info_len, &uattr->info.info_len)) 4240 return -EFAULT; 4241 4242 return 0; 4243 } 4244 4245 4246 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD 

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
					     uattr);
	else if (f.file->f_op == &btf_fops)
		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
	else if (f.file->f_op == &bpf_link_fops)
		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
					      attr, uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

#define BPF_BTF_LOAD_LAST_FIELD btf_log_level

static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
{
	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	return btf_new_fd(attr, uattr);
}

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}
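
/*
 * bpf_task_fd_query_copy() implements the string-return contract of
 * BPF_TASK_FD_QUERY: the length actually needed (excluding the NUL) is
 * always written back to uattr->task_fd_query.buf_len, the string is
 * copied whole when the user buffer can hold it, and a too-small
 * buffer gets a NUL-terminated truncated copy plus -ENOSPC so the
 * caller can retry with a larger buffer.
 */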
static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NULL terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NULL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NULL terminator,
			 * do a partial copy with NULL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}

#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	rcu_read_lock();
	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return -ENOENT;

	err = 0;
	file = fget_task(task, fd);
	put_task_struct(task);
	if (!file)
		return -EBADF;

	if (file->f_op == &bpf_link_fops) {
		struct bpf_link *link = file->private_data;

		if (link->ops == &bpf_raw_tp_link_lops) {
			struct bpf_raw_tp_link *raw_tp =
				container_of(link, struct bpf_raw_tp_link, link);
			struct bpf_raw_event_map *btp = raw_tp->btp;

			err = bpf_task_fd_query_copy(attr, uattr,
						     raw_tp->link.prog->aux->id,
						     BPF_FD_TYPE_RAW_TRACEPOINT,
						     btp->tp->name, 0, 0);
			goto put_file;
		}
		goto out_not_supp;
	}

	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

out_not_supp:
	err = -ENOTSUPP;
put_file:
	fput(file);
	return err;
}
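
/*
 * Map batch operations.  A sketch of a lookup batch from userspace
 * (illustrative only; libbpf wraps this as bpf_map_lookup_batch()):
 *
 *	union bpf_attr attr = {};
 *	__u64 out_batch = 0;
 *
 *	attr.batch.map_fd = map_fd;
 *	attr.batch.in_batch = 0;		// NULL: start from the beginning
 *	attr.batch.out_batch = (__u64)(unsigned long)&out_batch;
 *	attr.batch.keys = (__u64)(unsigned long)keys;
 *	attr.batch.values = (__u64)(unsigned long)values;
 *	attr.batch.count = max_entries;		// in: capacity, out: copied
 *	syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *
 * To resume, point in_batch at the opaque token the kernel wrote
 * through out_batch; -ENOENT signals that the iteration is complete.
 */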

#define BPF_MAP_BATCH_LAST_FIELD batch.flags

#define BPF_DO_BATCH(fn)			\
	do {					\
		if (!fn) {			\
			err = -ENOTSUPP;	\
			goto err_put;		\
		}				\
		err = fn(map, attr, uattr);	\
	} while (0)

static int bpf_map_do_batch(const union bpf_attr *attr,
			    union bpf_attr __user *uattr,
			    int cmd)
{
	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
	struct bpf_map *map;
	int err, ufd;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_BATCH))
		return -EINVAL;

	ufd = attr->batch.map_fd;
	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (has_write)
		bpf_map_write_active_inc(map);
	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}
	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (cmd == BPF_MAP_LOOKUP_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_batch);
	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
	else if (cmd == BPF_MAP_UPDATE_BATCH)
		BPF_DO_BATCH(map->ops->map_update_batch);
	else
		BPF_DO_BATCH(map->ops->map_delete_batch);
err_put:
	if (has_write)
		bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies
static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (CHECK_ATTR(BPF_LINK_CREATE))
		return -EINVAL;

	prog = bpf_prog_get(attr->link_create.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	ret = bpf_prog_attach_check_attach_type(prog,
						attr->link_create.attach_type);
	if (ret)
		goto out;

	switch (prog->type) {
	case BPF_PROG_TYPE_EXT:
		break;
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
		if (attr->link_create.attach_type != BPF_PERF_EVENT) {
			ret = -EINVAL;
			goto out;
		}
		break;
	case BPF_PROG_TYPE_KPROBE:
		if (attr->link_create.attach_type != BPF_PERF_EVENT &&
		    attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) {
			ret = -EINVAL;
			goto out;
		}
		break;
	default:
		ptype = attach_type_to_prog_type(attr->link_create.attach_type);
		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		ret = cgroup_bpf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_EXT:
		ret = bpf_tracing_prog_attach(prog,
					      attr->link_create.target_fd,
					      attr->link_create.target_btf_id,
					      attr->link_create.tracing.cookie);
		break;
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_TRACING:
		if (attr->link_create.attach_type != prog->expected_attach_type) {
			ret = -EINVAL;
			goto out;
		}
		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
			ret = bpf_raw_tp_link_attach(prog, NULL);
		else if (prog->expected_attach_type == BPF_TRACE_ITER)
			ret = bpf_iter_link_attach(attr, uattr, prog);
		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
			ret = cgroup_bpf_link_attach(attr, prog);
		else
			ret = bpf_tracing_prog_attach(prog,
						      attr->link_create.target_fd,
						      attr->link_create.target_btf_id,
						      attr->link_create.tracing.cookie);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_SK_LOOKUP:
		ret = netns_bpf_link_create(attr, prog);
		break;
#ifdef CONFIG_NET
	case BPF_PROG_TYPE_XDP:
		ret = bpf_xdp_link_attach(attr, prog);
		break;
#endif
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
		ret = bpf_perf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_KPROBE:
		if (attr->link_create.attach_type == BPF_PERF_EVENT)
			ret = bpf_perf_link_attach(attr, prog);
		else
			ret = bpf_kprobe_multi_link_attach(attr, prog);
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret < 0)
		bpf_prog_put(prog);
	return ret;
}
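
/*
 * BPF_LINK_UPDATE atomically swaps the program behind an existing
 * link.  A sketch of the conditional form (illustrative only):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_update.link_fd = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.old_prog_fd = old_prog_fd;  // expected current prog
 *	attr.link_update.flags = BPF_F_REPLACE;
 *	syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 *
 * Without BPF_F_REPLACE, old_prog_fd must be zero and the update is
 * unconditional.
 */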

#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd

static int link_update(union bpf_attr *attr)
{
	struct bpf_prog *old_prog = NULL, *new_prog;
	struct bpf_link *link;
	u32 flags;
	int ret;

	if (CHECK_ATTR(BPF_LINK_UPDATE))
		return -EINVAL;

	flags = attr->link_update.flags;
	if (flags & ~BPF_F_REPLACE)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_update.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
	if (IS_ERR(new_prog)) {
		ret = PTR_ERR(new_prog);
		goto out_put_link;
	}

	if (flags & BPF_F_REPLACE) {
		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
		if (IS_ERR(old_prog)) {
			ret = PTR_ERR(old_prog);
			old_prog = NULL;
			goto out_put_progs;
		}
	} else if (attr->link_update.old_prog_fd) {
		ret = -EINVAL;
		goto out_put_progs;
	}

	if (link->ops->update_prog)
		ret = link->ops->update_prog(link, new_prog, old_prog);
	else
		ret = -EINVAL;

out_put_progs:
	if (old_prog)
		bpf_prog_put(old_prog);
	if (ret)
		bpf_prog_put(new_prog);
out_put_link:
	bpf_link_put(link);
	return ret;
}

#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd

static int link_detach(union bpf_attr *attr)
{
	struct bpf_link *link;
	int ret;

	if (CHECK_ATTR(BPF_LINK_DETACH))
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->detach)
		ret = link->ops->detach(link);
	else
		ret = -EOPNOTSUPP;

	bpf_link_put(link);
	return ret;
}
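
/*
 * Take a reference on a link only if it is still alive: a refcount
 * that has already hit zero means the link is being torn down and must
 * not be resurrected.  The ID lookups below rely on this to race
 * safely against bpf_link_put().
 */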
static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
}

struct bpf_link *bpf_link_by_id(u32 id)
{
	struct bpf_link *link;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&link_idr_lock);
	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
	link = idr_find(&link_idr, id);
	if (link) {
		if (link->id)
			link = bpf_link_inc_not_zero(link);
		else
			link = ERR_PTR(-EAGAIN);
	} else {
		link = ERR_PTR(-ENOENT);
	}
	spin_unlock_bh(&link_idr_lock);
	return link;
}

struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
{
	struct bpf_link *link;

	spin_lock_bh(&link_idr_lock);
again:
	link = idr_get_next(&link_idr, id);
	if (link) {
		link = bpf_link_inc_not_zero(link);
		if (IS_ERR(link)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&link_idr_lock);

	return link;
}

#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_link *link;
	u32 id = attr->link_id;
	int fd;

	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	link = bpf_link_by_id(id);
	if (IS_ERR(link))
		return PTR_ERR(link);

	fd = bpf_link_new_fd(link);
	if (fd < 0)
		bpf_link_put(link);

	return fd;
}

DEFINE_MUTEX(bpf_stats_enabled_mutex);

static int bpf_stats_release(struct inode *inode, struct file *file)
{
	mutex_lock(&bpf_stats_enabled_mutex);
	static_key_slow_dec(&bpf_stats_enabled_key.key);
	mutex_unlock(&bpf_stats_enabled_mutex);
	return 0;
}

static const struct file_operations bpf_stats_fops = {
	.release = bpf_stats_release,
};

static int bpf_enable_runtime_stats(void)
{
	int fd;

	mutex_lock(&bpf_stats_enabled_mutex);

	/* Set a very high limit to avoid overflow */
	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
		mutex_unlock(&bpf_stats_enabled_mutex);
		return -EBUSY;
	}

	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
	if (fd >= 0)
		static_key_slow_inc(&bpf_stats_enabled_key.key);

	mutex_unlock(&bpf_stats_enabled_mutex);
	return fd;
}

#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type

static int bpf_enable_stats(union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_ENABLE_STATS))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (attr->enable_stats.type) {
	case BPF_STATS_RUN_TIME:
		return bpf_enable_runtime_stats();
	default:
		break;
	}
	return -EINVAL;
}

#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags

static int bpf_iter_create(union bpf_attr *attr)
{
	struct bpf_link *link;
	int err;

	if (CHECK_ATTR(BPF_ITER_CREATE))
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	err = bpf_iter_new_fd(link);
	bpf_link_put(link);

	return err;
}
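
/*
 * BPF_PROG_BIND_MAP ties a map's lifetime to a program by inserting it
 * into the program's used_maps array, exactly as if one of the
 * program's instructions referenced it.  Loaders use this to keep
 * auxiliary maps (typically metadata the program itself never touches)
 * alive for as long as the program exists.
 */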

#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags

static int bpf_prog_bind_map(union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct bpf_map **used_maps_old, **used_maps_new;
	int i, ret = 0;

	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
		return -EINVAL;

	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	map = bpf_map_get(attr->prog_bind_map.map_fd);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_prog_put;
	}

	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (used_maps_old[i] == map) {
			bpf_map_put(map);
			goto out_unlock;
		}

	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	if (!used_maps_new) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;

	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;

	kfree(used_maps_old);

out_unlock:
	mutex_unlock(&prog->aux->used_maps_mutex);

	if (ret)
		bpf_map_put(map);
out_prog_put:
	bpf_prog_put(prog);
	return ret;
}
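
/*
 * __sys_bpf() is the single entry point for every BPF command.  The
 * caller's union bpf_attr may be smaller or larger than the kernel's:
 * a smaller one is zero-extended by the memset() + partial copy below,
 * and a larger one is accepted only if the tail the kernel does not
 * know about is all zeroes (bpf_check_uarg_tail_zero()), which is what
 * keeps old binaries working on new kernels and vice versa.
 */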
static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
{
	union bpf_attr attr;
	bool capable;
	int err;

	capable = bpf_capable() || !sysctl_unprivileged_bpf_disabled;

	/* Intent here is for unprivileged_bpf_disabled to block key object
	 * creation commands for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success.  Capabilities are later verified for
	 * operations such as load and map create, so even with unprivileged
	 * BPF disabled, capability checks are still carried out for these
	 * and other operations.
	 */
	if (!capable &&
	    (cmd == BPF_MAP_CREATE || cmd == BPF_PROG_LOAD))
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_bpfptr(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr, uattr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr, uattr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr.user);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr.user);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr, uattr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr.user);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	case BPF_MAP_LOOKUP_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user,
				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
		break;
	case BPF_MAP_UPDATE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
		break;
	case BPF_MAP_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
		break;
	case BPF_LINK_CREATE:
		err = link_create(&attr, uattr);
		break;
	case BPF_LINK_UPDATE:
		err = link_update(&attr);
		break;
	case BPF_LINK_GET_FD_BY_ID:
		err = bpf_link_get_fd_by_id(&attr);
		break;
	case BPF_LINK_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &link_idr, &link_idr_lock);
		break;
	case BPF_ENABLE_STATS:
		err = bpf_enable_stats(&attr);
		break;
	case BPF_ITER_CREATE:
		err = bpf_iter_create(&attr);
		break;
	case BPF_LINK_DETACH:
		err = link_detach(&attr);
		break;
	case BPF_PROG_BIND_MAP:
		err = bpf_prog_bind_map(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
}
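
/*
 * Everything below implements BPF_PROG_TYPE_SYSCALL: sleepable
 * programs that may themselves invoke bpf(2) through the bpf_sys_bpf()
 * helper.  Their context is a plain byte buffer supplied by the
 * loader, so access validation only needs bounds and alignment checks.
 */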
static bool syscall_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= U16_MAX)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{
	switch (cmd) {
	case BPF_MAP_CREATE:
	case BPF_MAP_DELETE_ELEM:
	case BPF_MAP_UPDATE_ELEM:
	case BPF_MAP_FREEZE:
	case BPF_MAP_GET_FD_BY_ID:
	case BPF_PROG_LOAD:
	case BPF_BTF_LOAD:
	case BPF_LINK_CREATE:
	case BPF_RAW_TRACEPOINT_OPEN:
		break;
	default:
		return -EINVAL;
	}
	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}

/* To shut up -Wmissing-prototypes.
 * This function is used by the kernel light skeleton
 * to load bpf programs when modules are loaded or during kernel boot.
 * See tools/lib/bpf/skel_internal.h
 */
int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);

int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_prog * __maybe_unused prog;
	struct bpf_tramp_run_ctx __maybe_unused run_ctx;

	switch (cmd) {
#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
	case BPF_PROG_TEST_RUN:
		if (attr->test.data_in || attr->test.data_out ||
		    attr->test.ctx_out || attr->test.duration ||
		    attr->test.repeat || attr->test.flags)
			return -EINVAL;

		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
		    attr->test.ctx_size_in > U16_MAX) {
			bpf_prog_put(prog);
			return -EINVAL;
		}

		run_ctx.bpf_cookie = 0;
		run_ctx.saved_run_ctx = NULL;
		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
			/* recursion detected */
			bpf_prog_put(prog);
			return -EBUSY;
		}
		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
						&run_ctx);
		bpf_prog_put(prog);
		return 0;
#endif
	default:
		return ____bpf_sys_bpf(cmd, attr, size);
	}
}
EXPORT_SYMBOL(kern_sys_bpf);

static const struct bpf_func_proto bpf_sys_bpf_proto = {
	.func		= bpf_sys_bpf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id);
}
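
/*
 * An illustrative fragment of a BPF_PROG_TYPE_SYSCALL program using
 * the helpers above and below, in the style the light skeleton
 * generates (the context layout here is hypothetical):
 *
 *	SEC("syscall")
 *	int load_prog(struct args *ctx)
 *	{
 *		union bpf_attr attr = {};
 *		int fd;
 *
 *		// fill attr for BPF_PROG_LOAD from ctx ...
 *		fd = bpf_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 *		if (fd < 0)
 *			return fd;
 *		bpf_sys_close(fd);	// close the fd from inside BPF
 *		return 0;
 *	}
 */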
BPF_CALL_1(bpf_sys_close, u32, fd)
{
	/* When bpf program calls this helper there should not be
	 * an fdget() without matching completed fdput().
	 * This helper is allowed in the following callchain only:
	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
	 */
	return close_fd(fd);
}

static const struct bpf_func_proto bpf_sys_close_proto = {
	.func		= bpf_sys_close,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	if (!bpf_dump_raw_ok(current_cred()))
		return -EPERM;

	*res = kallsyms_lookup_name(name);
	return *res ? 0 : -ENOENT;
}

static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
	.func		= bpf_kallsyms_lookup_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sys_bpf:
		return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
	case BPF_FUNC_btf_find_by_name_kind:
		return &bpf_btf_find_by_name_kind_proto;
	case BPF_FUNC_sys_close:
		return &bpf_sys_close_proto;
	case BPF_FUNC_kallsyms_lookup_name:
		return &bpf_kallsyms_lookup_name_proto;
	default:
		return tracing_prog_func_proto(func_id, prog);
	}
}

const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
	.get_func_proto		= syscall_prog_func_proto,
	.is_valid_access	= syscall_prog_is_valid_access,
};

const struct bpf_prog_ops bpf_syscall_prog_ops = {
	.test_run	= bpf_prog_test_run_syscall,
};

#ifdef CONFIG_SYSCTL
static int bpf_stats_handler(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct static_key *key = (struct static_key *)table->data;
	static int saved_val;
	int val, ret;
	struct ctl_table tmp = {
		.data   = &val,
		.maxlen = sizeof(val),
		.mode   = table->mode,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	};

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&bpf_stats_enabled_mutex);
	val = saved_val;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret && val != saved_val) {
		if (val)
			static_key_slow_inc(key);
		else
			static_key_slow_dec(key);
		saved_val = val;
	}
	mutex_unlock(&bpf_stats_enabled_mutex);
	return ret;
}

void __weak unpriv_ebpf_notify(int new_state)
{
}

static int bpf_unpriv_handler(struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, unpriv_enable = *(int *)table->data;
	bool locked_state = unpriv_enable == 1;
	struct ctl_table tmp = *table;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	tmp.data = &unpriv_enable;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret) {
		if (locked_state && unpriv_enable != 1)
			return -EPERM;
		*(int *)table->data = unpriv_enable;
	}

	unpriv_ebpf_notify(unpriv_enable);

	return ret;
}
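
/*
 * kernel.unprivileged_bpf_disabled understands three values: 0 (bpf()
 * is available to unprivileged users), 1 (disabled and locked:
 * bpf_unpriv_handler() above rejects any write that would leave state
 * 1), and 2 (disabled, but an admin may still write a different
 * value).  E.g. "sysctl -w kernel.unprivileged_bpf_disabled=2"
 * disables unprivileged BPF while keeping the decision revertable.
 */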
static struct ctl_table bpf_syscall_table[] = {
	{
		.procname	= "unprivileged_bpf_disabled",
		.data		= &sysctl_unprivileged_bpf_disabled,
		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
		.mode		= 0644,
		.proc_handler	= bpf_unpriv_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "bpf_stats_enabled",
		.data		= &bpf_stats_enabled_key.key,
		.maxlen		= sizeof(bpf_stats_enabled_key),
		.mode		= 0644,
		.proc_handler	= bpf_stats_handler,
	},
	{ }
};

static int __init bpf_syscall_sysctl_init(void)
{
	register_sysctl_init("kernel", bpf_syscall_table);
	return 0;
}
late_initcall(bpf_syscall_sysctl_init);
#endif /* CONFIG_SYSCTL */