1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #include <linux/bpf.h> 5 #include <linux/bpf-cgroup.h> 6 #include <linux/bpf_trace.h> 7 #include <linux/bpf_lirc.h> 8 #include <linux/bpf_verifier.h> 9 #include <linux/bsearch.h> 10 #include <linux/btf.h> 11 #include <linux/syscalls.h> 12 #include <linux/slab.h> 13 #include <linux/sched/signal.h> 14 #include <linux/vmalloc.h> 15 #include <linux/mmzone.h> 16 #include <linux/anon_inodes.h> 17 #include <linux/fdtable.h> 18 #include <linux/file.h> 19 #include <linux/fs.h> 20 #include <linux/license.h> 21 #include <linux/filter.h> 22 #include <linux/kernel.h> 23 #include <linux/idr.h> 24 #include <linux/cred.h> 25 #include <linux/timekeeping.h> 26 #include <linux/ctype.h> 27 #include <linux/nospec.h> 28 #include <linux/audit.h> 29 #include <uapi/linux/btf.h> 30 #include <linux/pgtable.h> 31 #include <linux/bpf_lsm.h> 32 #include <linux/poll.h> 33 #include <linux/sort.h> 34 #include <linux/bpf-netns.h> 35 #include <linux/rcupdate_trace.h> 36 #include <linux/memcontrol.h> 37 #include <linux/trace_events.h> 38 39 #include <net/netfilter/nf_bpf_link.h> 40 #include <net/netkit.h> 41 #include <net/tcx.h> 42 43 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ 44 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ 45 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 46 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY) 47 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) 48 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \ 49 IS_FD_HASH(map)) 50 51 #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY) 52 53 DEFINE_PER_CPU(int, bpf_prog_active); 54 static DEFINE_IDR(prog_idr); 55 static DEFINE_SPINLOCK(prog_idr_lock); 56 static DEFINE_IDR(map_idr); 57 static DEFINE_SPINLOCK(map_idr_lock); 58 static DEFINE_IDR(link_idr); 59 static DEFINE_SPINLOCK(link_idr_lock); 60 61 int sysctl_unprivileged_bpf_disabled __read_mostly = 62 IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0; 63 64 static const struct bpf_map_ops * const bpf_map_types[] = { 65 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 66 #define BPF_MAP_TYPE(_id, _ops) \ 67 [_id] = &_ops, 68 #define BPF_LINK_TYPE(_id, _name) 69 #include <linux/bpf_types.h> 70 #undef BPF_PROG_TYPE 71 #undef BPF_MAP_TYPE 72 #undef BPF_LINK_TYPE 73 }; 74 75 /* 76 * If we're handed a bigger struct than we know of, ensure all the unknown bits 77 * are 0 - i.e. new user-space does not rely on any kernel feature extensions 78 * we don't know about yet. 79 * 80 * There is a ToCToU between this function call and the following 81 * copy_from_user() call. However, this is not a concern since this function is 82 * meant to be a future-proofing of bits. 83 */ 84 int bpf_check_uarg_tail_zero(bpfptr_t uaddr, 85 size_t expected_size, 86 size_t actual_size) 87 { 88 int res; 89 90 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ 91 return -E2BIG; 92 93 if (actual_size <= expected_size) 94 return 0; 95 96 if (uaddr.is_kernel) 97 res = memchr_inv(uaddr.kernel + expected_size, 0, 98 actual_size - expected_size) == NULL; 99 else 100 res = check_zeroed_user(uaddr.user + expected_size, 101 actual_size - expected_size); 102 if (res < 0) 103 return res; 104 return res ? 
0 : -E2BIG; 105 } 106 107 const struct bpf_map_ops bpf_map_offload_ops = { 108 .map_meta_equal = bpf_map_meta_equal, 109 .map_alloc = bpf_map_offload_map_alloc, 110 .map_free = bpf_map_offload_map_free, 111 .map_check_btf = map_check_no_btf, 112 .map_mem_usage = bpf_map_offload_map_mem_usage, 113 }; 114 115 static void bpf_map_write_active_inc(struct bpf_map *map) 116 { 117 atomic64_inc(&map->writecnt); 118 } 119 120 static void bpf_map_write_active_dec(struct bpf_map *map) 121 { 122 atomic64_dec(&map->writecnt); 123 } 124 125 bool bpf_map_write_active(const struct bpf_map *map) 126 { 127 return atomic64_read(&map->writecnt) != 0; 128 } 129 130 static u32 bpf_map_value_size(const struct bpf_map *map) 131 { 132 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 133 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || 134 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || 135 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 136 return round_up(map->value_size, 8) * num_possible_cpus(); 137 else if (IS_FD_MAP(map)) 138 return sizeof(u32); 139 else 140 return map->value_size; 141 } 142 143 static void maybe_wait_bpf_programs(struct bpf_map *map) 144 { 145 /* Wait for any running non-sleepable BPF programs to complete so that 146 * userspace, when we return to it, knows that all non-sleepable 147 * programs that could be running use the new map value. For sleepable 148 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait 149 * for the completions of these programs, but considering the waiting 150 * time can be very long and userspace may think it will hang forever, 151 * so don't handle sleepable BPF programs now. 152 */ 153 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || 154 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 155 synchronize_rcu(); 156 } 157 158 static void unpin_uptr_kaddr(void *kaddr) 159 { 160 if (kaddr) 161 unpin_user_page(virt_to_page(kaddr)); 162 } 163 164 static void __bpf_obj_unpin_uptrs(struct btf_record *rec, u32 cnt, void *obj) 165 { 166 const struct btf_field *field; 167 void **uptr_addr; 168 int i; 169 170 for (i = 0, field = rec->fields; i < cnt; i++, field++) { 171 if (field->type != BPF_UPTR) 172 continue; 173 174 uptr_addr = obj + field->offset; 175 unpin_uptr_kaddr(*uptr_addr); 176 } 177 } 178 179 static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *obj) 180 { 181 if (!btf_record_has_field(rec, BPF_UPTR)) 182 return; 183 184 __bpf_obj_unpin_uptrs(rec, rec->cnt, obj); 185 } 186 187 static int bpf_obj_pin_uptrs(struct btf_record *rec, void *obj) 188 { 189 const struct btf_field *field; 190 const struct btf_type *t; 191 unsigned long start, end; 192 struct page *page; 193 void **uptr_addr; 194 int i, err; 195 196 if (!btf_record_has_field(rec, BPF_UPTR)) 197 return 0; 198 199 for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { 200 if (field->type != BPF_UPTR) 201 continue; 202 203 uptr_addr = obj + field->offset; 204 start = *(unsigned long *)uptr_addr; 205 if (!start) 206 continue; 207 208 t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id); 209 /* t->size was checked for zero before */ 210 if (check_add_overflow(start, t->size - 1, &end)) { 211 err = -EFAULT; 212 goto unpin_all; 213 } 214 215 /* The uptr's struct cannot span across two pages */ 216 if ((start & PAGE_MASK) != (end & PAGE_MASK)) { 217 err = -EOPNOTSUPP; 218 goto unpin_all; 219 } 220 221 err = pin_user_pages_fast(start, 1, FOLL_LONGTERM | FOLL_WRITE, &page); 222 if (err != 1) 223 goto unpin_all; 224 225 if (PageHighMem(page)) { 226 err = -EOPNOTSUPP; 227 
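		/*
		 * page_address() below assumes a lowmem page in the kernel's
		 * linear mapping, so highmem pages are rejected here instead
		 * of being kmap()'ed.  The single-page check above works on the
		 * same addresses: with 4K pages, for example, start = 0x1fff0
		 * and t->size = 0x20 give end = 0x2000f, which lies on a
		 * different page than start, so such an update fails with
		 * -EOPNOTSUPP.
		 */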
unpin_user_page(page); 228 goto unpin_all; 229 } 230 231 *uptr_addr = page_address(page) + offset_in_page(start); 232 } 233 234 return 0; 235 236 unpin_all: 237 __bpf_obj_unpin_uptrs(rec, i, obj); 238 return err; 239 } 240 241 static int bpf_map_update_value(struct bpf_map *map, struct file *map_file, 242 void *key, void *value, __u64 flags) 243 { 244 int err; 245 246 /* Need to create a kthread, thus must support schedule */ 247 if (bpf_map_is_offloaded(map)) { 248 return bpf_map_offload_update_elem(map, key, value, flags); 249 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || 250 map->map_type == BPF_MAP_TYPE_ARENA || 251 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 252 return map->ops->map_update_elem(map, key, value, flags); 253 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || 254 map->map_type == BPF_MAP_TYPE_SOCKMAP) { 255 return sock_map_update_elem_sys(map, key, value, flags); 256 } else if (IS_FD_PROG_ARRAY(map)) { 257 return bpf_fd_array_map_update_elem(map, map_file, key, value, 258 flags); 259 } 260 261 bpf_disable_instrumentation(); 262 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 263 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 264 err = bpf_percpu_hash_update(map, key, value, flags); 265 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 266 err = bpf_percpu_array_update(map, key, value, flags); 267 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 268 err = bpf_percpu_cgroup_storage_update(map, key, value, 269 flags); 270 } else if (IS_FD_ARRAY(map)) { 271 err = bpf_fd_array_map_update_elem(map, map_file, key, value, 272 flags); 273 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { 274 err = bpf_fd_htab_map_update_elem(map, map_file, key, value, 275 flags); 276 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 277 /* rcu_read_lock() is not needed */ 278 err = bpf_fd_reuseport_array_update_elem(map, key, value, 279 flags); 280 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || 281 map->map_type == BPF_MAP_TYPE_STACK || 282 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { 283 err = map->ops->map_push_elem(map, value, flags); 284 } else { 285 err = bpf_obj_pin_uptrs(map->record, value); 286 if (!err) { 287 rcu_read_lock(); 288 err = map->ops->map_update_elem(map, key, value, flags); 289 rcu_read_unlock(); 290 if (err) 291 bpf_obj_unpin_uptrs(map->record, value); 292 } 293 } 294 bpf_enable_instrumentation(); 295 296 return err; 297 } 298 299 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, 300 __u64 flags) 301 { 302 void *ptr; 303 int err; 304 305 if (bpf_map_is_offloaded(map)) 306 return bpf_map_offload_lookup_elem(map, key, value); 307 308 bpf_disable_instrumentation(); 309 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 310 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 311 err = bpf_percpu_hash_copy(map, key, value); 312 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 313 err = bpf_percpu_array_copy(map, key, value); 314 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 315 err = bpf_percpu_cgroup_storage_copy(map, key, value); 316 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { 317 err = bpf_stackmap_copy(map, key, value); 318 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) { 319 err = bpf_fd_array_map_lookup_elem(map, key, value); 320 } else if (IS_FD_HASH(map)) { 321 err = bpf_fd_htab_map_lookup_elem(map, key, value); 322 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 323 err = bpf_fd_reuseport_array_lookup_elem(map, key, value); 324 } 
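	/*
	 * For the per-cpu cases above, the destination buffer is sized by
	 * bpf_map_value_size(): value_size rounded up to 8 bytes times
	 * num_possible_cpus().  For example, a 12-byte per-cpu value on a
	 * system with 8 possible CPUs needs a 16 * 8 = 128 byte buffer,
	 * filled with one 16-byte slot per possible CPU.
	 */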
else if (map->map_type == BPF_MAP_TYPE_QUEUE || 325 map->map_type == BPF_MAP_TYPE_STACK || 326 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { 327 err = map->ops->map_peek_elem(map, value); 328 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 329 /* struct_ops map requires directly updating "value" */ 330 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); 331 } else { 332 rcu_read_lock(); 333 if (map->ops->map_lookup_elem_sys_only) 334 ptr = map->ops->map_lookup_elem_sys_only(map, key); 335 else 336 ptr = map->ops->map_lookup_elem(map, key); 337 if (IS_ERR(ptr)) { 338 err = PTR_ERR(ptr); 339 } else if (!ptr) { 340 err = -ENOENT; 341 } else { 342 err = 0; 343 if (flags & BPF_F_LOCK) 344 /* lock 'ptr' and copy everything but lock */ 345 copy_map_value_locked(map, value, ptr, true); 346 else 347 copy_map_value(map, value, ptr); 348 /* mask lock and timer, since value wasn't zero inited */ 349 check_and_init_map_value(map, value); 350 } 351 rcu_read_unlock(); 352 } 353 354 bpf_enable_instrumentation(); 355 356 return err; 357 } 358 359 /* Please, do not use this function outside from the map creation path 360 * (e.g. in map update path) without taking care of setting the active 361 * memory cgroup (see at bpf_map_kmalloc_node() for example). 362 */ 363 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable) 364 { 365 /* We really just want to fail instead of triggering OOM killer 366 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, 367 * which is used for lower order allocation requests. 368 * 369 * It has been observed that higher order allocation requests done by 370 * vmalloc with __GFP_NORETRY being set might fail due to not trying 371 * to reclaim memory from the page cache, thus we set 372 * __GFP_RETRY_MAYFAIL to avoid such situations. 373 */ 374 375 gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO); 376 unsigned int flags = 0; 377 unsigned long align = 1; 378 void *area; 379 380 if (size >= SIZE_MAX) 381 return NULL; 382 383 /* kmalloc()'ed memory can't be mmap()'ed */ 384 if (mmapable) { 385 BUG_ON(!PAGE_ALIGNED(size)); 386 align = SHMLBA; 387 flags = VM_USERMAP; 388 } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 389 area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY, 390 numa_node); 391 if (area != NULL) 392 return area; 393 } 394 395 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 396 gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL, 397 flags, numa_node, __builtin_return_address(0)); 398 } 399 400 void *bpf_map_area_alloc(u64 size, int numa_node) 401 { 402 return __bpf_map_area_alloc(size, numa_node, false); 403 } 404 405 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node) 406 { 407 return __bpf_map_area_alloc(size, numa_node, true); 408 } 409 410 void bpf_map_area_free(void *area) 411 { 412 kvfree(area); 413 } 414 415 static u32 bpf_map_flags_retain_permanent(u32 flags) 416 { 417 /* Some map creation flags are not tied to the map object but 418 * rather to the map fd instead, so they have no meaning upon 419 * map object inspection since multiple file descriptors with 420 * different (access) properties can exist here. Thus, given 421 * this has zero meaning for the map itself, lets clear these 422 * from here. 
423 */ 424 return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY); 425 } 426 427 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) 428 { 429 map->map_type = attr->map_type; 430 map->key_size = attr->key_size; 431 map->value_size = attr->value_size; 432 map->max_entries = attr->max_entries; 433 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); 434 map->numa_node = bpf_map_attr_numa_node(attr); 435 map->map_extra = attr->map_extra; 436 } 437 438 static int bpf_map_alloc_id(struct bpf_map *map) 439 { 440 int id; 441 442 idr_preload(GFP_KERNEL); 443 spin_lock_bh(&map_idr_lock); 444 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); 445 if (id > 0) 446 map->id = id; 447 spin_unlock_bh(&map_idr_lock); 448 idr_preload_end(); 449 450 if (WARN_ON_ONCE(!id)) 451 return -ENOSPC; 452 453 return id > 0 ? 0 : id; 454 } 455 456 void bpf_map_free_id(struct bpf_map *map) 457 { 458 unsigned long flags; 459 460 /* Offloaded maps are removed from the IDR store when their device 461 * disappears - even if someone holds an fd to them they are unusable, 462 * the memory is gone, all ops will fail; they are simply waiting for 463 * refcnt to drop to be freed. 464 */ 465 if (!map->id) 466 return; 467 468 spin_lock_irqsave(&map_idr_lock, flags); 469 470 idr_remove(&map_idr, map->id); 471 map->id = 0; 472 473 spin_unlock_irqrestore(&map_idr_lock, flags); 474 } 475 476 #ifdef CONFIG_MEMCG 477 static void bpf_map_save_memcg(struct bpf_map *map) 478 { 479 /* Currently if a map is created by a process belonging to the root 480 * memory cgroup, get_obj_cgroup_from_current() will return NULL. 481 * So we have to check map->objcg for being NULL each time it's 482 * being used. 483 */ 484 if (memcg_bpf_enabled()) 485 map->objcg = get_obj_cgroup_from_current(); 486 } 487 488 static void bpf_map_release_memcg(struct bpf_map *map) 489 { 490 if (map->objcg) 491 obj_cgroup_put(map->objcg); 492 } 493 494 static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map) 495 { 496 if (map->objcg) 497 return get_mem_cgroup_from_objcg(map->objcg); 498 499 return root_mem_cgroup; 500 } 501 502 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, 503 int node) 504 { 505 struct mem_cgroup *memcg, *old_memcg; 506 void *ptr; 507 508 memcg = bpf_map_get_memcg(map); 509 old_memcg = set_active_memcg(memcg); 510 ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node); 511 set_active_memcg(old_memcg); 512 mem_cgroup_put(memcg); 513 514 return ptr; 515 } 516 517 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) 518 { 519 struct mem_cgroup *memcg, *old_memcg; 520 void *ptr; 521 522 memcg = bpf_map_get_memcg(map); 523 old_memcg = set_active_memcg(memcg); 524 ptr = kzalloc(size, flags | __GFP_ACCOUNT); 525 set_active_memcg(old_memcg); 526 mem_cgroup_put(memcg); 527 528 return ptr; 529 } 530 531 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, 532 gfp_t flags) 533 { 534 struct mem_cgroup *memcg, *old_memcg; 535 void *ptr; 536 537 memcg = bpf_map_get_memcg(map); 538 old_memcg = set_active_memcg(memcg); 539 ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT); 540 set_active_memcg(old_memcg); 541 mem_cgroup_put(memcg); 542 543 return ptr; 544 } 545 546 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, 547 size_t align, gfp_t flags) 548 { 549 struct mem_cgroup *memcg, *old_memcg; 550 void __percpu *ptr; 551 552 memcg = bpf_map_get_memcg(map); 553 old_memcg = set_active_memcg(memcg); 554 ptr = __alloc_percpu_gfp(size, 
align, flags | __GFP_ACCOUNT); 555 set_active_memcg(old_memcg); 556 mem_cgroup_put(memcg); 557 558 return ptr; 559 } 560 561 #else 562 static void bpf_map_save_memcg(struct bpf_map *map) 563 { 564 } 565 566 static void bpf_map_release_memcg(struct bpf_map *map) 567 { 568 } 569 #endif 570 571 int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid, 572 unsigned long nr_pages, struct page **pages) 573 { 574 unsigned long i, j; 575 struct page *pg; 576 int ret = 0; 577 #ifdef CONFIG_MEMCG 578 struct mem_cgroup *memcg, *old_memcg; 579 580 memcg = bpf_map_get_memcg(map); 581 old_memcg = set_active_memcg(memcg); 582 #endif 583 for (i = 0; i < nr_pages; i++) { 584 pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0); 585 586 if (pg) { 587 pages[i] = pg; 588 continue; 589 } 590 for (j = 0; j < i; j++) 591 __free_page(pages[j]); 592 ret = -ENOMEM; 593 break; 594 } 595 596 #ifdef CONFIG_MEMCG 597 set_active_memcg(old_memcg); 598 mem_cgroup_put(memcg); 599 #endif 600 return ret; 601 } 602 603 604 static int btf_field_cmp(const void *a, const void *b) 605 { 606 const struct btf_field *f1 = a, *f2 = b; 607 608 if (f1->offset < f2->offset) 609 return -1; 610 else if (f1->offset > f2->offset) 611 return 1; 612 return 0; 613 } 614 615 struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset, 616 u32 field_mask) 617 { 618 struct btf_field *field; 619 620 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask)) 621 return NULL; 622 field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp); 623 if (!field || !(field->type & field_mask)) 624 return NULL; 625 return field; 626 } 627 628 void btf_record_free(struct btf_record *rec) 629 { 630 int i; 631 632 if (IS_ERR_OR_NULL(rec)) 633 return; 634 for (i = 0; i < rec->cnt; i++) { 635 switch (rec->fields[i].type) { 636 case BPF_KPTR_UNREF: 637 case BPF_KPTR_REF: 638 case BPF_KPTR_PERCPU: 639 case BPF_UPTR: 640 if (rec->fields[i].kptr.module) 641 module_put(rec->fields[i].kptr.module); 642 if (btf_is_kernel(rec->fields[i].kptr.btf)) 643 btf_put(rec->fields[i].kptr.btf); 644 break; 645 case BPF_LIST_HEAD: 646 case BPF_LIST_NODE: 647 case BPF_RB_ROOT: 648 case BPF_RB_NODE: 649 case BPF_SPIN_LOCK: 650 case BPF_TIMER: 651 case BPF_REFCOUNT: 652 case BPF_WORKQUEUE: 653 /* Nothing to release */ 654 break; 655 default: 656 WARN_ON_ONCE(1); 657 continue; 658 } 659 } 660 kfree(rec); 661 } 662 663 void bpf_map_free_record(struct bpf_map *map) 664 { 665 btf_record_free(map->record); 666 map->record = NULL; 667 } 668 669 struct btf_record *btf_record_dup(const struct btf_record *rec) 670 { 671 const struct btf_field *fields; 672 struct btf_record *new_rec; 673 int ret, size, i; 674 675 if (IS_ERR_OR_NULL(rec)) 676 return NULL; 677 size = offsetof(struct btf_record, fields[rec->cnt]); 678 new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN); 679 if (!new_rec) 680 return ERR_PTR(-ENOMEM); 681 /* Do a deep copy of the btf_record */ 682 fields = rec->fields; 683 new_rec->cnt = 0; 684 for (i = 0; i < rec->cnt; i++) { 685 switch (fields[i].type) { 686 case BPF_KPTR_UNREF: 687 case BPF_KPTR_REF: 688 case BPF_KPTR_PERCPU: 689 case BPF_UPTR: 690 if (btf_is_kernel(fields[i].kptr.btf)) 691 btf_get(fields[i].kptr.btf); 692 if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) { 693 ret = -ENXIO; 694 goto free; 695 } 696 break; 697 case BPF_LIST_HEAD: 698 case BPF_LIST_NODE: 699 case BPF_RB_ROOT: 700 case BPF_RB_NODE: 701 case BPF_SPIN_LOCK: 702 case BPF_TIMER: 703 case BPF_REFCOUNT: 704 case BPF_WORKQUEUE: 
705 /* Nothing to acquire */ 706 break; 707 default: 708 ret = -EFAULT; 709 WARN_ON_ONCE(1); 710 goto free; 711 } 712 new_rec->cnt++; 713 } 714 return new_rec; 715 free: 716 btf_record_free(new_rec); 717 return ERR_PTR(ret); 718 } 719 720 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b) 721 { 722 bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b); 723 int size; 724 725 if (!a_has_fields && !b_has_fields) 726 return true; 727 if (a_has_fields != b_has_fields) 728 return false; 729 if (rec_a->cnt != rec_b->cnt) 730 return false; 731 size = offsetof(struct btf_record, fields[rec_a->cnt]); 732 /* btf_parse_fields uses kzalloc to allocate a btf_record, so unused 733 * members are zeroed out. So memcmp is safe to do without worrying 734 * about padding/unused fields. 735 * 736 * While spin_lock, timer, and kptr have no relation to map BTF, 737 * list_head metadata is specific to map BTF, the btf and value_rec 738 * members in particular. btf is the map BTF, while value_rec points to 739 * btf_record in that map BTF. 740 * 741 * So while by default, we don't rely on the map BTF (which the records 742 * were parsed from) matching for both records, which is not backwards 743 * compatible, in case list_head is part of it, we implicitly rely on 744 * that by way of depending on memcmp succeeding for it. 745 */ 746 return !memcmp(rec_a, rec_b, size); 747 } 748 749 void bpf_obj_free_timer(const struct btf_record *rec, void *obj) 750 { 751 if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER))) 752 return; 753 bpf_timer_cancel_and_free(obj + rec->timer_off); 754 } 755 756 void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj) 757 { 758 if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE))) 759 return; 760 bpf_wq_cancel_and_free(obj + rec->wq_off); 761 } 762 763 void bpf_obj_free_fields(const struct btf_record *rec, void *obj) 764 { 765 const struct btf_field *fields; 766 int i; 767 768 if (IS_ERR_OR_NULL(rec)) 769 return; 770 fields = rec->fields; 771 for (i = 0; i < rec->cnt; i++) { 772 struct btf_struct_meta *pointee_struct_meta; 773 const struct btf_field *field = &fields[i]; 774 void *field_ptr = obj + field->offset; 775 void *xchgd_field; 776 777 switch (fields[i].type) { 778 case BPF_SPIN_LOCK: 779 break; 780 case BPF_TIMER: 781 bpf_timer_cancel_and_free(field_ptr); 782 break; 783 case BPF_WORKQUEUE: 784 bpf_wq_cancel_and_free(field_ptr); 785 break; 786 case BPF_KPTR_UNREF: 787 WRITE_ONCE(*(u64 *)field_ptr, 0); 788 break; 789 case BPF_KPTR_REF: 790 case BPF_KPTR_PERCPU: 791 xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0); 792 if (!xchgd_field) 793 break; 794 795 if (!btf_is_kernel(field->kptr.btf)) { 796 pointee_struct_meta = btf_find_struct_meta(field->kptr.btf, 797 field->kptr.btf_id); 798 migrate_disable(); 799 __bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ? 
800 pointee_struct_meta->record : NULL, 801 fields[i].type == BPF_KPTR_PERCPU); 802 migrate_enable(); 803 } else { 804 field->kptr.dtor(xchgd_field); 805 } 806 break; 807 case BPF_UPTR: 808 /* The caller ensured that no one is using the uptr */ 809 unpin_uptr_kaddr(*(void **)field_ptr); 810 break; 811 case BPF_LIST_HEAD: 812 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) 813 continue; 814 bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off); 815 break; 816 case BPF_RB_ROOT: 817 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) 818 continue; 819 bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off); 820 break; 821 case BPF_LIST_NODE: 822 case BPF_RB_NODE: 823 case BPF_REFCOUNT: 824 break; 825 default: 826 WARN_ON_ONCE(1); 827 continue; 828 } 829 } 830 } 831 832 static void bpf_map_free(struct bpf_map *map) 833 { 834 struct btf_record *rec = map->record; 835 struct btf *btf = map->btf; 836 837 /* implementation dependent freeing */ 838 map->ops->map_free(map); 839 /* Delay freeing of btf_record for maps, as map_free 840 * callback usually needs access to them. It is better to do it here 841 * than require each callback to do the free itself manually. 842 * 843 * Note that the btf_record stashed in map->inner_map_meta->record was 844 * already freed using the map_free callback for map in map case which 845 * eventually calls bpf_map_free_meta, since inner_map_meta is only a 846 * template bpf_map struct used during verification. 847 */ 848 btf_record_free(rec); 849 /* Delay freeing of btf for maps, as map_free callback may need 850 * struct_meta info which will be freed with btf_put(). 851 */ 852 btf_put(btf); 853 } 854 855 /* called from workqueue */ 856 static void bpf_map_free_deferred(struct work_struct *work) 857 { 858 struct bpf_map *map = container_of(work, struct bpf_map, work); 859 860 security_bpf_map_free(map); 861 bpf_map_release_memcg(map); 862 bpf_map_free(map); 863 } 864 865 static void bpf_map_put_uref(struct bpf_map *map) 866 { 867 if (atomic64_dec_and_test(&map->usercnt)) { 868 if (map->ops->map_release_uref) 869 map->ops->map_release_uref(map); 870 } 871 } 872 873 static void bpf_map_free_in_work(struct bpf_map *map) 874 { 875 INIT_WORK(&map->work, bpf_map_free_deferred); 876 /* Avoid spawning kworkers, since they all might contend 877 * for the same mutex like slab_mutex. 
878 */ 879 queue_work(system_unbound_wq, &map->work); 880 } 881 882 static void bpf_map_free_rcu_gp(struct rcu_head *rcu) 883 { 884 bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu)); 885 } 886 887 static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu) 888 { 889 if (rcu_trace_implies_rcu_gp()) 890 bpf_map_free_rcu_gp(rcu); 891 else 892 call_rcu(rcu, bpf_map_free_rcu_gp); 893 } 894 895 /* decrement map refcnt and schedule it for freeing via workqueue 896 * (underlying map implementation ops->map_free() might sleep) 897 */ 898 void bpf_map_put(struct bpf_map *map) 899 { 900 if (atomic64_dec_and_test(&map->refcnt)) { 901 /* bpf_map_free_id() must be called first */ 902 bpf_map_free_id(map); 903 904 WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt)); 905 if (READ_ONCE(map->free_after_mult_rcu_gp)) 906 call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp); 907 else if (READ_ONCE(map->free_after_rcu_gp)) 908 call_rcu(&map->rcu, bpf_map_free_rcu_gp); 909 else 910 bpf_map_free_in_work(map); 911 } 912 } 913 EXPORT_SYMBOL_GPL(bpf_map_put); 914 915 void bpf_map_put_with_uref(struct bpf_map *map) 916 { 917 bpf_map_put_uref(map); 918 bpf_map_put(map); 919 } 920 921 static int bpf_map_release(struct inode *inode, struct file *filp) 922 { 923 struct bpf_map *map = filp->private_data; 924 925 if (map->ops->map_release) 926 map->ops->map_release(map, filp); 927 928 bpf_map_put_with_uref(map); 929 return 0; 930 } 931 932 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) 933 { 934 fmode_t mode = fd_file(f)->f_mode; 935 936 /* Our file permissions may have been overridden by global 937 * map permissions facing syscall side. 938 */ 939 if (READ_ONCE(map->frozen)) 940 mode &= ~FMODE_CAN_WRITE; 941 return mode; 942 } 943 944 #ifdef CONFIG_PROC_FS 945 /* Show the memory usage of a bpf map */ 946 static u64 bpf_map_memory_usage(const struct bpf_map *map) 947 { 948 return map->ops->map_mem_usage(map); 949 } 950 951 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) 952 { 953 struct bpf_map *map = filp->private_data; 954 u32 type = 0, jited = 0; 955 956 if (map_type_contains_progs(map)) { 957 spin_lock(&map->owner.lock); 958 type = map->owner.type; 959 jited = map->owner.jited; 960 spin_unlock(&map->owner.lock); 961 } 962 963 seq_printf(m, 964 "map_type:\t%u\n" 965 "key_size:\t%u\n" 966 "value_size:\t%u\n" 967 "max_entries:\t%u\n" 968 "map_flags:\t%#x\n" 969 "map_extra:\t%#llx\n" 970 "memlock:\t%llu\n" 971 "map_id:\t%u\n" 972 "frozen:\t%u\n", 973 map->map_type, 974 map->key_size, 975 map->value_size, 976 map->max_entries, 977 map->map_flags, 978 (unsigned long long)map->map_extra, 979 bpf_map_memory_usage(map), 980 map->id, 981 READ_ONCE(map->frozen)); 982 if (type) { 983 seq_printf(m, "owner_prog_type:\t%u\n", type); 984 seq_printf(m, "owner_jited:\t%u\n", jited); 985 } 986 } 987 #endif 988 989 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz, 990 loff_t *ppos) 991 { 992 /* We need this handler such that alloc_file() enables 993 * f_mode with FMODE_CAN_READ. 994 */ 995 return -EINVAL; 996 } 997 998 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf, 999 size_t siz, loff_t *ppos) 1000 { 1001 /* We need this handler such that alloc_file() enables 1002 * f_mode with FMODE_CAN_WRITE. 
1003 */ 1004 return -EINVAL; 1005 } 1006 1007 /* called for any extra memory-mapped regions (except initial) */ 1008 static void bpf_map_mmap_open(struct vm_area_struct *vma) 1009 { 1010 struct bpf_map *map = vma->vm_file->private_data; 1011 1012 if (vma->vm_flags & VM_MAYWRITE) 1013 bpf_map_write_active_inc(map); 1014 } 1015 1016 /* called for all unmapped memory region (including initial) */ 1017 static void bpf_map_mmap_close(struct vm_area_struct *vma) 1018 { 1019 struct bpf_map *map = vma->vm_file->private_data; 1020 1021 if (vma->vm_flags & VM_MAYWRITE) 1022 bpf_map_write_active_dec(map); 1023 } 1024 1025 static const struct vm_operations_struct bpf_map_default_vmops = { 1026 .open = bpf_map_mmap_open, 1027 .close = bpf_map_mmap_close, 1028 }; 1029 1030 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) 1031 { 1032 struct bpf_map *map = filp->private_data; 1033 int err; 1034 1035 if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record)) 1036 return -ENOTSUPP; 1037 1038 if (!(vma->vm_flags & VM_SHARED)) 1039 return -EINVAL; 1040 1041 mutex_lock(&map->freeze_mutex); 1042 1043 if (vma->vm_flags & VM_WRITE) { 1044 if (map->frozen) { 1045 err = -EPERM; 1046 goto out; 1047 } 1048 /* map is meant to be read-only, so do not allow mapping as 1049 * writable, because it's possible to leak a writable page 1050 * reference and allows user-space to still modify it after 1051 * freezing, while verifier will assume contents do not change 1052 */ 1053 if (map->map_flags & BPF_F_RDONLY_PROG) { 1054 err = -EACCES; 1055 goto out; 1056 } 1057 } 1058 1059 /* set default open/close callbacks */ 1060 vma->vm_ops = &bpf_map_default_vmops; 1061 vma->vm_private_data = map; 1062 vm_flags_clear(vma, VM_MAYEXEC); 1063 if (!(vma->vm_flags & VM_WRITE)) 1064 /* disallow re-mapping with PROT_WRITE */ 1065 vm_flags_clear(vma, VM_MAYWRITE); 1066 1067 err = map->ops->map_mmap(map, vma); 1068 if (err) 1069 goto out; 1070 1071 if (vma->vm_flags & VM_MAYWRITE) 1072 bpf_map_write_active_inc(map); 1073 out: 1074 mutex_unlock(&map->freeze_mutex); 1075 return err; 1076 } 1077 1078 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts) 1079 { 1080 struct bpf_map *map = filp->private_data; 1081 1082 if (map->ops->map_poll) 1083 return map->ops->map_poll(map, filp, pts); 1084 1085 return EPOLLERR; 1086 } 1087 1088 static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr, 1089 unsigned long len, unsigned long pgoff, 1090 unsigned long flags) 1091 { 1092 struct bpf_map *map = filp->private_data; 1093 1094 if (map->ops->map_get_unmapped_area) 1095 return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags); 1096 #ifdef CONFIG_MMU 1097 return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); 1098 #else 1099 return addr; 1100 #endif 1101 } 1102 1103 const struct file_operations bpf_map_fops = { 1104 #ifdef CONFIG_PROC_FS 1105 .show_fdinfo = bpf_map_show_fdinfo, 1106 #endif 1107 .release = bpf_map_release, 1108 .read = bpf_dummy_read, 1109 .write = bpf_dummy_write, 1110 .mmap = bpf_map_mmap, 1111 .poll = bpf_map_poll, 1112 .get_unmapped_area = bpf_get_unmapped_area, 1113 }; 1114 1115 int bpf_map_new_fd(struct bpf_map *map, int flags) 1116 { 1117 int ret; 1118 1119 ret = security_bpf_map(map, OPEN_FMODE(flags)); 1120 if (ret < 0) 1121 return ret; 1122 1123 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, 1124 flags | O_CLOEXEC); 1125 } 1126 1127 int bpf_get_file_flag(int flags) 1128 { 1129 if ((flags & BPF_F_RDONLY) && (flags & 
BPF_F_WRONLY)) 1130 return -EINVAL; 1131 if (flags & BPF_F_RDONLY) 1132 return O_RDONLY; 1133 if (flags & BPF_F_WRONLY) 1134 return O_WRONLY; 1135 return O_RDWR; 1136 } 1137 1138 /* helper macro to check that unused fields 'union bpf_attr' are zero */ 1139 #define CHECK_ATTR(CMD) \ 1140 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \ 1141 sizeof(attr->CMD##_LAST_FIELD), 0, \ 1142 sizeof(*attr) - \ 1143 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ 1144 sizeof(attr->CMD##_LAST_FIELD)) != NULL 1145 1146 /* dst and src must have at least "size" number of bytes. 1147 * Return strlen on success and < 0 on error. 1148 */ 1149 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size) 1150 { 1151 const char *end = src + size; 1152 const char *orig_src = src; 1153 1154 memset(dst, 0, size); 1155 /* Copy all isalnum(), '_' and '.' chars. */ 1156 while (src < end && *src) { 1157 if (!isalnum(*src) && 1158 *src != '_' && *src != '.') 1159 return -EINVAL; 1160 *dst++ = *src++; 1161 } 1162 1163 /* No '\0' found in "size" number of bytes */ 1164 if (src == end) 1165 return -EINVAL; 1166 1167 return src - orig_src; 1168 } 1169 1170 int map_check_no_btf(const struct bpf_map *map, 1171 const struct btf *btf, 1172 const struct btf_type *key_type, 1173 const struct btf_type *value_type) 1174 { 1175 return -ENOTSUPP; 1176 } 1177 1178 static int map_check_btf(struct bpf_map *map, struct bpf_token *token, 1179 const struct btf *btf, u32 btf_key_id, u32 btf_value_id) 1180 { 1181 const struct btf_type *key_type, *value_type; 1182 u32 key_size, value_size; 1183 int ret = 0; 1184 1185 /* Some maps allow key to be unspecified. */ 1186 if (btf_key_id) { 1187 key_type = btf_type_id_size(btf, &btf_key_id, &key_size); 1188 if (!key_type || key_size != map->key_size) 1189 return -EINVAL; 1190 } else { 1191 key_type = btf_type_by_id(btf, 0); 1192 if (!map->ops->map_check_btf) 1193 return -EINVAL; 1194 } 1195 1196 value_type = btf_type_id_size(btf, &btf_value_id, &value_size); 1197 if (!value_type || value_size != map->value_size) 1198 return -EINVAL; 1199 1200 map->record = btf_parse_fields(btf, value_type, 1201 BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD | 1202 BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR, 1203 map->value_size); 1204 if (!IS_ERR_OR_NULL(map->record)) { 1205 int i; 1206 1207 if (!bpf_token_capable(token, CAP_BPF)) { 1208 ret = -EPERM; 1209 goto free_map_tab; 1210 } 1211 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) { 1212 ret = -EACCES; 1213 goto free_map_tab; 1214 } 1215 for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) { 1216 switch (map->record->field_mask & (1 << i)) { 1217 case 0: 1218 continue; 1219 case BPF_SPIN_LOCK: 1220 if (map->map_type != BPF_MAP_TYPE_HASH && 1221 map->map_type != BPF_MAP_TYPE_ARRAY && 1222 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 1223 map->map_type != BPF_MAP_TYPE_SK_STORAGE && 1224 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && 1225 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && 1226 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { 1227 ret = -EOPNOTSUPP; 1228 goto free_map_tab; 1229 } 1230 break; 1231 case BPF_TIMER: 1232 case BPF_WORKQUEUE: 1233 if (map->map_type != BPF_MAP_TYPE_HASH && 1234 map->map_type != BPF_MAP_TYPE_LRU_HASH && 1235 map->map_type != BPF_MAP_TYPE_ARRAY) { 1236 ret = -EOPNOTSUPP; 1237 goto free_map_tab; 1238 } 1239 break; 1240 case BPF_KPTR_UNREF: 1241 case BPF_KPTR_REF: 1242 case BPF_KPTR_PERCPU: 1243 case BPF_REFCOUNT: 1244 if (map->map_type != BPF_MAP_TYPE_HASH && 1245 map->map_type != 
BPF_MAP_TYPE_PERCPU_HASH && 1246 map->map_type != BPF_MAP_TYPE_LRU_HASH && 1247 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH && 1248 map->map_type != BPF_MAP_TYPE_ARRAY && 1249 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && 1250 map->map_type != BPF_MAP_TYPE_SK_STORAGE && 1251 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && 1252 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && 1253 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { 1254 ret = -EOPNOTSUPP; 1255 goto free_map_tab; 1256 } 1257 break; 1258 case BPF_UPTR: 1259 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) { 1260 ret = -EOPNOTSUPP; 1261 goto free_map_tab; 1262 } 1263 break; 1264 case BPF_LIST_HEAD: 1265 case BPF_RB_ROOT: 1266 if (map->map_type != BPF_MAP_TYPE_HASH && 1267 map->map_type != BPF_MAP_TYPE_LRU_HASH && 1268 map->map_type != BPF_MAP_TYPE_ARRAY) { 1269 ret = -EOPNOTSUPP; 1270 goto free_map_tab; 1271 } 1272 break; 1273 default: 1274 /* Fail if map_type checks are missing for a field type */ 1275 ret = -EOPNOTSUPP; 1276 goto free_map_tab; 1277 } 1278 } 1279 } 1280 1281 ret = btf_check_and_fixup_fields(btf, map->record); 1282 if (ret < 0) 1283 goto free_map_tab; 1284 1285 if (map->ops->map_check_btf) { 1286 ret = map->ops->map_check_btf(map, btf, key_type, value_type); 1287 if (ret < 0) 1288 goto free_map_tab; 1289 } 1290 1291 return ret; 1292 free_map_tab: 1293 bpf_map_free_record(map); 1294 return ret; 1295 } 1296 1297 static bool bpf_net_capable(void) 1298 { 1299 return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN); 1300 } 1301 1302 #define BPF_MAP_CREATE_LAST_FIELD map_token_fd 1303 /* called via syscall */ 1304 static int map_create(union bpf_attr *attr) 1305 { 1306 const struct bpf_map_ops *ops; 1307 struct bpf_token *token = NULL; 1308 int numa_node = bpf_map_attr_numa_node(attr); 1309 u32 map_type = attr->map_type; 1310 struct bpf_map *map; 1311 bool token_flag; 1312 int f_flags; 1313 int err; 1314 1315 err = CHECK_ATTR(BPF_MAP_CREATE); 1316 if (err) 1317 return -EINVAL; 1318 1319 /* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it 1320 * to avoid per-map type checks tripping on unknown flag 1321 */ 1322 token_flag = attr->map_flags & BPF_F_TOKEN_FD; 1323 attr->map_flags &= ~BPF_F_TOKEN_FD; 1324 1325 if (attr->btf_vmlinux_value_type_id) { 1326 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS || 1327 attr->btf_key_type_id || attr->btf_value_type_id) 1328 return -EINVAL; 1329 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) { 1330 return -EINVAL; 1331 } 1332 1333 if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER && 1334 attr->map_type != BPF_MAP_TYPE_ARENA && 1335 attr->map_extra != 0) 1336 return -EINVAL; 1337 1338 f_flags = bpf_get_file_flag(attr->map_flags); 1339 if (f_flags < 0) 1340 return f_flags; 1341 1342 if (numa_node != NUMA_NO_NODE && 1343 ((unsigned int)numa_node >= nr_node_ids || 1344 !node_online(numa_node))) 1345 return -EINVAL; 1346 1347 /* find map type and init map: hashtable vs rbtree vs bloom vs ... 
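	 *
	 * A minimal userspace sketch (illustrative values, not from this file):
	 * zero a union bpf_attr, set map_type = BPF_MAP_TYPE_HASH, key_size = 4,
	 * value_size = 8 and max_entries = 1024, then call
	 * syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); on success the
	 * return value is a new map fd opened with the f_flags computed above.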
*/ 1348 map_type = attr->map_type; 1349 if (map_type >= ARRAY_SIZE(bpf_map_types)) 1350 return -EINVAL; 1351 map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types)); 1352 ops = bpf_map_types[map_type]; 1353 if (!ops) 1354 return -EINVAL; 1355 1356 if (ops->map_alloc_check) { 1357 err = ops->map_alloc_check(attr); 1358 if (err) 1359 return err; 1360 } 1361 if (attr->map_ifindex) 1362 ops = &bpf_map_offload_ops; 1363 if (!ops->map_mem_usage) 1364 return -EINVAL; 1365 1366 if (token_flag) { 1367 token = bpf_token_get_from_fd(attr->map_token_fd); 1368 if (IS_ERR(token)) 1369 return PTR_ERR(token); 1370 1371 /* if current token doesn't grant map creation permissions, 1372 * then we can't use this token, so ignore it and rely on 1373 * system-wide capabilities checks 1374 */ 1375 if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) || 1376 !bpf_token_allow_map_type(token, attr->map_type)) { 1377 bpf_token_put(token); 1378 token = NULL; 1379 } 1380 } 1381 1382 err = -EPERM; 1383 1384 /* Intent here is for unprivileged_bpf_disabled to block BPF map 1385 * creation for unprivileged users; other actions depend 1386 * on fd availability and access to bpffs, so are dependent on 1387 * object creation success. Even with unprivileged BPF disabled, 1388 * capability checks are still carried out. 1389 */ 1390 if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF)) 1391 goto put_token; 1392 1393 /* check privileged map type permissions */ 1394 switch (map_type) { 1395 case BPF_MAP_TYPE_ARRAY: 1396 case BPF_MAP_TYPE_PERCPU_ARRAY: 1397 case BPF_MAP_TYPE_PROG_ARRAY: 1398 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 1399 case BPF_MAP_TYPE_CGROUP_ARRAY: 1400 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 1401 case BPF_MAP_TYPE_HASH: 1402 case BPF_MAP_TYPE_PERCPU_HASH: 1403 case BPF_MAP_TYPE_HASH_OF_MAPS: 1404 case BPF_MAP_TYPE_RINGBUF: 1405 case BPF_MAP_TYPE_USER_RINGBUF: 1406 case BPF_MAP_TYPE_CGROUP_STORAGE: 1407 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 1408 /* unprivileged */ 1409 break; 1410 case BPF_MAP_TYPE_SK_STORAGE: 1411 case BPF_MAP_TYPE_INODE_STORAGE: 1412 case BPF_MAP_TYPE_TASK_STORAGE: 1413 case BPF_MAP_TYPE_CGRP_STORAGE: 1414 case BPF_MAP_TYPE_BLOOM_FILTER: 1415 case BPF_MAP_TYPE_LPM_TRIE: 1416 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 1417 case BPF_MAP_TYPE_STACK_TRACE: 1418 case BPF_MAP_TYPE_QUEUE: 1419 case BPF_MAP_TYPE_STACK: 1420 case BPF_MAP_TYPE_LRU_HASH: 1421 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 1422 case BPF_MAP_TYPE_STRUCT_OPS: 1423 case BPF_MAP_TYPE_CPUMAP: 1424 case BPF_MAP_TYPE_ARENA: 1425 if (!bpf_token_capable(token, CAP_BPF)) 1426 goto put_token; 1427 break; 1428 case BPF_MAP_TYPE_SOCKMAP: 1429 case BPF_MAP_TYPE_SOCKHASH: 1430 case BPF_MAP_TYPE_DEVMAP: 1431 case BPF_MAP_TYPE_DEVMAP_HASH: 1432 case BPF_MAP_TYPE_XSKMAP: 1433 if (!bpf_token_capable(token, CAP_NET_ADMIN)) 1434 goto put_token; 1435 break; 1436 default: 1437 WARN(1, "unsupported map type %d", map_type); 1438 goto put_token; 1439 } 1440 1441 map = ops->map_alloc(attr); 1442 if (IS_ERR(map)) { 1443 err = PTR_ERR(map); 1444 goto put_token; 1445 } 1446 map->ops = ops; 1447 map->map_type = map_type; 1448 1449 err = bpf_obj_name_cpy(map->name, attr->map_name, 1450 sizeof(attr->map_name)); 1451 if (err < 0) 1452 goto free_map; 1453 1454 atomic64_set(&map->refcnt, 1); 1455 atomic64_set(&map->usercnt, 1); 1456 mutex_init(&map->freeze_mutex); 1457 spin_lock_init(&map->owner.lock); 1458 1459 if (attr->btf_key_type_id || attr->btf_value_type_id || 1460 /* Even the map's value is a kernel's struct, 1461 * the bpf_prog.o must have BTF 
to begin with 1462 * to figure out the corresponding kernel's 1463 * counter part. Thus, attr->btf_fd has 1464 * to be valid also. 1465 */ 1466 attr->btf_vmlinux_value_type_id) { 1467 struct btf *btf; 1468 1469 btf = btf_get_by_fd(attr->btf_fd); 1470 if (IS_ERR(btf)) { 1471 err = PTR_ERR(btf); 1472 goto free_map; 1473 } 1474 if (btf_is_kernel(btf)) { 1475 btf_put(btf); 1476 err = -EACCES; 1477 goto free_map; 1478 } 1479 map->btf = btf; 1480 1481 if (attr->btf_value_type_id) { 1482 err = map_check_btf(map, token, btf, attr->btf_key_type_id, 1483 attr->btf_value_type_id); 1484 if (err) 1485 goto free_map; 1486 } 1487 1488 map->btf_key_type_id = attr->btf_key_type_id; 1489 map->btf_value_type_id = attr->btf_value_type_id; 1490 map->btf_vmlinux_value_type_id = 1491 attr->btf_vmlinux_value_type_id; 1492 } 1493 1494 err = security_bpf_map_create(map, attr, token); 1495 if (err) 1496 goto free_map_sec; 1497 1498 err = bpf_map_alloc_id(map); 1499 if (err) 1500 goto free_map_sec; 1501 1502 bpf_map_save_memcg(map); 1503 bpf_token_put(token); 1504 1505 err = bpf_map_new_fd(map, f_flags); 1506 if (err < 0) { 1507 /* failed to allocate fd. 1508 * bpf_map_put_with_uref() is needed because the above 1509 * bpf_map_alloc_id() has published the map 1510 * to the userspace and the userspace may 1511 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. 1512 */ 1513 bpf_map_put_with_uref(map); 1514 return err; 1515 } 1516 1517 return err; 1518 1519 free_map_sec: 1520 security_bpf_map_free(map); 1521 free_map: 1522 bpf_map_free(map); 1523 put_token: 1524 bpf_token_put(token); 1525 return err; 1526 } 1527 1528 void bpf_map_inc(struct bpf_map *map) 1529 { 1530 atomic64_inc(&map->refcnt); 1531 } 1532 EXPORT_SYMBOL_GPL(bpf_map_inc); 1533 1534 void bpf_map_inc_with_uref(struct bpf_map *map) 1535 { 1536 atomic64_inc(&map->refcnt); 1537 atomic64_inc(&map->usercnt); 1538 } 1539 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref); 1540 1541 struct bpf_map *bpf_map_get(u32 ufd) 1542 { 1543 CLASS(fd, f)(ufd); 1544 struct bpf_map *map = __bpf_map_get(f); 1545 1546 if (!IS_ERR(map)) 1547 bpf_map_inc(map); 1548 1549 return map; 1550 } 1551 EXPORT_SYMBOL(bpf_map_get); 1552 1553 struct bpf_map *bpf_map_get_with_uref(u32 ufd) 1554 { 1555 CLASS(fd, f)(ufd); 1556 struct bpf_map *map = __bpf_map_get(f); 1557 1558 if (!IS_ERR(map)) 1559 bpf_map_inc_with_uref(map); 1560 1561 return map; 1562 } 1563 1564 /* map_idr_lock should have been held or the map should have been 1565 * protected by rcu read lock. 
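 * The reference is taken with atomic64_fetch_add_unless(&map->refcnt, 1, 0),
 * so a map whose refcnt has already dropped to zero is never revived; such
 * callers get ERR_PTR(-ENOENT) instead.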
1566 */ 1567 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref) 1568 { 1569 int refold; 1570 1571 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); 1572 if (!refold) 1573 return ERR_PTR(-ENOENT); 1574 if (uref) 1575 atomic64_inc(&map->usercnt); 1576 1577 return map; 1578 } 1579 1580 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map) 1581 { 1582 spin_lock_bh(&map_idr_lock); 1583 map = __bpf_map_inc_not_zero(map, false); 1584 spin_unlock_bh(&map_idr_lock); 1585 1586 return map; 1587 } 1588 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero); 1589 1590 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) 1591 { 1592 return -ENOTSUPP; 1593 } 1594 1595 static void *__bpf_copy_key(void __user *ukey, u64 key_size) 1596 { 1597 if (key_size) 1598 return vmemdup_user(ukey, key_size); 1599 1600 if (ukey) 1601 return ERR_PTR(-EINVAL); 1602 1603 return NULL; 1604 } 1605 1606 static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size) 1607 { 1608 if (key_size) 1609 return kvmemdup_bpfptr(ukey, key_size); 1610 1611 if (!bpfptr_is_null(ukey)) 1612 return ERR_PTR(-EINVAL); 1613 1614 return NULL; 1615 } 1616 1617 /* last field in 'union bpf_attr' used by this command */ 1618 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags 1619 1620 static int map_lookup_elem(union bpf_attr *attr) 1621 { 1622 void __user *ukey = u64_to_user_ptr(attr->key); 1623 void __user *uvalue = u64_to_user_ptr(attr->value); 1624 struct bpf_map *map; 1625 void *key, *value; 1626 u32 value_size; 1627 int err; 1628 1629 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) 1630 return -EINVAL; 1631 1632 if (attr->flags & ~BPF_F_LOCK) 1633 return -EINVAL; 1634 1635 CLASS(fd, f)(attr->map_fd); 1636 map = __bpf_map_get(f); 1637 if (IS_ERR(map)) 1638 return PTR_ERR(map); 1639 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) 1640 return -EPERM; 1641 1642 if ((attr->flags & BPF_F_LOCK) && 1643 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) 1644 return -EINVAL; 1645 1646 key = __bpf_copy_key(ukey, map->key_size); 1647 if (IS_ERR(key)) 1648 return PTR_ERR(key); 1649 1650 value_size = bpf_map_value_size(map); 1651 1652 err = -ENOMEM; 1653 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN); 1654 if (!value) 1655 goto free_key; 1656 1657 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { 1658 if (copy_from_user(value, uvalue, value_size)) 1659 err = -EFAULT; 1660 else 1661 err = bpf_map_copy_value(map, key, value, attr->flags); 1662 goto free_value; 1663 } 1664 1665 err = bpf_map_copy_value(map, key, value, attr->flags); 1666 if (err) 1667 goto free_value; 1668 1669 err = -EFAULT; 1670 if (copy_to_user(uvalue, value, value_size) != 0) 1671 goto free_value; 1672 1673 err = 0; 1674 1675 free_value: 1676 kvfree(value); 1677 free_key: 1678 kvfree(key); 1679 return err; 1680 } 1681 1682 1683 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags 1684 1685 static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr) 1686 { 1687 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); 1688 bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel); 1689 struct bpf_map *map; 1690 void *key, *value; 1691 u32 value_size; 1692 int err; 1693 1694 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM)) 1695 return -EINVAL; 1696 1697 CLASS(fd, f)(attr->map_fd); 1698 map = __bpf_map_get(f); 1699 if (IS_ERR(map)) 1700 return PTR_ERR(map); 1701 bpf_map_write_active_inc(map); 1702 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1703 err = -EPERM; 1704 goto err_put; 1705 } 1706 1707 if ((attr->flags & BPF_F_LOCK) && 1708 !btf_record_has_field(map->record, 
BPF_SPIN_LOCK)) { 1709 err = -EINVAL; 1710 goto err_put; 1711 } 1712 1713 key = ___bpf_copy_key(ukey, map->key_size); 1714 if (IS_ERR(key)) { 1715 err = PTR_ERR(key); 1716 goto err_put; 1717 } 1718 1719 value_size = bpf_map_value_size(map); 1720 value = kvmemdup_bpfptr(uvalue, value_size); 1721 if (IS_ERR(value)) { 1722 err = PTR_ERR(value); 1723 goto free_key; 1724 } 1725 1726 err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags); 1727 if (!err) 1728 maybe_wait_bpf_programs(map); 1729 1730 kvfree(value); 1731 free_key: 1732 kvfree(key); 1733 err_put: 1734 bpf_map_write_active_dec(map); 1735 return err; 1736 } 1737 1738 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key 1739 1740 static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr) 1741 { 1742 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); 1743 struct bpf_map *map; 1744 void *key; 1745 int err; 1746 1747 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM)) 1748 return -EINVAL; 1749 1750 CLASS(fd, f)(attr->map_fd); 1751 map = __bpf_map_get(f); 1752 if (IS_ERR(map)) 1753 return PTR_ERR(map); 1754 bpf_map_write_active_inc(map); 1755 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1756 err = -EPERM; 1757 goto err_put; 1758 } 1759 1760 key = ___bpf_copy_key(ukey, map->key_size); 1761 if (IS_ERR(key)) { 1762 err = PTR_ERR(key); 1763 goto err_put; 1764 } 1765 1766 if (bpf_map_is_offloaded(map)) { 1767 err = bpf_map_offload_delete_elem(map, key); 1768 goto out; 1769 } else if (IS_FD_PROG_ARRAY(map) || 1770 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 1771 /* These maps require sleepable context */ 1772 err = map->ops->map_delete_elem(map, key); 1773 goto out; 1774 } 1775 1776 bpf_disable_instrumentation(); 1777 rcu_read_lock(); 1778 err = map->ops->map_delete_elem(map, key); 1779 rcu_read_unlock(); 1780 bpf_enable_instrumentation(); 1781 if (!err) 1782 maybe_wait_bpf_programs(map); 1783 out: 1784 kvfree(key); 1785 err_put: 1786 bpf_map_write_active_dec(map); 1787 return err; 1788 } 1789 1790 /* last field in 'union bpf_attr' used by this command */ 1791 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key 1792 1793 static int map_get_next_key(union bpf_attr *attr) 1794 { 1795 void __user *ukey = u64_to_user_ptr(attr->key); 1796 void __user *unext_key = u64_to_user_ptr(attr->next_key); 1797 struct bpf_map *map; 1798 void *key, *next_key; 1799 int err; 1800 1801 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY)) 1802 return -EINVAL; 1803 1804 CLASS(fd, f)(attr->map_fd); 1805 map = __bpf_map_get(f); 1806 if (IS_ERR(map)) 1807 return PTR_ERR(map); 1808 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) 1809 return -EPERM; 1810 1811 if (ukey) { 1812 key = __bpf_copy_key(ukey, map->key_size); 1813 if (IS_ERR(key)) 1814 return PTR_ERR(key); 1815 } else { 1816 key = NULL; 1817 } 1818 1819 err = -ENOMEM; 1820 next_key = kvmalloc(map->key_size, GFP_USER); 1821 if (!next_key) 1822 goto free_key; 1823 1824 if (bpf_map_is_offloaded(map)) { 1825 err = bpf_map_offload_get_next_key(map, key, next_key); 1826 goto out; 1827 } 1828 1829 rcu_read_lock(); 1830 err = map->ops->map_get_next_key(map, key, next_key); 1831 rcu_read_unlock(); 1832 out: 1833 if (err) 1834 goto free_next_key; 1835 1836 err = -EFAULT; 1837 if (copy_to_user(unext_key, next_key, map->key_size) != 0) 1838 goto free_next_key; 1839 1840 err = 0; 1841 1842 free_next_key: 1843 kvfree(next_key); 1844 free_key: 1845 kvfree(key); 1846 return err; 1847 } 1848 1849 int generic_map_delete_batch(struct bpf_map *map, 1850 const union bpf_attr *attr, 1851 union bpf_attr __user *uattr) 1852 { 1853 void __user 
*keys = u64_to_user_ptr(attr->batch.keys); 1854 u32 cp, max_count; 1855 int err = 0; 1856 void *key; 1857 1858 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1859 return -EINVAL; 1860 1861 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1862 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 1863 return -EINVAL; 1864 } 1865 1866 max_count = attr->batch.count; 1867 if (!max_count) 1868 return 0; 1869 1870 if (put_user(0, &uattr->batch.count)) 1871 return -EFAULT; 1872 1873 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1874 if (!key) 1875 return -ENOMEM; 1876 1877 for (cp = 0; cp < max_count; cp++) { 1878 err = -EFAULT; 1879 if (copy_from_user(key, keys + cp * map->key_size, 1880 map->key_size)) 1881 break; 1882 1883 if (bpf_map_is_offloaded(map)) { 1884 err = bpf_map_offload_delete_elem(map, key); 1885 break; 1886 } 1887 1888 bpf_disable_instrumentation(); 1889 rcu_read_lock(); 1890 err = map->ops->map_delete_elem(map, key); 1891 rcu_read_unlock(); 1892 bpf_enable_instrumentation(); 1893 if (err) 1894 break; 1895 cond_resched(); 1896 } 1897 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) 1898 err = -EFAULT; 1899 1900 kvfree(key); 1901 1902 return err; 1903 } 1904 1905 int generic_map_update_batch(struct bpf_map *map, struct file *map_file, 1906 const union bpf_attr *attr, 1907 union bpf_attr __user *uattr) 1908 { 1909 void __user *values = u64_to_user_ptr(attr->batch.values); 1910 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1911 u32 value_size, cp, max_count; 1912 void *key, *value; 1913 int err = 0; 1914 1915 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1916 return -EINVAL; 1917 1918 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1919 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 1920 return -EINVAL; 1921 } 1922 1923 value_size = bpf_map_value_size(map); 1924 1925 max_count = attr->batch.count; 1926 if (!max_count) 1927 return 0; 1928 1929 if (put_user(0, &uattr->batch.count)) 1930 return -EFAULT; 1931 1932 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1933 if (!key) 1934 return -ENOMEM; 1935 1936 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN); 1937 if (!value) { 1938 kvfree(key); 1939 return -ENOMEM; 1940 } 1941 1942 for (cp = 0; cp < max_count; cp++) { 1943 err = -EFAULT; 1944 if (copy_from_user(key, keys + cp * map->key_size, 1945 map->key_size) || 1946 copy_from_user(value, values + cp * value_size, value_size)) 1947 break; 1948 1949 err = bpf_map_update_value(map, map_file, key, value, 1950 attr->batch.elem_flags); 1951 1952 if (err) 1953 break; 1954 cond_resched(); 1955 } 1956 1957 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) 1958 err = -EFAULT; 1959 1960 kvfree(value); 1961 kvfree(key); 1962 1963 return err; 1964 } 1965 1966 #define MAP_LOOKUP_RETRIES 3 1967 1968 int generic_map_lookup_batch(struct bpf_map *map, 1969 const union bpf_attr *attr, 1970 union bpf_attr __user *uattr) 1971 { 1972 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); 1973 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); 1974 void __user *values = u64_to_user_ptr(attr->batch.values); 1975 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1976 void *buf, *buf_prevkey, *prev_key, *key, *value; 1977 int err, retry = MAP_LOOKUP_RETRIES; 1978 u32 value_size, cp, max_count; 1979 1980 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1981 return -EINVAL; 1982 1983 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1984 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) 1985 return -EINVAL; 1986 1987 value_size = bpf_map_value_size(map); 1988 1989 
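	/*
	 * Userspace typically drives this in a loop: batch.in_batch is left
	 * NULL for the first call, and the returned batch.out_batch is fed
	 * back as in_batch on the next call.  On return, batch.count holds
	 * the number of elements copied into keys/values, and iteration is
	 * finished once the call fails with -ENOENT.
	 */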
max_count = attr->batch.count; 1990 if (!max_count) 1991 return 0; 1992 1993 if (put_user(0, &uattr->batch.count)) 1994 return -EFAULT; 1995 1996 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1997 if (!buf_prevkey) 1998 return -ENOMEM; 1999 2000 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); 2001 if (!buf) { 2002 kvfree(buf_prevkey); 2003 return -ENOMEM; 2004 } 2005 2006 err = -EFAULT; 2007 prev_key = NULL; 2008 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) 2009 goto free_buf; 2010 key = buf; 2011 value = key + map->key_size; 2012 if (ubatch) 2013 prev_key = buf_prevkey; 2014 2015 for (cp = 0; cp < max_count;) { 2016 rcu_read_lock(); 2017 err = map->ops->map_get_next_key(map, prev_key, key); 2018 rcu_read_unlock(); 2019 if (err) 2020 break; 2021 err = bpf_map_copy_value(map, key, value, 2022 attr->batch.elem_flags); 2023 2024 if (err == -ENOENT) { 2025 if (retry) { 2026 retry--; 2027 continue; 2028 } 2029 err = -EINTR; 2030 break; 2031 } 2032 2033 if (err) 2034 goto free_buf; 2035 2036 if (copy_to_user(keys + cp * map->key_size, key, 2037 map->key_size)) { 2038 err = -EFAULT; 2039 goto free_buf; 2040 } 2041 if (copy_to_user(values + cp * value_size, value, value_size)) { 2042 err = -EFAULT; 2043 goto free_buf; 2044 } 2045 2046 if (!prev_key) 2047 prev_key = buf_prevkey; 2048 2049 swap(prev_key, key); 2050 retry = MAP_LOOKUP_RETRIES; 2051 cp++; 2052 cond_resched(); 2053 } 2054 2055 if (err == -EFAULT) 2056 goto free_buf; 2057 2058 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) || 2059 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) 2060 err = -EFAULT; 2061 2062 free_buf: 2063 kvfree(buf_prevkey); 2064 kvfree(buf); 2065 return err; 2066 } 2067 2068 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags 2069 2070 static int map_lookup_and_delete_elem(union bpf_attr *attr) 2071 { 2072 void __user *ukey = u64_to_user_ptr(attr->key); 2073 void __user *uvalue = u64_to_user_ptr(attr->value); 2074 struct bpf_map *map; 2075 void *key, *value; 2076 u32 value_size; 2077 int err; 2078 2079 if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM)) 2080 return -EINVAL; 2081 2082 if (attr->flags & ~BPF_F_LOCK) 2083 return -EINVAL; 2084 2085 CLASS(fd, f)(attr->map_fd); 2086 map = __bpf_map_get(f); 2087 if (IS_ERR(map)) 2088 return PTR_ERR(map); 2089 bpf_map_write_active_inc(map); 2090 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || 2091 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 2092 err = -EPERM; 2093 goto err_put; 2094 } 2095 2096 if (attr->flags && 2097 (map->map_type == BPF_MAP_TYPE_QUEUE || 2098 map->map_type == BPF_MAP_TYPE_STACK)) { 2099 err = -EINVAL; 2100 goto err_put; 2101 } 2102 2103 if ((attr->flags & BPF_F_LOCK) && 2104 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 2105 err = -EINVAL; 2106 goto err_put; 2107 } 2108 2109 key = __bpf_copy_key(ukey, map->key_size); 2110 if (IS_ERR(key)) { 2111 err = PTR_ERR(key); 2112 goto err_put; 2113 } 2114 2115 value_size = bpf_map_value_size(map); 2116 2117 err = -ENOMEM; 2118 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN); 2119 if (!value) 2120 goto free_key; 2121 2122 err = -ENOTSUPP; 2123 if (map->map_type == BPF_MAP_TYPE_QUEUE || 2124 map->map_type == BPF_MAP_TYPE_STACK) { 2125 err = map->ops->map_pop_elem(map, value); 2126 } else if (map->map_type == BPF_MAP_TYPE_HASH || 2127 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 2128 map->map_type == BPF_MAP_TYPE_LRU_HASH || 2129 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 2130 if (!bpf_map_is_offloaded(map)) { 
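			/*
			 * Bumping the per-cpu bpf_prog_active counter below
			 * keeps tracing BPF programs on this CPU from running
			 * (they bail out when the counter is already non-zero),
			 * so a tracepoint or kprobe program cannot recursively
			 * operate on map internals while this syscall holds the
			 * map's internal locks.
			 */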
2131 bpf_disable_instrumentation(); 2132 rcu_read_lock(); 2133 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); 2134 rcu_read_unlock(); 2135 bpf_enable_instrumentation(); 2136 } 2137 } 2138 2139 if (err) 2140 goto free_value; 2141 2142 if (copy_to_user(uvalue, value, value_size) != 0) { 2143 err = -EFAULT; 2144 goto free_value; 2145 } 2146 2147 err = 0; 2148 2149 free_value: 2150 kvfree(value); 2151 free_key: 2152 kvfree(key); 2153 err_put: 2154 bpf_map_write_active_dec(map); 2155 return err; 2156 } 2157 2158 #define BPF_MAP_FREEZE_LAST_FIELD map_fd 2159 2160 static int map_freeze(const union bpf_attr *attr) 2161 { 2162 int err = 0; 2163 struct bpf_map *map; 2164 2165 if (CHECK_ATTR(BPF_MAP_FREEZE)) 2166 return -EINVAL; 2167 2168 CLASS(fd, f)(attr->map_fd); 2169 map = __bpf_map_get(f); 2170 if (IS_ERR(map)) 2171 return PTR_ERR(map); 2172 2173 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) 2174 return -ENOTSUPP; 2175 2176 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) 2177 return -EPERM; 2178 2179 mutex_lock(&map->freeze_mutex); 2180 if (bpf_map_write_active(map)) { 2181 err = -EBUSY; 2182 goto err_put; 2183 } 2184 if (READ_ONCE(map->frozen)) { 2185 err = -EBUSY; 2186 goto err_put; 2187 } 2188 2189 WRITE_ONCE(map->frozen, true); 2190 err_put: 2191 mutex_unlock(&map->freeze_mutex); 2192 return err; 2193 } 2194 2195 static const struct bpf_prog_ops * const bpf_prog_types[] = { 2196 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 2197 [_id] = & _name ## _prog_ops, 2198 #define BPF_MAP_TYPE(_id, _ops) 2199 #define BPF_LINK_TYPE(_id, _name) 2200 #include <linux/bpf_types.h> 2201 #undef BPF_PROG_TYPE 2202 #undef BPF_MAP_TYPE 2203 #undef BPF_LINK_TYPE 2204 }; 2205 2206 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) 2207 { 2208 const struct bpf_prog_ops *ops; 2209 2210 if (type >= ARRAY_SIZE(bpf_prog_types)) 2211 return -EINVAL; 2212 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); 2213 ops = bpf_prog_types[type]; 2214 if (!ops) 2215 return -EINVAL; 2216 2217 if (!bpf_prog_is_offloaded(prog->aux)) 2218 prog->aux->ops = ops; 2219 else 2220 prog->aux->ops = &bpf_offload_prog_ops; 2221 prog->type = type; 2222 return 0; 2223 } 2224 2225 enum bpf_audit { 2226 BPF_AUDIT_LOAD, 2227 BPF_AUDIT_UNLOAD, 2228 BPF_AUDIT_MAX, 2229 }; 2230 2231 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = { 2232 [BPF_AUDIT_LOAD] = "LOAD", 2233 [BPF_AUDIT_UNLOAD] = "UNLOAD", 2234 }; 2235 2236 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op) 2237 { 2238 struct audit_context *ctx = NULL; 2239 struct audit_buffer *ab; 2240 2241 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX)) 2242 return; 2243 if (audit_enabled == AUDIT_OFF) 2244 return; 2245 if (!in_irq() && !irqs_disabled()) 2246 ctx = audit_context(); 2247 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); 2248 if (unlikely(!ab)) 2249 return; 2250 audit_log_format(ab, "prog-id=%u op=%s", 2251 prog->aux->id, bpf_audit_str[op]); 2252 audit_log_end(ab); 2253 } 2254 2255 static int bpf_prog_alloc_id(struct bpf_prog *prog) 2256 { 2257 int id; 2258 2259 idr_preload(GFP_KERNEL); 2260 spin_lock_bh(&prog_idr_lock); 2261 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); 2262 if (id > 0) 2263 prog->aux->id = id; 2264 spin_unlock_bh(&prog_idr_lock); 2265 idr_preload_end(); 2266 2267 /* id is in [1, INT_MAX) */ 2268 if (WARN_ON_ONCE(!id)) 2269 return -ENOSPC; 2270 2271 return id > 0 ? 
0 : id; 2272 } 2273 2274 void bpf_prog_free_id(struct bpf_prog *prog) 2275 { 2276 unsigned long flags; 2277 2278 /* cBPF to eBPF migrations are currently not in the idr store. 2279 * Offloaded programs are removed from the store when their device 2280 * disappears - even if someone grabs an fd to them they are unusable, 2281 * simply waiting for refcnt to drop to be freed. 2282 */ 2283 if (!prog->aux->id) 2284 return; 2285 2286 spin_lock_irqsave(&prog_idr_lock, flags); 2287 idr_remove(&prog_idr, prog->aux->id); 2288 prog->aux->id = 0; 2289 spin_unlock_irqrestore(&prog_idr_lock, flags); 2290 } 2291 2292 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 2293 { 2294 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 2295 2296 kvfree(aux->func_info); 2297 kfree(aux->func_info_aux); 2298 free_uid(aux->user); 2299 security_bpf_prog_free(aux->prog); 2300 bpf_prog_free(aux->prog); 2301 } 2302 2303 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 2304 { 2305 bpf_prog_kallsyms_del_all(prog); 2306 btf_put(prog->aux->btf); 2307 module_put(prog->aux->mod); 2308 kvfree(prog->aux->jited_linfo); 2309 kvfree(prog->aux->linfo); 2310 kfree(prog->aux->kfunc_tab); 2311 if (prog->aux->attach_btf) 2312 btf_put(prog->aux->attach_btf); 2313 2314 if (deferred) { 2315 if (prog->sleepable) 2316 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); 2317 else 2318 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 2319 } else { 2320 __bpf_prog_put_rcu(&prog->aux->rcu); 2321 } 2322 } 2323 2324 static void bpf_prog_put_deferred(struct work_struct *work) 2325 { 2326 struct bpf_prog_aux *aux; 2327 struct bpf_prog *prog; 2328 2329 aux = container_of(work, struct bpf_prog_aux, work); 2330 prog = aux->prog; 2331 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2332 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2333 bpf_prog_free_id(prog); 2334 __bpf_prog_put_noref(prog, true); 2335 } 2336 2337 static void __bpf_prog_put(struct bpf_prog *prog) 2338 { 2339 struct bpf_prog_aux *aux = prog->aux; 2340 2341 if (atomic64_dec_and_test(&aux->refcnt)) { 2342 if (in_irq() || irqs_disabled()) { 2343 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2344 schedule_work(&aux->work); 2345 } else { 2346 bpf_prog_put_deferred(&aux->work); 2347 } 2348 } 2349 } 2350 2351 void bpf_prog_put(struct bpf_prog *prog) 2352 { 2353 __bpf_prog_put(prog); 2354 } 2355 EXPORT_SYMBOL_GPL(bpf_prog_put); 2356 2357 static int bpf_prog_release(struct inode *inode, struct file *filp) 2358 { 2359 struct bpf_prog *prog = filp->private_data; 2360 2361 bpf_prog_put(prog); 2362 return 0; 2363 } 2364 2365 struct bpf_prog_kstats { 2366 u64 nsecs; 2367 u64 cnt; 2368 u64 misses; 2369 }; 2370 2371 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2372 { 2373 struct bpf_prog_stats *stats; 2374 unsigned int flags; 2375 2376 stats = this_cpu_ptr(prog->stats); 2377 flags = u64_stats_update_begin_irqsave(&stats->syncp); 2378 u64_stats_inc(&stats->misses); 2379 u64_stats_update_end_irqrestore(&stats->syncp, flags); 2380 } 2381 2382 static void bpf_prog_get_stats(const struct bpf_prog *prog, 2383 struct bpf_prog_kstats *stats) 2384 { 2385 u64 nsecs = 0, cnt = 0, misses = 0; 2386 int cpu; 2387 2388 for_each_possible_cpu(cpu) { 2389 const struct bpf_prog_stats *st; 2390 unsigned int start; 2391 u64 tnsecs, tcnt, tmisses; 2392 2393 st = per_cpu_ptr(prog->stats, cpu); 2394 do { 2395 start = u64_stats_fetch_begin(&st->syncp); 2396 tnsecs = u64_stats_read(&st->nsecs); 2397 tcnt = u64_stats_read(&st->cnt); 2398 tmisses = 
u64_stats_read(&st->misses); 2399 } while (u64_stats_fetch_retry(&st->syncp, start)); 2400 nsecs += tnsecs; 2401 cnt += tcnt; 2402 misses += tmisses; 2403 } 2404 stats->nsecs = nsecs; 2405 stats->cnt = cnt; 2406 stats->misses = misses; 2407 } 2408 2409 #ifdef CONFIG_PROC_FS 2410 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 2411 { 2412 const struct bpf_prog *prog = filp->private_data; 2413 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2414 struct bpf_prog_kstats stats; 2415 2416 bpf_prog_get_stats(prog, &stats); 2417 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2418 seq_printf(m, 2419 "prog_type:\t%u\n" 2420 "prog_jited:\t%u\n" 2421 "prog_tag:\t%s\n" 2422 "memlock:\t%llu\n" 2423 "prog_id:\t%u\n" 2424 "run_time_ns:\t%llu\n" 2425 "run_cnt:\t%llu\n" 2426 "recursion_misses:\t%llu\n" 2427 "verified_insns:\t%u\n", 2428 prog->type, 2429 prog->jited, 2430 prog_tag, 2431 prog->pages * 1ULL << PAGE_SHIFT, 2432 prog->aux->id, 2433 stats.nsecs, 2434 stats.cnt, 2435 stats.misses, 2436 prog->aux->verified_insns); 2437 } 2438 #endif 2439 2440 const struct file_operations bpf_prog_fops = { 2441 #ifdef CONFIG_PROC_FS 2442 .show_fdinfo = bpf_prog_show_fdinfo, 2443 #endif 2444 .release = bpf_prog_release, 2445 .read = bpf_dummy_read, 2446 .write = bpf_dummy_write, 2447 }; 2448 2449 int bpf_prog_new_fd(struct bpf_prog *prog) 2450 { 2451 int ret; 2452 2453 ret = security_bpf_prog(prog); 2454 if (ret < 0) 2455 return ret; 2456 2457 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 2458 O_RDWR | O_CLOEXEC); 2459 } 2460 2461 void bpf_prog_add(struct bpf_prog *prog, int i) 2462 { 2463 atomic64_add(i, &prog->aux->refcnt); 2464 } 2465 EXPORT_SYMBOL_GPL(bpf_prog_add); 2466 2467 void bpf_prog_sub(struct bpf_prog *prog, int i) 2468 { 2469 /* Only to be used for undoing previous bpf_prog_add() in some 2470 * error path. We still know that another entity in our call 2471 * path holds a reference to the program, thus atomic_sub() can 2472 * be safely used in such cases! 
2473 */ 2474 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 2475 } 2476 EXPORT_SYMBOL_GPL(bpf_prog_sub); 2477 2478 void bpf_prog_inc(struct bpf_prog *prog) 2479 { 2480 atomic64_inc(&prog->aux->refcnt); 2481 } 2482 EXPORT_SYMBOL_GPL(bpf_prog_inc); 2483 2484 /* prog_idr_lock should have been held */ 2485 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 2486 { 2487 int refold; 2488 2489 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 2490 2491 if (!refold) 2492 return ERR_PTR(-ENOENT); 2493 2494 return prog; 2495 } 2496 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 2497 2498 bool bpf_prog_get_ok(struct bpf_prog *prog, 2499 enum bpf_prog_type *attach_type, bool attach_drv) 2500 { 2501 /* not an attachment, just a refcount inc, always allow */ 2502 if (!attach_type) 2503 return true; 2504 2505 if (prog->type != *attach_type) 2506 return false; 2507 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) 2508 return false; 2509 2510 return true; 2511 } 2512 2513 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 2514 bool attach_drv) 2515 { 2516 CLASS(fd, f)(ufd); 2517 struct bpf_prog *prog; 2518 2519 if (fd_empty(f)) 2520 return ERR_PTR(-EBADF); 2521 if (fd_file(f)->f_op != &bpf_prog_fops) 2522 return ERR_PTR(-EINVAL); 2523 2524 prog = fd_file(f)->private_data; 2525 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) 2526 return ERR_PTR(-EINVAL); 2527 2528 bpf_prog_inc(prog); 2529 return prog; 2530 } 2531 2532 struct bpf_prog *bpf_prog_get(u32 ufd) 2533 { 2534 return __bpf_prog_get(ufd, NULL, false); 2535 } 2536 2537 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2538 bool attach_drv) 2539 { 2540 return __bpf_prog_get(ufd, &type, attach_drv); 2541 } 2542 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 2543 2544 /* Initially all BPF programs could be loaded w/o specifying 2545 * expected_attach_type. Later for some of them specifying expected_attach_type 2546 * at load time became required so that program could be validated properly. 2547 * Programs of types that are allowed to be loaded both w/ and w/o (for 2548 * backward compatibility) expected_attach_type, should have the default attach 2549 * type assigned to expected_attach_type for the latter case, so that it can be 2550 * validated later at attach time. 2551 * 2552 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 2553 * prog type requires it but has some attach types that have to be backward 2554 * compatible. 2555 */ 2556 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 2557 { 2558 switch (attr->prog_type) { 2559 case BPF_PROG_TYPE_CGROUP_SOCK: 2560 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 2561 * exist so checking for non-zero is the way to go here. 
2562 */ 2563 if (!attr->expected_attach_type) 2564 attr->expected_attach_type = 2565 BPF_CGROUP_INET_SOCK_CREATE; 2566 break; 2567 case BPF_PROG_TYPE_SK_REUSEPORT: 2568 if (!attr->expected_attach_type) 2569 attr->expected_attach_type = 2570 BPF_SK_REUSEPORT_SELECT; 2571 break; 2572 } 2573 } 2574 2575 static int 2576 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 2577 enum bpf_attach_type expected_attach_type, 2578 struct btf *attach_btf, u32 btf_id, 2579 struct bpf_prog *dst_prog) 2580 { 2581 if (btf_id) { 2582 if (btf_id > BTF_MAX_TYPE) 2583 return -EINVAL; 2584 2585 if (!attach_btf && !dst_prog) 2586 return -EINVAL; 2587 2588 switch (prog_type) { 2589 case BPF_PROG_TYPE_TRACING: 2590 case BPF_PROG_TYPE_LSM: 2591 case BPF_PROG_TYPE_STRUCT_OPS: 2592 case BPF_PROG_TYPE_EXT: 2593 break; 2594 default: 2595 return -EINVAL; 2596 } 2597 } 2598 2599 if (attach_btf && (!btf_id || dst_prog)) 2600 return -EINVAL; 2601 2602 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING && 2603 prog_type != BPF_PROG_TYPE_EXT) 2604 return -EINVAL; 2605 2606 switch (prog_type) { 2607 case BPF_PROG_TYPE_CGROUP_SOCK: 2608 switch (expected_attach_type) { 2609 case BPF_CGROUP_INET_SOCK_CREATE: 2610 case BPF_CGROUP_INET_SOCK_RELEASE: 2611 case BPF_CGROUP_INET4_POST_BIND: 2612 case BPF_CGROUP_INET6_POST_BIND: 2613 return 0; 2614 default: 2615 return -EINVAL; 2616 } 2617 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2618 switch (expected_attach_type) { 2619 case BPF_CGROUP_INET4_BIND: 2620 case BPF_CGROUP_INET6_BIND: 2621 case BPF_CGROUP_INET4_CONNECT: 2622 case BPF_CGROUP_INET6_CONNECT: 2623 case BPF_CGROUP_UNIX_CONNECT: 2624 case BPF_CGROUP_INET4_GETPEERNAME: 2625 case BPF_CGROUP_INET6_GETPEERNAME: 2626 case BPF_CGROUP_UNIX_GETPEERNAME: 2627 case BPF_CGROUP_INET4_GETSOCKNAME: 2628 case BPF_CGROUP_INET6_GETSOCKNAME: 2629 case BPF_CGROUP_UNIX_GETSOCKNAME: 2630 case BPF_CGROUP_UDP4_SENDMSG: 2631 case BPF_CGROUP_UDP6_SENDMSG: 2632 case BPF_CGROUP_UNIX_SENDMSG: 2633 case BPF_CGROUP_UDP4_RECVMSG: 2634 case BPF_CGROUP_UDP6_RECVMSG: 2635 case BPF_CGROUP_UNIX_RECVMSG: 2636 return 0; 2637 default: 2638 return -EINVAL; 2639 } 2640 case BPF_PROG_TYPE_CGROUP_SKB: 2641 switch (expected_attach_type) { 2642 case BPF_CGROUP_INET_INGRESS: 2643 case BPF_CGROUP_INET_EGRESS: 2644 return 0; 2645 default: 2646 return -EINVAL; 2647 } 2648 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2649 switch (expected_attach_type) { 2650 case BPF_CGROUP_SETSOCKOPT: 2651 case BPF_CGROUP_GETSOCKOPT: 2652 return 0; 2653 default: 2654 return -EINVAL; 2655 } 2656 case BPF_PROG_TYPE_SK_LOOKUP: 2657 if (expected_attach_type == BPF_SK_LOOKUP) 2658 return 0; 2659 return -EINVAL; 2660 case BPF_PROG_TYPE_SK_REUSEPORT: 2661 switch (expected_attach_type) { 2662 case BPF_SK_REUSEPORT_SELECT: 2663 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2664 return 0; 2665 default: 2666 return -EINVAL; 2667 } 2668 case BPF_PROG_TYPE_NETFILTER: 2669 if (expected_attach_type == BPF_NETFILTER) 2670 return 0; 2671 return -EINVAL; 2672 case BPF_PROG_TYPE_SYSCALL: 2673 case BPF_PROG_TYPE_EXT: 2674 if (expected_attach_type) 2675 return -EINVAL; 2676 fallthrough; 2677 default: 2678 return 0; 2679 } 2680 } 2681 2682 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2683 { 2684 switch (prog_type) { 2685 case BPF_PROG_TYPE_SCHED_CLS: 2686 case BPF_PROG_TYPE_SCHED_ACT: 2687 case BPF_PROG_TYPE_XDP: 2688 case BPF_PROG_TYPE_LWT_IN: 2689 case BPF_PROG_TYPE_LWT_OUT: 2690 case BPF_PROG_TYPE_LWT_XMIT: 2691 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2692 case BPF_PROG_TYPE_SK_SKB: 2693 case 
BPF_PROG_TYPE_SK_MSG: 2694 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2695 case BPF_PROG_TYPE_CGROUP_DEVICE: 2696 case BPF_PROG_TYPE_CGROUP_SOCK: 2697 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2698 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2699 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2700 case BPF_PROG_TYPE_SOCK_OPS: 2701 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2702 case BPF_PROG_TYPE_NETFILTER: 2703 return true; 2704 case BPF_PROG_TYPE_CGROUP_SKB: 2705 /* always unpriv */ 2706 case BPF_PROG_TYPE_SK_REUSEPORT: 2707 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2708 default: 2709 return false; 2710 } 2711 } 2712 2713 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2714 { 2715 switch (prog_type) { 2716 case BPF_PROG_TYPE_KPROBE: 2717 case BPF_PROG_TYPE_TRACEPOINT: 2718 case BPF_PROG_TYPE_PERF_EVENT: 2719 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2720 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2721 case BPF_PROG_TYPE_TRACING: 2722 case BPF_PROG_TYPE_LSM: 2723 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2724 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2725 return true; 2726 default: 2727 return false; 2728 } 2729 } 2730 2731 /* last field in 'union bpf_attr' used by this command */ 2732 #define BPF_PROG_LOAD_LAST_FIELD prog_token_fd 2733 2734 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) 2735 { 2736 enum bpf_prog_type type = attr->prog_type; 2737 struct bpf_prog *prog, *dst_prog = NULL; 2738 struct btf *attach_btf = NULL; 2739 struct bpf_token *token = NULL; 2740 bool bpf_cap; 2741 int err; 2742 char license[128]; 2743 2744 if (CHECK_ATTR(BPF_PROG_LOAD)) 2745 return -EINVAL; 2746 2747 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2748 BPF_F_ANY_ALIGNMENT | 2749 BPF_F_TEST_STATE_FREQ | 2750 BPF_F_SLEEPABLE | 2751 BPF_F_TEST_RND_HI32 | 2752 BPF_F_XDP_HAS_FRAGS | 2753 BPF_F_XDP_DEV_BOUND_ONLY | 2754 BPF_F_TEST_REG_INVARIANTS | 2755 BPF_F_TOKEN_FD)) 2756 return -EINVAL; 2757 2758 bpf_prog_load_fixup_attach_type(attr); 2759 2760 if (attr->prog_flags & BPF_F_TOKEN_FD) { 2761 token = bpf_token_get_from_fd(attr->prog_token_fd); 2762 if (IS_ERR(token)) 2763 return PTR_ERR(token); 2764 /* if current token doesn't grant prog loading permissions, 2765 * then we can't use this token, so ignore it and rely on 2766 * system-wide capabilities checks 2767 */ 2768 if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) || 2769 !bpf_token_allow_prog_type(token, attr->prog_type, 2770 attr->expected_attach_type)) { 2771 bpf_token_put(token); 2772 token = NULL; 2773 } 2774 } 2775 2776 bpf_cap = bpf_token_capable(token, CAP_BPF); 2777 err = -EPERM; 2778 2779 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2780 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2781 !bpf_cap) 2782 goto put_token; 2783 2784 /* Intent here is for unprivileged_bpf_disabled to block BPF program 2785 * creation for unprivileged users; other actions depend 2786 * on fd availability and access to bpffs, so are dependent on 2787 * object creation success. Even with unprivileged BPF disabled, 2788 * capability checks are still carried out for these 2789 * and other operations. 2790 */ 2791 if (sysctl_unprivileged_bpf_disabled && !bpf_cap) 2792 goto put_token; 2793 2794 if (attr->insn_cnt == 0 || 2795 attr->insn_cnt > (bpf_cap ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) { 2796 err = -E2BIG; 2797 goto put_token; 2798 } 2799 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2800 type != BPF_PROG_TYPE_CGROUP_SKB && 2801 !bpf_cap) 2802 goto put_token; 2803 2804 if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN)) 2805 goto put_token; 2806 if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON)) 2807 goto put_token; 2808 2809 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog 2810 * or btf, we need to check which one it is 2811 */ 2812 if (attr->attach_prog_fd) { 2813 dst_prog = bpf_prog_get(attr->attach_prog_fd); 2814 if (IS_ERR(dst_prog)) { 2815 dst_prog = NULL; 2816 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); 2817 if (IS_ERR(attach_btf)) { 2818 err = -EINVAL; 2819 goto put_token; 2820 } 2821 if (!btf_is_kernel(attach_btf)) { 2822 /* attaching through specifying bpf_prog's BTF 2823 * objects directly might be supported eventually 2824 */ 2825 btf_put(attach_btf); 2826 err = -ENOTSUPP; 2827 goto put_token; 2828 } 2829 } 2830 } else if (attr->attach_btf_id) { 2831 /* fall back to vmlinux BTF, if BTF type ID is specified */ 2832 attach_btf = bpf_get_btf_vmlinux(); 2833 if (IS_ERR(attach_btf)) { 2834 err = PTR_ERR(attach_btf); 2835 goto put_token; 2836 } 2837 if (!attach_btf) { 2838 err = -EINVAL; 2839 goto put_token; 2840 } 2841 btf_get(attach_btf); 2842 } 2843 2844 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2845 attach_btf, attr->attach_btf_id, 2846 dst_prog)) { 2847 if (dst_prog) 2848 bpf_prog_put(dst_prog); 2849 if (attach_btf) 2850 btf_put(attach_btf); 2851 err = -EINVAL; 2852 goto put_token; 2853 } 2854 2855 /* plain bpf_prog allocation */ 2856 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2857 if (!prog) { 2858 if (dst_prog) 2859 bpf_prog_put(dst_prog); 2860 if (attach_btf) 2861 btf_put(attach_btf); 2862 err = -EINVAL; 2863 goto put_token; 2864 } 2865 2866 prog->expected_attach_type = attr->expected_attach_type; 2867 prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE); 2868 prog->aux->attach_btf = attach_btf; 2869 prog->aux->attach_btf_id = attr->attach_btf_id; 2870 prog->aux->dst_prog = dst_prog; 2871 prog->aux->dev_bound = !!attr->prog_ifindex; 2872 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; 2873 2874 /* move token into prog->aux, reuse taken refcnt */ 2875 prog->aux->token = token; 2876 token = NULL; 2877 2878 prog->aux->user = get_current_user(); 2879 prog->len = attr->insn_cnt; 2880 2881 err = -EFAULT; 2882 if (copy_from_bpfptr(prog->insns, 2883 make_bpfptr(attr->insns, uattr.is_kernel), 2884 bpf_prog_insn_size(prog)) != 0) 2885 goto free_prog; 2886 /* copy eBPF program license from user space */ 2887 if (strncpy_from_bpfptr(license, 2888 make_bpfptr(attr->license, uattr.is_kernel), 2889 sizeof(license) - 1) < 0) 2890 goto free_prog; 2891 license[sizeof(license) - 1] = 0; 2892 2893 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2894 prog->gpl_compatible = license_is_gpl_compatible(license) ? 
1 : 0; 2895 2896 prog->orig_prog = NULL; 2897 prog->jited = 0; 2898 2899 atomic64_set(&prog->aux->refcnt, 1); 2900 2901 if (bpf_prog_is_dev_bound(prog->aux)) { 2902 err = bpf_prog_dev_bound_init(prog, attr); 2903 if (err) 2904 goto free_prog; 2905 } 2906 2907 if (type == BPF_PROG_TYPE_EXT && dst_prog && 2908 bpf_prog_is_dev_bound(dst_prog->aux)) { 2909 err = bpf_prog_dev_bound_inherit(prog, dst_prog); 2910 if (err) 2911 goto free_prog; 2912 } 2913 2914 /* 2915 * Bookkeeping for managing the program attachment chain. 2916 * 2917 * It might be tempting to set attach_tracing_prog flag at the attachment 2918 * time, but this will not prevent from loading bunch of tracing prog 2919 * first, then attach them one to another. 2920 * 2921 * The flag attach_tracing_prog is set for the whole program lifecycle, and 2922 * doesn't have to be cleared in bpf_tracing_link_release, since tracing 2923 * programs cannot change attachment target. 2924 */ 2925 if (type == BPF_PROG_TYPE_TRACING && dst_prog && 2926 dst_prog->type == BPF_PROG_TYPE_TRACING) { 2927 prog->aux->attach_tracing_prog = true; 2928 } 2929 2930 /* find program type: socket_filter vs tracing_filter */ 2931 err = find_prog_type(type, prog); 2932 if (err < 0) 2933 goto free_prog; 2934 2935 prog->aux->load_time = ktime_get_boottime_ns(); 2936 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2937 sizeof(attr->prog_name)); 2938 if (err < 0) 2939 goto free_prog; 2940 2941 err = security_bpf_prog_load(prog, attr, token); 2942 if (err) 2943 goto free_prog_sec; 2944 2945 /* run eBPF verifier */ 2946 err = bpf_check(&prog, attr, uattr, uattr_size); 2947 if (err < 0) 2948 goto free_used_maps; 2949 2950 prog = bpf_prog_select_runtime(prog, &err); 2951 if (err < 0) 2952 goto free_used_maps; 2953 2954 err = bpf_prog_alloc_id(prog); 2955 if (err) 2956 goto free_used_maps; 2957 2958 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2959 * effectively publicly exposed. However, retrieving via 2960 * bpf_prog_get_fd_by_id() will take another reference, 2961 * therefore it cannot be gone underneath us. 2962 * 2963 * Only for the time /after/ successful bpf_prog_new_fd() 2964 * and before returning to userspace, we might just hold 2965 * one reference and any parallel close on that fd could 2966 * rip everything out. Hence, below notifications must 2967 * happen before bpf_prog_new_fd(). 2968 * 2969 * Also, any failure handling from this point onwards must 2970 * be using bpf_prog_put() given the program is exposed. 2971 */ 2972 bpf_prog_kallsyms_add(prog); 2973 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 2974 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 2975 2976 err = bpf_prog_new_fd(prog); 2977 if (err < 0) 2978 bpf_prog_put(prog); 2979 return err; 2980 2981 free_used_maps: 2982 /* In case we have subprogs, we need to wait for a grace 2983 * period before we can tear down JIT memory since symbols 2984 * are already exposed under kallsyms. 
2985 */ 2986 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt); 2987 return err; 2988 2989 free_prog_sec: 2990 security_bpf_prog_free(prog); 2991 free_prog: 2992 free_uid(prog->aux->user); 2993 if (prog->aux->attach_btf) 2994 btf_put(prog->aux->attach_btf); 2995 bpf_prog_free(prog); 2996 put_token: 2997 bpf_token_put(token); 2998 return err; 2999 } 3000 3001 #define BPF_OBJ_LAST_FIELD path_fd 3002 3003 static int bpf_obj_pin(const union bpf_attr *attr) 3004 { 3005 int path_fd; 3006 3007 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) 3008 return -EINVAL; 3009 3010 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 3011 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 3012 return -EINVAL; 3013 3014 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 3015 return bpf_obj_pin_user(attr->bpf_fd, path_fd, 3016 u64_to_user_ptr(attr->pathname)); 3017 } 3018 3019 static int bpf_obj_get(const union bpf_attr *attr) 3020 { 3021 int path_fd; 3022 3023 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 3024 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) 3025 return -EINVAL; 3026 3027 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 3028 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 3029 return -EINVAL; 3030 3031 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 3032 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), 3033 attr->file_flags); 3034 } 3035 3036 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 3037 const struct bpf_link_ops *ops, struct bpf_prog *prog) 3038 { 3039 WARN_ON(ops->dealloc && ops->dealloc_deferred); 3040 atomic64_set(&link->refcnt, 1); 3041 link->type = type; 3042 link->id = 0; 3043 link->ops = ops; 3044 link->prog = prog; 3045 } 3046 3047 static void bpf_link_free_id(int id) 3048 { 3049 if (!id) 3050 return; 3051 3052 spin_lock_bh(&link_idr_lock); 3053 idr_remove(&link_idr, id); 3054 spin_unlock_bh(&link_idr_lock); 3055 } 3056 3057 /* Clean up bpf_link and corresponding anon_inode file and FD. After 3058 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 3059 * anon_inode's release() call. This helper marks bpf_link as 3060 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt 3061 * is not decremented, it's the responsibility of a calling code that failed 3062 * to complete bpf_link initialization. 3063 * This helper eventually calls link's dealloc callback, but does not call 3064 * link's release callback. 
3065 */ 3066 void bpf_link_cleanup(struct bpf_link_primer *primer) 3067 { 3068 primer->link->prog = NULL; 3069 bpf_link_free_id(primer->id); 3070 fput(primer->file); 3071 put_unused_fd(primer->fd); 3072 } 3073 3074 void bpf_link_inc(struct bpf_link *link) 3075 { 3076 atomic64_inc(&link->refcnt); 3077 } 3078 3079 static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu) 3080 { 3081 struct bpf_link *link = container_of(rcu, struct bpf_link, rcu); 3082 3083 /* free bpf_link and its containing memory */ 3084 link->ops->dealloc_deferred(link); 3085 } 3086 3087 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu) 3088 { 3089 if (rcu_trace_implies_rcu_gp()) 3090 bpf_link_defer_dealloc_rcu_gp(rcu); 3091 else 3092 call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp); 3093 } 3094 3095 /* bpf_link_free is guaranteed to be called from process context */ 3096 static void bpf_link_free(struct bpf_link *link) 3097 { 3098 const struct bpf_link_ops *ops = link->ops; 3099 bool sleepable = false; 3100 3101 bpf_link_free_id(link->id); 3102 if (link->prog) { 3103 sleepable = link->prog->sleepable; 3104 /* detach BPF program, clean up used resources */ 3105 ops->release(link); 3106 bpf_prog_put(link->prog); 3107 } 3108 if (ops->dealloc_deferred) { 3109 /* schedule BPF link deallocation; if underlying BPF program 3110 * is sleepable, we need to first wait for RCU tasks trace 3111 * sync, then go through "classic" RCU grace period 3112 */ 3113 if (sleepable) 3114 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); 3115 else 3116 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); 3117 } else if (ops->dealloc) 3118 ops->dealloc(link); 3119 } 3120 3121 static void bpf_link_put_deferred(struct work_struct *work) 3122 { 3123 struct bpf_link *link = container_of(work, struct bpf_link, work); 3124 3125 bpf_link_free(link); 3126 } 3127 3128 /* bpf_link_put might be called from atomic context. It needs to be called 3129 * from sleepable context in order to acquire sleeping locks during the process. 3130 */ 3131 void bpf_link_put(struct bpf_link *link) 3132 { 3133 if (!atomic64_dec_and_test(&link->refcnt)) 3134 return; 3135 3136 INIT_WORK(&link->work, bpf_link_put_deferred); 3137 schedule_work(&link->work); 3138 } 3139 EXPORT_SYMBOL(bpf_link_put); 3140 3141 static void bpf_link_put_direct(struct bpf_link *link) 3142 { 3143 if (!atomic64_dec_and_test(&link->refcnt)) 3144 return; 3145 bpf_link_free(link); 3146 } 3147 3148 static int bpf_link_release(struct inode *inode, struct file *filp) 3149 { 3150 struct bpf_link *link = filp->private_data; 3151 3152 bpf_link_put_direct(link); 3153 return 0; 3154 } 3155 3156 #ifdef CONFIG_PROC_FS 3157 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 3158 #define BPF_MAP_TYPE(_id, _ops) 3159 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 3160 static const char *bpf_link_type_strs[] = { 3161 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 3162 #include <linux/bpf_types.h> 3163 }; 3164 #undef BPF_PROG_TYPE 3165 #undef BPF_MAP_TYPE 3166 #undef BPF_LINK_TYPE 3167 3168 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 3169 { 3170 const struct bpf_link *link = filp->private_data; 3171 const struct bpf_prog *prog = link->prog; 3172 enum bpf_link_type type = link->type; 3173 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 3174 3175 if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) { 3176 seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]); 3177 } else { 3178 WARN_ONCE(1, "missing BPF_LINK_TYPE(...) 
for link type %u\n", type); 3179 seq_printf(m, "link_type:\t<%u>\n", type); 3180 } 3181 seq_printf(m, "link_id:\t%u\n", link->id); 3182 3183 if (prog) { 3184 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 3185 seq_printf(m, 3186 "prog_tag:\t%s\n" 3187 "prog_id:\t%u\n", 3188 prog_tag, 3189 prog->aux->id); 3190 } 3191 if (link->ops->show_fdinfo) 3192 link->ops->show_fdinfo(link, m); 3193 } 3194 #endif 3195 3196 static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts) 3197 { 3198 struct bpf_link *link = file->private_data; 3199 3200 return link->ops->poll(file, pts); 3201 } 3202 3203 static const struct file_operations bpf_link_fops = { 3204 #ifdef CONFIG_PROC_FS 3205 .show_fdinfo = bpf_link_show_fdinfo, 3206 #endif 3207 .release = bpf_link_release, 3208 .read = bpf_dummy_read, 3209 .write = bpf_dummy_write, 3210 }; 3211 3212 static const struct file_operations bpf_link_fops_poll = { 3213 #ifdef CONFIG_PROC_FS 3214 .show_fdinfo = bpf_link_show_fdinfo, 3215 #endif 3216 .release = bpf_link_release, 3217 .read = bpf_dummy_read, 3218 .write = bpf_dummy_write, 3219 .poll = bpf_link_poll, 3220 }; 3221 3222 static int bpf_link_alloc_id(struct bpf_link *link) 3223 { 3224 int id; 3225 3226 idr_preload(GFP_KERNEL); 3227 spin_lock_bh(&link_idr_lock); 3228 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 3229 spin_unlock_bh(&link_idr_lock); 3230 idr_preload_end(); 3231 3232 return id; 3233 } 3234 3235 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, 3236 * reserving unused FD and allocating ID from link_idr. This is to be paired 3237 * with bpf_link_settle() to install FD and ID and expose bpf_link to 3238 * user-space, if bpf_link is successfully attached. If not, bpf_link and 3239 * pre-allocated resources are to be freed with bpf_cleanup() call. All the 3240 * transient state is passed around in struct bpf_link_primer. 3241 * This is preferred way to create and initialize bpf_link, especially when 3242 * there are complicated and expensive operations in between creating bpf_link 3243 * itself and attaching it to BPF hook. By using bpf_link_prime() and 3244 * bpf_link_settle() kernel code using bpf_link doesn't have to perform 3245 * expensive (and potentially failing) roll back operations in a rare case 3246 * that file, FD, or ID can't be allocated. 3247 */ 3248 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 3249 { 3250 struct file *file; 3251 int fd, id; 3252 3253 fd = get_unused_fd_flags(O_CLOEXEC); 3254 if (fd < 0) 3255 return fd; 3256 3257 3258 id = bpf_link_alloc_id(link); 3259 if (id < 0) { 3260 put_unused_fd(fd); 3261 return id; 3262 } 3263 3264 file = anon_inode_getfile("bpf_link", 3265 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, 3266 link, O_CLOEXEC); 3267 if (IS_ERR(file)) { 3268 bpf_link_free_id(id); 3269 put_unused_fd(fd); 3270 return PTR_ERR(file); 3271 } 3272 3273 primer->link = link; 3274 primer->file = file; 3275 primer->fd = fd; 3276 primer->id = id; 3277 return 0; 3278 } 3279 3280 int bpf_link_settle(struct bpf_link_primer *primer) 3281 { 3282 /* make bpf_link fetchable by ID */ 3283 spin_lock_bh(&link_idr_lock); 3284 primer->link->id = primer->id; 3285 spin_unlock_bh(&link_idr_lock); 3286 /* make bpf_link fetchable by FD */ 3287 fd_install(primer->fd, primer->file); 3288 /* pass through installed FD */ 3289 return primer->fd; 3290 } 3291 3292 int bpf_link_new_fd(struct bpf_link *link) 3293 { 3294 return anon_inode_getfd("bpf-link", 3295 link->ops->poll ? 
&bpf_link_fops_poll : &bpf_link_fops, 3296 link, O_CLOEXEC); 3297 } 3298 3299 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 3300 { 3301 CLASS(fd, f)(ufd); 3302 struct bpf_link *link; 3303 3304 if (fd_empty(f)) 3305 return ERR_PTR(-EBADF); 3306 if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll) 3307 return ERR_PTR(-EINVAL); 3308 3309 link = fd_file(f)->private_data; 3310 bpf_link_inc(link); 3311 return link; 3312 } 3313 EXPORT_SYMBOL(bpf_link_get_from_fd); 3314 3315 static void bpf_tracing_link_release(struct bpf_link *link) 3316 { 3317 struct bpf_tracing_link *tr_link = 3318 container_of(link, struct bpf_tracing_link, link.link); 3319 3320 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, 3321 tr_link->trampoline, 3322 tr_link->tgt_prog)); 3323 3324 bpf_trampoline_put(tr_link->trampoline); 3325 3326 /* tgt_prog is NULL if target is a kernel function */ 3327 if (tr_link->tgt_prog) 3328 bpf_prog_put(tr_link->tgt_prog); 3329 } 3330 3331 static void bpf_tracing_link_dealloc(struct bpf_link *link) 3332 { 3333 struct bpf_tracing_link *tr_link = 3334 container_of(link, struct bpf_tracing_link, link.link); 3335 3336 kfree(tr_link); 3337 } 3338 3339 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 3340 struct seq_file *seq) 3341 { 3342 struct bpf_tracing_link *tr_link = 3343 container_of(link, struct bpf_tracing_link, link.link); 3344 u32 target_btf_id, target_obj_id; 3345 3346 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3347 &target_obj_id, &target_btf_id); 3348 seq_printf(seq, 3349 "attach_type:\t%d\n" 3350 "target_obj_id:\t%u\n" 3351 "target_btf_id:\t%u\n", 3352 tr_link->attach_type, 3353 target_obj_id, 3354 target_btf_id); 3355 } 3356 3357 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 3358 struct bpf_link_info *info) 3359 { 3360 struct bpf_tracing_link *tr_link = 3361 container_of(link, struct bpf_tracing_link, link.link); 3362 3363 info->tracing.attach_type = tr_link->attach_type; 3364 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3365 &info->tracing.target_obj_id, 3366 &info->tracing.target_btf_id); 3367 3368 return 0; 3369 } 3370 3371 static const struct bpf_link_ops bpf_tracing_link_lops = { 3372 .release = bpf_tracing_link_release, 3373 .dealloc = bpf_tracing_link_dealloc, 3374 .show_fdinfo = bpf_tracing_link_show_fdinfo, 3375 .fill_link_info = bpf_tracing_link_fill_link_info, 3376 }; 3377 3378 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 3379 int tgt_prog_fd, 3380 u32 btf_id, 3381 u64 bpf_cookie) 3382 { 3383 struct bpf_link_primer link_primer; 3384 struct bpf_prog *tgt_prog = NULL; 3385 struct bpf_trampoline *tr = NULL; 3386 struct bpf_tracing_link *link; 3387 u64 key = 0; 3388 int err; 3389 3390 switch (prog->type) { 3391 case BPF_PROG_TYPE_TRACING: 3392 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 3393 prog->expected_attach_type != BPF_TRACE_FEXIT && 3394 prog->expected_attach_type != BPF_MODIFY_RETURN) { 3395 err = -EINVAL; 3396 goto out_put_prog; 3397 } 3398 break; 3399 case BPF_PROG_TYPE_EXT: 3400 if (prog->expected_attach_type != 0) { 3401 err = -EINVAL; 3402 goto out_put_prog; 3403 } 3404 break; 3405 case BPF_PROG_TYPE_LSM: 3406 if (prog->expected_attach_type != BPF_LSM_MAC) { 3407 err = -EINVAL; 3408 goto out_put_prog; 3409 } 3410 break; 3411 default: 3412 err = -EINVAL; 3413 goto out_put_prog; 3414 } 3415 3416 if (!!tgt_prog_fd != !!btf_id) { 3417 err = -EINVAL; 3418 goto out_put_prog; 3419 } 3420 3421 if (tgt_prog_fd) { 3422 /* 3423 * For now we only allow 
new targets for BPF_PROG_TYPE_EXT. If this 3424 * part would be changed to implement the same for 3425 * BPF_PROG_TYPE_TRACING, do not forget to update the way how 3426 * attach_tracing_prog flag is set. 3427 */ 3428 if (prog->type != BPF_PROG_TYPE_EXT) { 3429 err = -EINVAL; 3430 goto out_put_prog; 3431 } 3432 3433 tgt_prog = bpf_prog_get(tgt_prog_fd); 3434 if (IS_ERR(tgt_prog)) { 3435 err = PTR_ERR(tgt_prog); 3436 tgt_prog = NULL; 3437 goto out_put_prog; 3438 } 3439 3440 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 3441 } 3442 3443 link = kzalloc(sizeof(*link), GFP_USER); 3444 if (!link) { 3445 err = -ENOMEM; 3446 goto out_put_prog; 3447 } 3448 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, 3449 &bpf_tracing_link_lops, prog); 3450 link->attach_type = prog->expected_attach_type; 3451 link->link.cookie = bpf_cookie; 3452 3453 mutex_lock(&prog->aux->dst_mutex); 3454 3455 /* There are a few possible cases here: 3456 * 3457 * - if prog->aux->dst_trampoline is set, the program was just loaded 3458 * and not yet attached to anything, so we can use the values stored 3459 * in prog->aux 3460 * 3461 * - if prog->aux->dst_trampoline is NULL, the program has already been 3462 * attached to a target and its initial target was cleared (below) 3463 * 3464 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 3465 * target_btf_id using the link_create API. 3466 * 3467 * - if tgt_prog == NULL when this function was called using the old 3468 * raw_tracepoint_open API, and we need a target from prog->aux 3469 * 3470 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 3471 * was detached and is going for re-attachment. 3472 * 3473 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf 3474 * are NULL, then program was already attached and user did not provide 3475 * tgt_prog_fd so we have no way to find out or create trampoline 3476 */ 3477 if (!prog->aux->dst_trampoline && !tgt_prog) { 3478 /* 3479 * Allow re-attach for TRACING and LSM programs. If it's 3480 * currently linked, bpf_trampoline_link_prog will fail. 3481 * EXT programs need to specify tgt_prog_fd, so they 3482 * re-attach in separate code path. 3483 */ 3484 if (prog->type != BPF_PROG_TYPE_TRACING && 3485 prog->type != BPF_PROG_TYPE_LSM) { 3486 err = -EINVAL; 3487 goto out_unlock; 3488 } 3489 /* We can allow re-attach only if we have valid attach_btf. */ 3490 if (!prog->aux->attach_btf) { 3491 err = -EINVAL; 3492 goto out_unlock; 3493 } 3494 btf_id = prog->aux->attach_btf_id; 3495 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 3496 } 3497 3498 if (!prog->aux->dst_trampoline || 3499 (key && key != prog->aux->dst_trampoline->key)) { 3500 /* If there is no saved target, or the specified target is 3501 * different from the destination specified at load time, we 3502 * need a new trampoline and a check for compatibility 3503 */ 3504 struct bpf_attach_target_info tgt_info = {}; 3505 3506 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 3507 &tgt_info); 3508 if (err) 3509 goto out_unlock; 3510 3511 if (tgt_info.tgt_mod) { 3512 module_put(prog->aux->mod); 3513 prog->aux->mod = tgt_info.tgt_mod; 3514 } 3515 3516 tr = bpf_trampoline_get(key, &tgt_info); 3517 if (!tr) { 3518 err = -ENOMEM; 3519 goto out_unlock; 3520 } 3521 } else { 3522 /* The caller didn't specify a target, or the target was the 3523 * same as the destination supplied during program load. 
This 3524 * means we can reuse the trampoline and reference from program 3525 * load time, and there is no need to allocate a new one. This 3526 * can only happen once for any program, as the saved values in 3527 * prog->aux are cleared below. 3528 */ 3529 tr = prog->aux->dst_trampoline; 3530 tgt_prog = prog->aux->dst_prog; 3531 } 3532 3533 err = bpf_link_prime(&link->link.link, &link_primer); 3534 if (err) 3535 goto out_unlock; 3536 3537 err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog); 3538 if (err) { 3539 bpf_link_cleanup(&link_primer); 3540 link = NULL; 3541 goto out_unlock; 3542 } 3543 3544 link->tgt_prog = tgt_prog; 3545 link->trampoline = tr; 3546 3547 /* Always clear the trampoline and target prog from prog->aux to make 3548 * sure the original attach destination is not kept alive after a 3549 * program is (re-)attached to another target. 3550 */ 3551 if (prog->aux->dst_prog && 3552 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 3553 /* got extra prog ref from syscall, or attaching to different prog */ 3554 bpf_prog_put(prog->aux->dst_prog); 3555 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 3556 /* we allocated a new trampoline, so free the old one */ 3557 bpf_trampoline_put(prog->aux->dst_trampoline); 3558 3559 prog->aux->dst_prog = NULL; 3560 prog->aux->dst_trampoline = NULL; 3561 mutex_unlock(&prog->aux->dst_mutex); 3562 3563 return bpf_link_settle(&link_primer); 3564 out_unlock: 3565 if (tr && tr != prog->aux->dst_trampoline) 3566 bpf_trampoline_put(tr); 3567 mutex_unlock(&prog->aux->dst_mutex); 3568 kfree(link); 3569 out_put_prog: 3570 if (tgt_prog_fd && tgt_prog) 3571 bpf_prog_put(tgt_prog); 3572 return err; 3573 } 3574 3575 static void bpf_raw_tp_link_release(struct bpf_link *link) 3576 { 3577 struct bpf_raw_tp_link *raw_tp = 3578 container_of(link, struct bpf_raw_tp_link, link); 3579 3580 bpf_probe_unregister(raw_tp->btp, raw_tp); 3581 bpf_put_raw_tracepoint(raw_tp->btp); 3582 } 3583 3584 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 3585 { 3586 struct bpf_raw_tp_link *raw_tp = 3587 container_of(link, struct bpf_raw_tp_link, link); 3588 3589 kfree(raw_tp); 3590 } 3591 3592 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 3593 struct seq_file *seq) 3594 { 3595 struct bpf_raw_tp_link *raw_tp_link = 3596 container_of(link, struct bpf_raw_tp_link, link); 3597 3598 seq_printf(seq, 3599 "tp_name:\t%s\n", 3600 raw_tp_link->btp->tp->name); 3601 } 3602 3603 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen, 3604 u32 len) 3605 { 3606 if (ulen >= len + 1) { 3607 if (copy_to_user(ubuf, buf, len + 1)) 3608 return -EFAULT; 3609 } else { 3610 char zero = '\0'; 3611 3612 if (copy_to_user(ubuf, buf, ulen - 1)) 3613 return -EFAULT; 3614 if (put_user(zero, ubuf + ulen - 1)) 3615 return -EFAULT; 3616 return -ENOSPC; 3617 } 3618 3619 return 0; 3620 } 3621 3622 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 3623 struct bpf_link_info *info) 3624 { 3625 struct bpf_raw_tp_link *raw_tp_link = 3626 container_of(link, struct bpf_raw_tp_link, link); 3627 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 3628 const char *tp_name = raw_tp_link->btp->tp->name; 3629 u32 ulen = info->raw_tracepoint.tp_name_len; 3630 size_t tp_len = strlen(tp_name); 3631 3632 if (!ulen ^ !ubuf) 3633 return -EINVAL; 3634 3635 info->raw_tracepoint.tp_name_len = tp_len + 1; 3636 3637 if (!ubuf) 3638 return 0; 3639 3640 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len); 3641 } 3642 3643 static 
const struct bpf_link_ops bpf_raw_tp_link_lops = { 3644 .release = bpf_raw_tp_link_release, 3645 .dealloc_deferred = bpf_raw_tp_link_dealloc, 3646 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 3647 .fill_link_info = bpf_raw_tp_link_fill_link_info, 3648 }; 3649 3650 #ifdef CONFIG_PERF_EVENTS 3651 struct bpf_perf_link { 3652 struct bpf_link link; 3653 struct file *perf_file; 3654 }; 3655 3656 static void bpf_perf_link_release(struct bpf_link *link) 3657 { 3658 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3659 struct perf_event *event = perf_link->perf_file->private_data; 3660 3661 perf_event_free_bpf_prog(event); 3662 fput(perf_link->perf_file); 3663 } 3664 3665 static void bpf_perf_link_dealloc(struct bpf_link *link) 3666 { 3667 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3668 3669 kfree(perf_link); 3670 } 3671 3672 static int bpf_perf_link_fill_common(const struct perf_event *event, 3673 char __user *uname, u32 *ulenp, 3674 u64 *probe_offset, u64 *probe_addr, 3675 u32 *fd_type, unsigned long *missed) 3676 { 3677 const char *buf; 3678 u32 prog_id, ulen; 3679 size_t len; 3680 int err; 3681 3682 ulen = *ulenp; 3683 if (!ulen ^ !uname) 3684 return -EINVAL; 3685 3686 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf, 3687 probe_offset, probe_addr, missed); 3688 if (err) 3689 return err; 3690 3691 if (buf) { 3692 len = strlen(buf); 3693 *ulenp = len + 1; 3694 } else { 3695 *ulenp = 1; 3696 } 3697 if (!uname) 3698 return 0; 3699 3700 if (buf) { 3701 err = bpf_copy_to_user(uname, buf, ulen, len); 3702 if (err) 3703 return err; 3704 } else { 3705 char zero = '\0'; 3706 3707 if (put_user(zero, uname)) 3708 return -EFAULT; 3709 } 3710 return 0; 3711 } 3712 3713 #ifdef CONFIG_KPROBE_EVENTS 3714 static int bpf_perf_link_fill_kprobe(const struct perf_event *event, 3715 struct bpf_link_info *info) 3716 { 3717 unsigned long missed; 3718 char __user *uname; 3719 u64 addr, offset; 3720 u32 ulen, type; 3721 int err; 3722 3723 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); 3724 ulen = info->perf_event.kprobe.name_len; 3725 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, 3726 &type, &missed); 3727 if (err) 3728 return err; 3729 if (type == BPF_FD_TYPE_KRETPROBE) 3730 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; 3731 else 3732 info->perf_event.type = BPF_PERF_EVENT_KPROBE; 3733 info->perf_event.kprobe.name_len = ulen; 3734 info->perf_event.kprobe.offset = offset; 3735 info->perf_event.kprobe.missed = missed; 3736 if (!kallsyms_show_value(current_cred())) 3737 addr = 0; 3738 info->perf_event.kprobe.addr = addr; 3739 info->perf_event.kprobe.cookie = event->bpf_cookie; 3740 return 0; 3741 } 3742 #endif 3743 3744 #ifdef CONFIG_UPROBE_EVENTS 3745 static int bpf_perf_link_fill_uprobe(const struct perf_event *event, 3746 struct bpf_link_info *info) 3747 { 3748 char __user *uname; 3749 u64 addr, offset; 3750 u32 ulen, type; 3751 int err; 3752 3753 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); 3754 ulen = info->perf_event.uprobe.name_len; 3755 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, 3756 &type, NULL); 3757 if (err) 3758 return err; 3759 3760 if (type == BPF_FD_TYPE_URETPROBE) 3761 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; 3762 else 3763 info->perf_event.type = BPF_PERF_EVENT_UPROBE; 3764 info->perf_event.uprobe.name_len = ulen; 3765 info->perf_event.uprobe.offset = offset; 3766 info->perf_event.uprobe.cookie = event->bpf_cookie; 3767 return 0; 3768 
} 3769 #endif 3770 3771 static int bpf_perf_link_fill_probe(const struct perf_event *event, 3772 struct bpf_link_info *info) 3773 { 3774 #ifdef CONFIG_KPROBE_EVENTS 3775 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) 3776 return bpf_perf_link_fill_kprobe(event, info); 3777 #endif 3778 #ifdef CONFIG_UPROBE_EVENTS 3779 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) 3780 return bpf_perf_link_fill_uprobe(event, info); 3781 #endif 3782 return -EOPNOTSUPP; 3783 } 3784 3785 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event, 3786 struct bpf_link_info *info) 3787 { 3788 char __user *uname; 3789 u32 ulen; 3790 int err; 3791 3792 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); 3793 ulen = info->perf_event.tracepoint.name_len; 3794 err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL); 3795 if (err) 3796 return err; 3797 3798 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; 3799 info->perf_event.tracepoint.name_len = ulen; 3800 info->perf_event.tracepoint.cookie = event->bpf_cookie; 3801 return 0; 3802 } 3803 3804 static int bpf_perf_link_fill_perf_event(const struct perf_event *event, 3805 struct bpf_link_info *info) 3806 { 3807 info->perf_event.event.type = event->attr.type; 3808 info->perf_event.event.config = event->attr.config; 3809 info->perf_event.event.cookie = event->bpf_cookie; 3810 info->perf_event.type = BPF_PERF_EVENT_EVENT; 3811 return 0; 3812 } 3813 3814 static int bpf_perf_link_fill_link_info(const struct bpf_link *link, 3815 struct bpf_link_info *info) 3816 { 3817 struct bpf_perf_link *perf_link; 3818 const struct perf_event *event; 3819 3820 perf_link = container_of(link, struct bpf_perf_link, link); 3821 event = perf_get_event(perf_link->perf_file); 3822 if (IS_ERR(event)) 3823 return PTR_ERR(event); 3824 3825 switch (event->prog->type) { 3826 case BPF_PROG_TYPE_PERF_EVENT: 3827 return bpf_perf_link_fill_perf_event(event, info); 3828 case BPF_PROG_TYPE_TRACEPOINT: 3829 return bpf_perf_link_fill_tracepoint(event, info); 3830 case BPF_PROG_TYPE_KPROBE: 3831 return bpf_perf_link_fill_probe(event, info); 3832 default: 3833 return -EOPNOTSUPP; 3834 } 3835 } 3836 3837 static const struct bpf_link_ops bpf_perf_link_lops = { 3838 .release = bpf_perf_link_release, 3839 .dealloc = bpf_perf_link_dealloc, 3840 .fill_link_info = bpf_perf_link_fill_link_info, 3841 }; 3842 3843 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3844 { 3845 struct bpf_link_primer link_primer; 3846 struct bpf_perf_link *link; 3847 struct perf_event *event; 3848 struct file *perf_file; 3849 int err; 3850 3851 if (attr->link_create.flags) 3852 return -EINVAL; 3853 3854 perf_file = perf_event_get(attr->link_create.target_fd); 3855 if (IS_ERR(perf_file)) 3856 return PTR_ERR(perf_file); 3857 3858 link = kzalloc(sizeof(*link), GFP_USER); 3859 if (!link) { 3860 err = -ENOMEM; 3861 goto out_put_file; 3862 } 3863 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 3864 link->perf_file = perf_file; 3865 3866 err = bpf_link_prime(&link->link, &link_primer); 3867 if (err) { 3868 kfree(link); 3869 goto out_put_file; 3870 } 3871 3872 event = perf_file->private_data; 3873 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 3874 if (err) { 3875 bpf_link_cleanup(&link_primer); 3876 goto out_put_file; 3877 } 3878 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 3879 bpf_prog_inc(prog); 3880 3881 return bpf_link_settle(&link_primer); 3882 3883 
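/* All error paths funnel through here: drop the reference on the perf_event
 * file taken by perf_event_get(); the bpf_link, if it was created at all,
 * has already been freed or torn down via bpf_link_cleanup() before jumping
 * here.
 */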
out_put_file: 3884 fput(perf_file); 3885 return err; 3886 } 3887 #else 3888 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3889 { 3890 return -EOPNOTSUPP; 3891 } 3892 #endif /* CONFIG_PERF_EVENTS */ 3893 3894 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 3895 const char __user *user_tp_name, u64 cookie) 3896 { 3897 struct bpf_link_primer link_primer; 3898 struct bpf_raw_tp_link *link; 3899 struct bpf_raw_event_map *btp; 3900 const char *tp_name; 3901 char buf[128]; 3902 int err; 3903 3904 switch (prog->type) { 3905 case BPF_PROG_TYPE_TRACING: 3906 case BPF_PROG_TYPE_EXT: 3907 case BPF_PROG_TYPE_LSM: 3908 if (user_tp_name) 3909 /* The attach point for this category of programs 3910 * should be specified via btf_id during program load. 3911 */ 3912 return -EINVAL; 3913 if (prog->type == BPF_PROG_TYPE_TRACING && 3914 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 3915 tp_name = prog->aux->attach_func_name; 3916 break; 3917 } 3918 return bpf_tracing_prog_attach(prog, 0, 0, 0); 3919 case BPF_PROG_TYPE_RAW_TRACEPOINT: 3920 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 3921 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) 3922 return -EFAULT; 3923 buf[sizeof(buf) - 1] = 0; 3924 tp_name = buf; 3925 break; 3926 default: 3927 return -EINVAL; 3928 } 3929 3930 btp = bpf_get_raw_tracepoint(tp_name); 3931 if (!btp) 3932 return -ENOENT; 3933 3934 link = kzalloc(sizeof(*link), GFP_USER); 3935 if (!link) { 3936 err = -ENOMEM; 3937 goto out_put_btp; 3938 } 3939 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 3940 &bpf_raw_tp_link_lops, prog); 3941 link->btp = btp; 3942 link->cookie = cookie; 3943 3944 err = bpf_link_prime(&link->link, &link_primer); 3945 if (err) { 3946 kfree(link); 3947 goto out_put_btp; 3948 } 3949 3950 err = bpf_probe_register(link->btp, link); 3951 if (err) { 3952 bpf_link_cleanup(&link_primer); 3953 goto out_put_btp; 3954 } 3955 3956 return bpf_link_settle(&link_primer); 3957 3958 out_put_btp: 3959 bpf_put_raw_tracepoint(btp); 3960 return err; 3961 } 3962 3963 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie 3964 3965 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 3966 { 3967 struct bpf_prog *prog; 3968 void __user *tp_name; 3969 __u64 cookie; 3970 int fd; 3971 3972 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 3973 return -EINVAL; 3974 3975 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 3976 if (IS_ERR(prog)) 3977 return PTR_ERR(prog); 3978 3979 tp_name = u64_to_user_ptr(attr->raw_tracepoint.name); 3980 cookie = attr->raw_tracepoint.cookie; 3981 fd = bpf_raw_tp_link_attach(prog, tp_name, cookie); 3982 if (fd < 0) 3983 bpf_prog_put(prog); 3984 return fd; 3985 } 3986 3987 static enum bpf_prog_type 3988 attach_type_to_prog_type(enum bpf_attach_type attach_type) 3989 { 3990 switch (attach_type) { 3991 case BPF_CGROUP_INET_INGRESS: 3992 case BPF_CGROUP_INET_EGRESS: 3993 return BPF_PROG_TYPE_CGROUP_SKB; 3994 case BPF_CGROUP_INET_SOCK_CREATE: 3995 case BPF_CGROUP_INET_SOCK_RELEASE: 3996 case BPF_CGROUP_INET4_POST_BIND: 3997 case BPF_CGROUP_INET6_POST_BIND: 3998 return BPF_PROG_TYPE_CGROUP_SOCK; 3999 case BPF_CGROUP_INET4_BIND: 4000 case BPF_CGROUP_INET6_BIND: 4001 case BPF_CGROUP_INET4_CONNECT: 4002 case BPF_CGROUP_INET6_CONNECT: 4003 case BPF_CGROUP_UNIX_CONNECT: 4004 case BPF_CGROUP_INET4_GETPEERNAME: 4005 case BPF_CGROUP_INET6_GETPEERNAME: 4006 case BPF_CGROUP_UNIX_GETPEERNAME: 4007 case BPF_CGROUP_INET4_GETSOCKNAME: 4008 case BPF_CGROUP_INET6_GETSOCKNAME: 4009 case 
BPF_CGROUP_UNIX_GETSOCKNAME: 4010 case BPF_CGROUP_UDP4_SENDMSG: 4011 case BPF_CGROUP_UDP6_SENDMSG: 4012 case BPF_CGROUP_UNIX_SENDMSG: 4013 case BPF_CGROUP_UDP4_RECVMSG: 4014 case BPF_CGROUP_UDP6_RECVMSG: 4015 case BPF_CGROUP_UNIX_RECVMSG: 4016 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 4017 case BPF_CGROUP_SOCK_OPS: 4018 return BPF_PROG_TYPE_SOCK_OPS; 4019 case BPF_CGROUP_DEVICE: 4020 return BPF_PROG_TYPE_CGROUP_DEVICE; 4021 case BPF_SK_MSG_VERDICT: 4022 return BPF_PROG_TYPE_SK_MSG; 4023 case BPF_SK_SKB_STREAM_PARSER: 4024 case BPF_SK_SKB_STREAM_VERDICT: 4025 case BPF_SK_SKB_VERDICT: 4026 return BPF_PROG_TYPE_SK_SKB; 4027 case BPF_LIRC_MODE2: 4028 return BPF_PROG_TYPE_LIRC_MODE2; 4029 case BPF_FLOW_DISSECTOR: 4030 return BPF_PROG_TYPE_FLOW_DISSECTOR; 4031 case BPF_CGROUP_SYSCTL: 4032 return BPF_PROG_TYPE_CGROUP_SYSCTL; 4033 case BPF_CGROUP_GETSOCKOPT: 4034 case BPF_CGROUP_SETSOCKOPT: 4035 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 4036 case BPF_TRACE_ITER: 4037 case BPF_TRACE_RAW_TP: 4038 case BPF_TRACE_FENTRY: 4039 case BPF_TRACE_FEXIT: 4040 case BPF_MODIFY_RETURN: 4041 return BPF_PROG_TYPE_TRACING; 4042 case BPF_LSM_MAC: 4043 return BPF_PROG_TYPE_LSM; 4044 case BPF_SK_LOOKUP: 4045 return BPF_PROG_TYPE_SK_LOOKUP; 4046 case BPF_XDP: 4047 return BPF_PROG_TYPE_XDP; 4048 case BPF_LSM_CGROUP: 4049 return BPF_PROG_TYPE_LSM; 4050 case BPF_TCX_INGRESS: 4051 case BPF_TCX_EGRESS: 4052 case BPF_NETKIT_PRIMARY: 4053 case BPF_NETKIT_PEER: 4054 return BPF_PROG_TYPE_SCHED_CLS; 4055 default: 4056 return BPF_PROG_TYPE_UNSPEC; 4057 } 4058 } 4059 4060 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 4061 enum bpf_attach_type attach_type) 4062 { 4063 enum bpf_prog_type ptype; 4064 4065 switch (prog->type) { 4066 case BPF_PROG_TYPE_CGROUP_SOCK: 4067 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4068 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4069 case BPF_PROG_TYPE_SK_LOOKUP: 4070 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 4071 case BPF_PROG_TYPE_CGROUP_SKB: 4072 if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN)) 4073 /* cg-skb progs can be loaded by unpriv user. 4074 * check permissions at attach time. 4075 */ 4076 return -EPERM; 4077 4078 ptype = attach_type_to_prog_type(attach_type); 4079 if (prog->type != ptype) 4080 return -EINVAL; 4081 4082 return prog->enforce_expected_attach_type && 4083 prog->expected_attach_type != attach_type ? 
4084 -EINVAL : 0; 4085 case BPF_PROG_TYPE_EXT: 4086 return 0; 4087 case BPF_PROG_TYPE_NETFILTER: 4088 if (attach_type != BPF_NETFILTER) 4089 return -EINVAL; 4090 return 0; 4091 case BPF_PROG_TYPE_PERF_EVENT: 4092 case BPF_PROG_TYPE_TRACEPOINT: 4093 if (attach_type != BPF_PERF_EVENT) 4094 return -EINVAL; 4095 return 0; 4096 case BPF_PROG_TYPE_KPROBE: 4097 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && 4098 attach_type != BPF_TRACE_KPROBE_MULTI) 4099 return -EINVAL; 4100 if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION && 4101 attach_type != BPF_TRACE_KPROBE_SESSION) 4102 return -EINVAL; 4103 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && 4104 attach_type != BPF_TRACE_UPROBE_MULTI) 4105 return -EINVAL; 4106 if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION && 4107 attach_type != BPF_TRACE_UPROBE_SESSION) 4108 return -EINVAL; 4109 if (attach_type != BPF_PERF_EVENT && 4110 attach_type != BPF_TRACE_KPROBE_MULTI && 4111 attach_type != BPF_TRACE_KPROBE_SESSION && 4112 attach_type != BPF_TRACE_UPROBE_MULTI && 4113 attach_type != BPF_TRACE_UPROBE_SESSION) 4114 return -EINVAL; 4115 return 0; 4116 case BPF_PROG_TYPE_SCHED_CLS: 4117 if (attach_type != BPF_TCX_INGRESS && 4118 attach_type != BPF_TCX_EGRESS && 4119 attach_type != BPF_NETKIT_PRIMARY && 4120 attach_type != BPF_NETKIT_PEER) 4121 return -EINVAL; 4122 return 0; 4123 default: 4124 ptype = attach_type_to_prog_type(attach_type); 4125 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) 4126 return -EINVAL; 4127 return 0; 4128 } 4129 } 4130 4131 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision 4132 4133 #define BPF_F_ATTACH_MASK_BASE \ 4134 (BPF_F_ALLOW_OVERRIDE | \ 4135 BPF_F_ALLOW_MULTI | \ 4136 BPF_F_REPLACE) 4137 4138 #define BPF_F_ATTACH_MASK_MPROG \ 4139 (BPF_F_REPLACE | \ 4140 BPF_F_BEFORE | \ 4141 BPF_F_AFTER | \ 4142 BPF_F_ID | \ 4143 BPF_F_LINK) 4144 4145 static int bpf_prog_attach(const union bpf_attr *attr) 4146 { 4147 enum bpf_prog_type ptype; 4148 struct bpf_prog *prog; 4149 int ret; 4150 4151 if (CHECK_ATTR(BPF_PROG_ATTACH)) 4152 return -EINVAL; 4153 4154 ptype = attach_type_to_prog_type(attr->attach_type); 4155 if (ptype == BPF_PROG_TYPE_UNSPEC) 4156 return -EINVAL; 4157 if (bpf_mprog_supported(ptype)) { 4158 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4159 return -EINVAL; 4160 } else { 4161 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE) 4162 return -EINVAL; 4163 if (attr->relative_fd || 4164 attr->expected_revision) 4165 return -EINVAL; 4166 } 4167 4168 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4169 if (IS_ERR(prog)) 4170 return PTR_ERR(prog); 4171 4172 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 4173 bpf_prog_put(prog); 4174 return -EINVAL; 4175 } 4176 4177 switch (ptype) { 4178 case BPF_PROG_TYPE_SK_SKB: 4179 case BPF_PROG_TYPE_SK_MSG: 4180 ret = sock_map_get_from_fd(attr, prog); 4181 break; 4182 case BPF_PROG_TYPE_LIRC_MODE2: 4183 ret = lirc_prog_attach(attr, prog); 4184 break; 4185 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4186 ret = netns_bpf_prog_attach(attr, prog); 4187 break; 4188 case BPF_PROG_TYPE_CGROUP_DEVICE: 4189 case BPF_PROG_TYPE_CGROUP_SKB: 4190 case BPF_PROG_TYPE_CGROUP_SOCK: 4191 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4192 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4193 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4194 case BPF_PROG_TYPE_SOCK_OPS: 4195 case BPF_PROG_TYPE_LSM: 4196 if (ptype == BPF_PROG_TYPE_LSM && 4197 prog->expected_attach_type != BPF_LSM_CGROUP) 4198 ret = -EINVAL; 4199 else 4200 ret = cgroup_bpf_prog_attach(attr, ptype, 
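/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * bpf_prog_attach() services the legacy (link-less) BPF_PROG_ATTACH
 * command. For the cgroup program types handled here, the cgroup
 * directory fd goes in target_fd and the program in attach_bpf_fd.
 * A minimal sketch, assuming cg_fd and prog_fd are already open and the
 * usual bpf/syscall headers are included:
 *
 *      union bpf_attr attr;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.target_fd     = cg_fd;
 *      attr.attach_bpf_fd = prog_fd;
 *      attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *      attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *      int err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */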
prog); 4201 break; 4202 case BPF_PROG_TYPE_SCHED_CLS: 4203 if (attr->attach_type == BPF_TCX_INGRESS || 4204 attr->attach_type == BPF_TCX_EGRESS) 4205 ret = tcx_prog_attach(attr, prog); 4206 else 4207 ret = netkit_prog_attach(attr, prog); 4208 break; 4209 default: 4210 ret = -EINVAL; 4211 } 4212 4213 if (ret) 4214 bpf_prog_put(prog); 4215 return ret; 4216 } 4217 4218 #define BPF_PROG_DETACH_LAST_FIELD expected_revision 4219 4220 static int bpf_prog_detach(const union bpf_attr *attr) 4221 { 4222 struct bpf_prog *prog = NULL; 4223 enum bpf_prog_type ptype; 4224 int ret; 4225 4226 if (CHECK_ATTR(BPF_PROG_DETACH)) 4227 return -EINVAL; 4228 4229 ptype = attach_type_to_prog_type(attr->attach_type); 4230 if (bpf_mprog_supported(ptype)) { 4231 if (ptype == BPF_PROG_TYPE_UNSPEC) 4232 return -EINVAL; 4233 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4234 return -EINVAL; 4235 if (attr->attach_bpf_fd) { 4236 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4237 if (IS_ERR(prog)) 4238 return PTR_ERR(prog); 4239 } 4240 } else if (attr->attach_flags || 4241 attr->relative_fd || 4242 attr->expected_revision) { 4243 return -EINVAL; 4244 } 4245 4246 switch (ptype) { 4247 case BPF_PROG_TYPE_SK_MSG: 4248 case BPF_PROG_TYPE_SK_SKB: 4249 ret = sock_map_prog_detach(attr, ptype); 4250 break; 4251 case BPF_PROG_TYPE_LIRC_MODE2: 4252 ret = lirc_prog_detach(attr); 4253 break; 4254 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4255 ret = netns_bpf_prog_detach(attr, ptype); 4256 break; 4257 case BPF_PROG_TYPE_CGROUP_DEVICE: 4258 case BPF_PROG_TYPE_CGROUP_SKB: 4259 case BPF_PROG_TYPE_CGROUP_SOCK: 4260 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4261 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4262 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4263 case BPF_PROG_TYPE_SOCK_OPS: 4264 case BPF_PROG_TYPE_LSM: 4265 ret = cgroup_bpf_prog_detach(attr, ptype); 4266 break; 4267 case BPF_PROG_TYPE_SCHED_CLS: 4268 if (attr->attach_type == BPF_TCX_INGRESS || 4269 attr->attach_type == BPF_TCX_EGRESS) 4270 ret = tcx_prog_detach(attr, prog); 4271 else 4272 ret = netkit_prog_detach(attr, prog); 4273 break; 4274 default: 4275 ret = -EINVAL; 4276 } 4277 4278 if (prog) 4279 bpf_prog_put(prog); 4280 return ret; 4281 } 4282 4283 #define BPF_PROG_QUERY_LAST_FIELD query.revision 4284 4285 static int bpf_prog_query(const union bpf_attr *attr, 4286 union bpf_attr __user *uattr) 4287 { 4288 if (!bpf_net_capable()) 4289 return -EPERM; 4290 if (CHECK_ATTR(BPF_PROG_QUERY)) 4291 return -EINVAL; 4292 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 4293 return -EINVAL; 4294 4295 switch (attr->query.attach_type) { 4296 case BPF_CGROUP_INET_INGRESS: 4297 case BPF_CGROUP_INET_EGRESS: 4298 case BPF_CGROUP_INET_SOCK_CREATE: 4299 case BPF_CGROUP_INET_SOCK_RELEASE: 4300 case BPF_CGROUP_INET4_BIND: 4301 case BPF_CGROUP_INET6_BIND: 4302 case BPF_CGROUP_INET4_POST_BIND: 4303 case BPF_CGROUP_INET6_POST_BIND: 4304 case BPF_CGROUP_INET4_CONNECT: 4305 case BPF_CGROUP_INET6_CONNECT: 4306 case BPF_CGROUP_UNIX_CONNECT: 4307 case BPF_CGROUP_INET4_GETPEERNAME: 4308 case BPF_CGROUP_INET6_GETPEERNAME: 4309 case BPF_CGROUP_UNIX_GETPEERNAME: 4310 case BPF_CGROUP_INET4_GETSOCKNAME: 4311 case BPF_CGROUP_INET6_GETSOCKNAME: 4312 case BPF_CGROUP_UNIX_GETSOCKNAME: 4313 case BPF_CGROUP_UDP4_SENDMSG: 4314 case BPF_CGROUP_UDP6_SENDMSG: 4315 case BPF_CGROUP_UNIX_SENDMSG: 4316 case BPF_CGROUP_UDP4_RECVMSG: 4317 case BPF_CGROUP_UDP6_RECVMSG: 4318 case BPF_CGROUP_UNIX_RECVMSG: 4319 case BPF_CGROUP_SOCK_OPS: 4320 case BPF_CGROUP_DEVICE: 4321 case BPF_CGROUP_SYSCTL: 4322 case BPF_CGROUP_GETSOCKOPT: 4323 case 
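/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * bpf_prog_query() reports what is currently attached at one of the
 * attach points listed in this switch. A minimal cgroup query, assuming
 * cg_fd is an open cgroup directory fd:
 *
 *      __u32 ids[64];
 *      union bpf_attr attr;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.query.target_fd   = cg_fd;
 *      attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *      attr.query.prog_ids    = (__u64)(unsigned long)ids;
 *      attr.query.prog_cnt    = 64;
 *      int err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *      // on success attr.query.prog_cnt holds the number of IDs written back
 */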
BPF_CGROUP_SETSOCKOPT: 4324 case BPF_LSM_CGROUP: 4325 return cgroup_bpf_prog_query(attr, uattr); 4326 case BPF_LIRC_MODE2: 4327 return lirc_prog_query(attr, uattr); 4328 case BPF_FLOW_DISSECTOR: 4329 case BPF_SK_LOOKUP: 4330 return netns_bpf_prog_query(attr, uattr); 4331 case BPF_SK_SKB_STREAM_PARSER: 4332 case BPF_SK_SKB_STREAM_VERDICT: 4333 case BPF_SK_MSG_VERDICT: 4334 case BPF_SK_SKB_VERDICT: 4335 return sock_map_bpf_prog_query(attr, uattr); 4336 case BPF_TCX_INGRESS: 4337 case BPF_TCX_EGRESS: 4338 return tcx_prog_query(attr, uattr); 4339 case BPF_NETKIT_PRIMARY: 4340 case BPF_NETKIT_PEER: 4341 return netkit_prog_query(attr, uattr); 4342 default: 4343 return -EINVAL; 4344 } 4345 } 4346 4347 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size 4348 4349 static int bpf_prog_test_run(const union bpf_attr *attr, 4350 union bpf_attr __user *uattr) 4351 { 4352 struct bpf_prog *prog; 4353 int ret = -ENOTSUPP; 4354 4355 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 4356 return -EINVAL; 4357 4358 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 4359 (!attr->test.ctx_size_in && attr->test.ctx_in)) 4360 return -EINVAL; 4361 4362 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 4363 (!attr->test.ctx_size_out && attr->test.ctx_out)) 4364 return -EINVAL; 4365 4366 prog = bpf_prog_get(attr->test.prog_fd); 4367 if (IS_ERR(prog)) 4368 return PTR_ERR(prog); 4369 4370 if (prog->aux->ops->test_run) 4371 ret = prog->aux->ops->test_run(prog, attr, uattr); 4372 4373 bpf_prog_put(prog); 4374 return ret; 4375 } 4376 4377 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 4378 4379 static int bpf_obj_get_next_id(const union bpf_attr *attr, 4380 union bpf_attr __user *uattr, 4381 struct idr *idr, 4382 spinlock_t *lock) 4383 { 4384 u32 next_id = attr->start_id; 4385 int err = 0; 4386 4387 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 4388 return -EINVAL; 4389 4390 if (!capable(CAP_SYS_ADMIN)) 4391 return -EPERM; 4392 4393 next_id++; 4394 spin_lock_bh(lock); 4395 if (!idr_get_next(idr, &next_id)) 4396 err = -ENOENT; 4397 spin_unlock_bh(lock); 4398 4399 if (!err) 4400 err = put_user(next_id, &uattr->next_id); 4401 4402 return err; 4403 } 4404 4405 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 4406 { 4407 struct bpf_map *map; 4408 4409 spin_lock_bh(&map_idr_lock); 4410 again: 4411 map = idr_get_next(&map_idr, id); 4412 if (map) { 4413 map = __bpf_map_inc_not_zero(map, false); 4414 if (IS_ERR(map)) { 4415 (*id)++; 4416 goto again; 4417 } 4418 } 4419 spin_unlock_bh(&map_idr_lock); 4420 4421 return map; 4422 } 4423 4424 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 4425 { 4426 struct bpf_prog *prog; 4427 4428 spin_lock_bh(&prog_idr_lock); 4429 again: 4430 prog = idr_get_next(&prog_idr, id); 4431 if (prog) { 4432 prog = bpf_prog_inc_not_zero(prog); 4433 if (IS_ERR(prog)) { 4434 (*id)++; 4435 goto again; 4436 } 4437 } 4438 spin_unlock_bh(&prog_idr_lock); 4439 4440 return prog; 4441 } 4442 4443 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 4444 4445 struct bpf_prog *bpf_prog_by_id(u32 id) 4446 { 4447 struct bpf_prog *prog; 4448 4449 if (!id) 4450 return ERR_PTR(-ENOENT); 4451 4452 spin_lock_bh(&prog_idr_lock); 4453 prog = idr_find(&prog_idr, id); 4454 if (prog) 4455 prog = bpf_prog_inc_not_zero(prog); 4456 else 4457 prog = ERR_PTR(-ENOENT); 4458 spin_unlock_bh(&prog_idr_lock); 4459 return prog; 4460 } 4461 4462 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 4463 { 4464 struct bpf_prog *prog; 4465 u32 id = attr->prog_id; 4466 int fd; 4467 4468 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 4469 
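/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * bpf_obj_get_next_id() and the *_GET_FD_BY_ID commands together let a
 * CAP_SYS_ADMIN tool enumerate every loaded program (bpftool does
 * essentially this). A minimal sketch of the iteration loop:
 *
 *      __u32 id = 0;
 *      union bpf_attr attr;
 *
 *      for (;;) {
 *              memset(&attr, 0, sizeof(attr));
 *              attr.start_id = id;
 *              if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
 *                      break;          // -ENOENT: no more programs
 *              id = attr.next_id;
 *
 *              memset(&attr, 0, sizeof(attr));
 *              attr.prog_id = id;
 *              int fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
 *              if (fd >= 0)
 *                      close(fd);      // or inspect via BPF_OBJ_GET_INFO_BY_FD first
 *      }
 */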
return -EINVAL; 4470 4471 if (!capable(CAP_SYS_ADMIN)) 4472 return -EPERM; 4473 4474 prog = bpf_prog_by_id(id); 4475 if (IS_ERR(prog)) 4476 return PTR_ERR(prog); 4477 4478 fd = bpf_prog_new_fd(prog); 4479 if (fd < 0) 4480 bpf_prog_put(prog); 4481 4482 return fd; 4483 } 4484 4485 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 4486 4487 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 4488 { 4489 struct bpf_map *map; 4490 u32 id = attr->map_id; 4491 int f_flags; 4492 int fd; 4493 4494 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 4495 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 4496 return -EINVAL; 4497 4498 if (!capable(CAP_SYS_ADMIN)) 4499 return -EPERM; 4500 4501 f_flags = bpf_get_file_flag(attr->open_flags); 4502 if (f_flags < 0) 4503 return f_flags; 4504 4505 spin_lock_bh(&map_idr_lock); 4506 map = idr_find(&map_idr, id); 4507 if (map) 4508 map = __bpf_map_inc_not_zero(map, true); 4509 else 4510 map = ERR_PTR(-ENOENT); 4511 spin_unlock_bh(&map_idr_lock); 4512 4513 if (IS_ERR(map)) 4514 return PTR_ERR(map); 4515 4516 fd = bpf_map_new_fd(map, f_flags); 4517 if (fd < 0) 4518 bpf_map_put_with_uref(map); 4519 4520 return fd; 4521 } 4522 4523 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 4524 unsigned long addr, u32 *off, 4525 u32 *type) 4526 { 4527 const struct bpf_map *map; 4528 int i; 4529 4530 mutex_lock(&prog->aux->used_maps_mutex); 4531 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 4532 map = prog->aux->used_maps[i]; 4533 if (map == (void *)addr) { 4534 *type = BPF_PSEUDO_MAP_FD; 4535 goto out; 4536 } 4537 if (!map->ops->map_direct_value_meta) 4538 continue; 4539 if (!map->ops->map_direct_value_meta(map, addr, off)) { 4540 *type = BPF_PSEUDO_MAP_VALUE; 4541 goto out; 4542 } 4543 } 4544 map = NULL; 4545 4546 out: 4547 mutex_unlock(&prog->aux->used_maps_mutex); 4548 return map; 4549 } 4550 4551 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 4552 const struct cred *f_cred) 4553 { 4554 const struct bpf_map *map; 4555 struct bpf_insn *insns; 4556 u32 off, type; 4557 u64 imm; 4558 u8 code; 4559 int i; 4560 4561 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 4562 GFP_USER); 4563 if (!insns) 4564 return insns; 4565 4566 for (i = 0; i < prog->len; i++) { 4567 code = insns[i].code; 4568 4569 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 4570 insns[i].code = BPF_JMP | BPF_CALL; 4571 insns[i].imm = BPF_FUNC_tail_call; 4572 /* fall-through */ 4573 } 4574 if (code == (BPF_JMP | BPF_CALL) || 4575 code == (BPF_JMP | BPF_CALL_ARGS)) { 4576 if (code == (BPF_JMP | BPF_CALL_ARGS)) 4577 insns[i].code = BPF_JMP | BPF_CALL; 4578 if (!bpf_dump_raw_ok(f_cred)) 4579 insns[i].imm = 0; 4580 continue; 4581 } 4582 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 4583 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 4584 continue; 4585 } 4586 4587 if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX || 4588 BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) { 4589 insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM; 4590 continue; 4591 } 4592 4593 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 4594 continue; 4595 4596 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 4597 map = bpf_map_from_imm(prog, imm, &off, &type); 4598 if (map) { 4599 insns[i].src_reg = type; 4600 insns[i].imm = map->id; 4601 insns[i + 1].imm = off; 4602 continue; 4603 } 4604 } 4605 4606 return insns; 4607 } 4608 4609 static int set_info_rec_size(struct bpf_prog_info *info) 4610 { 4611 /* 4612 * Ensure 
info.*_rec_size is the same as kernel expected size 4613 * 4614 * or 4615 * 4616 * Only allow zero *_rec_size if both _rec_size and _cnt are 4617 * zero. In this case, the kernel will set the expected 4618 * _rec_size back to the info. 4619 */ 4620 4621 if ((info->nr_func_info || info->func_info_rec_size) && 4622 info->func_info_rec_size != sizeof(struct bpf_func_info)) 4623 return -EINVAL; 4624 4625 if ((info->nr_line_info || info->line_info_rec_size) && 4626 info->line_info_rec_size != sizeof(struct bpf_line_info)) 4627 return -EINVAL; 4628 4629 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 4630 info->jited_line_info_rec_size != sizeof(__u64)) 4631 return -EINVAL; 4632 4633 info->func_info_rec_size = sizeof(struct bpf_func_info); 4634 info->line_info_rec_size = sizeof(struct bpf_line_info); 4635 info->jited_line_info_rec_size = sizeof(__u64); 4636 4637 return 0; 4638 } 4639 4640 static int bpf_prog_get_info_by_fd(struct file *file, 4641 struct bpf_prog *prog, 4642 const union bpf_attr *attr, 4643 union bpf_attr __user *uattr) 4644 { 4645 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4646 struct btf *attach_btf = bpf_prog_get_target_btf(prog); 4647 struct bpf_prog_info info; 4648 u32 info_len = attr->info.info_len; 4649 struct bpf_prog_kstats stats; 4650 char __user *uinsns; 4651 u32 ulen; 4652 int err; 4653 4654 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4655 if (err) 4656 return err; 4657 info_len = min_t(u32, sizeof(info), info_len); 4658 4659 memset(&info, 0, sizeof(info)); 4660 if (copy_from_user(&info, uinfo, info_len)) 4661 return -EFAULT; 4662 4663 info.type = prog->type; 4664 info.id = prog->aux->id; 4665 info.load_time = prog->aux->load_time; 4666 info.created_by_uid = from_kuid_munged(current_user_ns(), 4667 prog->aux->user->uid); 4668 info.gpl_compatible = prog->gpl_compatible; 4669 4670 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 4671 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 4672 4673 mutex_lock(&prog->aux->used_maps_mutex); 4674 ulen = info.nr_map_ids; 4675 info.nr_map_ids = prog->aux->used_map_cnt; 4676 ulen = min_t(u32, info.nr_map_ids, ulen); 4677 if (ulen) { 4678 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 4679 u32 i; 4680 4681 for (i = 0; i < ulen; i++) 4682 if (put_user(prog->aux->used_maps[i]->id, 4683 &user_map_ids[i])) { 4684 mutex_unlock(&prog->aux->used_maps_mutex); 4685 return -EFAULT; 4686 } 4687 } 4688 mutex_unlock(&prog->aux->used_maps_mutex); 4689 4690 err = set_info_rec_size(&info); 4691 if (err) 4692 return err; 4693 4694 bpf_prog_get_stats(prog, &stats); 4695 info.run_time_ns = stats.nsecs; 4696 info.run_cnt = stats.cnt; 4697 info.recursion_misses = stats.misses; 4698 4699 info.verified_insns = prog->aux->verified_insns; 4700 4701 if (!bpf_capable()) { 4702 info.jited_prog_len = 0; 4703 info.xlated_prog_len = 0; 4704 info.nr_jited_ksyms = 0; 4705 info.nr_jited_func_lens = 0; 4706 info.nr_func_info = 0; 4707 info.nr_line_info = 0; 4708 info.nr_jited_line_info = 0; 4709 goto done; 4710 } 4711 4712 ulen = info.xlated_prog_len; 4713 info.xlated_prog_len = bpf_prog_insn_size(prog); 4714 if (info.xlated_prog_len && ulen) { 4715 struct bpf_insn *insns_sanitized; 4716 bool fault; 4717 4718 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 4719 info.xlated_prog_insns = 0; 4720 goto done; 4721 } 4722 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 4723 if (!insns_sanitized) 4724 return -ENOMEM; 4725 uinsns = 
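/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * bpf_prog_get_info_by_fd() fills struct bpf_prog_info for the
 * BPF_OBJ_GET_INFO_BY_FD command. User space passes the size of the
 * struct it was built against in info_len; the kernel copies back
 * min(kernel size, user size) and rewrites info_len, which keeps old and
 * new binaries compatible in both directions. Minimal sketch:
 *
 *      struct bpf_prog_info info = {};
 *      union bpf_attr attr;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.info.bpf_fd   = prog_fd;
 *      attr.info.info_len = sizeof(info);
 *      attr.info.info     = (__u64)(unsigned long)&info;
 *      int err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *      // scalar fields (id, name, nr_map_ids, ...) are now valid; the
 *      // variable-size arrays are only copied when the caller pre-sets
 *      // both the pointer and the matching length/counter field.
 */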
u64_to_user_ptr(info.xlated_prog_insns); 4726 ulen = min_t(u32, info.xlated_prog_len, ulen); 4727 fault = copy_to_user(uinsns, insns_sanitized, ulen); 4728 kfree(insns_sanitized); 4729 if (fault) 4730 return -EFAULT; 4731 } 4732 4733 if (bpf_prog_is_offloaded(prog->aux)) { 4734 err = bpf_prog_offload_info_fill(&info, prog); 4735 if (err) 4736 return err; 4737 goto done; 4738 } 4739 4740 /* NOTE: the following code is supposed to be skipped for offload. 4741 * bpf_prog_offload_info_fill() is the place to fill similar fields 4742 * for offload. 4743 */ 4744 ulen = info.jited_prog_len; 4745 if (prog->aux->func_cnt) { 4746 u32 i; 4747 4748 info.jited_prog_len = 0; 4749 for (i = 0; i < prog->aux->func_cnt; i++) 4750 info.jited_prog_len += prog->aux->func[i]->jited_len; 4751 } else { 4752 info.jited_prog_len = prog->jited_len; 4753 } 4754 4755 if (info.jited_prog_len && ulen) { 4756 if (bpf_dump_raw_ok(file->f_cred)) { 4757 uinsns = u64_to_user_ptr(info.jited_prog_insns); 4758 ulen = min_t(u32, info.jited_prog_len, ulen); 4759 4760 /* for multi-function programs, copy the JITed 4761 * instructions for all the functions 4762 */ 4763 if (prog->aux->func_cnt) { 4764 u32 len, free, i; 4765 u8 *img; 4766 4767 free = ulen; 4768 for (i = 0; i < prog->aux->func_cnt; i++) { 4769 len = prog->aux->func[i]->jited_len; 4770 len = min_t(u32, len, free); 4771 img = (u8 *) prog->aux->func[i]->bpf_func; 4772 if (copy_to_user(uinsns, img, len)) 4773 return -EFAULT; 4774 uinsns += len; 4775 free -= len; 4776 if (!free) 4777 break; 4778 } 4779 } else { 4780 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 4781 return -EFAULT; 4782 } 4783 } else { 4784 info.jited_prog_insns = 0; 4785 } 4786 } 4787 4788 ulen = info.nr_jited_ksyms; 4789 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 4790 if (ulen) { 4791 if (bpf_dump_raw_ok(file->f_cred)) { 4792 unsigned long ksym_addr; 4793 u64 __user *user_ksyms; 4794 u32 i; 4795 4796 /* copy the address of the kernel symbol 4797 * corresponding to each function 4798 */ 4799 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 4800 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 4801 if (prog->aux->func_cnt) { 4802 for (i = 0; i < ulen; i++) { 4803 ksym_addr = (unsigned long) 4804 prog->aux->func[i]->bpf_func; 4805 if (put_user((u64) ksym_addr, 4806 &user_ksyms[i])) 4807 return -EFAULT; 4808 } 4809 } else { 4810 ksym_addr = (unsigned long) prog->bpf_func; 4811 if (put_user((u64) ksym_addr, &user_ksyms[0])) 4812 return -EFAULT; 4813 } 4814 } else { 4815 info.jited_ksyms = 0; 4816 } 4817 } 4818 4819 ulen = info.nr_jited_func_lens; 4820 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 4821 if (ulen) { 4822 if (bpf_dump_raw_ok(file->f_cred)) { 4823 u32 __user *user_lens; 4824 u32 func_len, i; 4825 4826 /* copy the JITed image lengths for each function */ 4827 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 4828 user_lens = u64_to_user_ptr(info.jited_func_lens); 4829 if (prog->aux->func_cnt) { 4830 for (i = 0; i < ulen; i++) { 4831 func_len = 4832 prog->aux->func[i]->jited_len; 4833 if (put_user(func_len, &user_lens[i])) 4834 return -EFAULT; 4835 } 4836 } else { 4837 func_len = prog->jited_len; 4838 if (put_user(func_len, &user_lens[0])) 4839 return -EFAULT; 4840 } 4841 } else { 4842 info.jited_func_lens = 0; 4843 } 4844 } 4845 4846 if (prog->aux->btf) 4847 info.btf_id = btf_obj_id(prog->aux->btf); 4848 info.attach_btf_id = prog->aux->attach_btf_id; 4849 if (attach_btf) 4850 info.attach_btf_obj_id = btf_obj_id(attach_btf); 4851 4852 ulen = info.nr_func_info; 4853 info.nr_func_info = prog->aux->func_info_cnt; 4854 if (info.nr_func_info && ulen) { 4855 char __user *user_finfo; 4856 4857 user_finfo = u64_to_user_ptr(info.func_info); 4858 ulen = min_t(u32, info.nr_func_info, ulen); 4859 if (copy_to_user(user_finfo, prog->aux->func_info, 4860 info.func_info_rec_size * ulen)) 4861 return -EFAULT; 4862 } 4863 4864 ulen = info.nr_line_info; 4865 info.nr_line_info = prog->aux->nr_linfo; 4866 if (info.nr_line_info && ulen) { 4867 __u8 __user *user_linfo; 4868 4869 user_linfo = u64_to_user_ptr(info.line_info); 4870 ulen = min_t(u32, info.nr_line_info, ulen); 4871 if (copy_to_user(user_linfo, prog->aux->linfo, 4872 info.line_info_rec_size * ulen)) 4873 return -EFAULT; 4874 } 4875 4876 ulen = info.nr_jited_line_info; 4877 if (prog->aux->jited_linfo) 4878 info.nr_jited_line_info = prog->aux->nr_linfo; 4879 else 4880 info.nr_jited_line_info = 0; 4881 if (info.nr_jited_line_info && ulen) { 4882 if (bpf_dump_raw_ok(file->f_cred)) { 4883 unsigned long line_addr; 4884 __u64 __user *user_linfo; 4885 u32 i; 4886 4887 user_linfo = u64_to_user_ptr(info.jited_line_info); 4888 ulen = min_t(u32, info.nr_jited_line_info, ulen); 4889 for (i = 0; i < ulen; i++) { 4890 line_addr = (unsigned long)prog->aux->jited_linfo[i]; 4891 if (put_user((__u64)line_addr, &user_linfo[i])) 4892 return -EFAULT; 4893 } 4894 } else { 4895 info.jited_line_info = 0; 4896 } 4897 } 4898 4899 ulen = info.nr_prog_tags; 4900 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 4901 if (ulen) { 4902 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 4903 u32 i; 4904 4905 user_prog_tags = u64_to_user_ptr(info.prog_tags); 4906 ulen = min_t(u32, info.nr_prog_tags, ulen); 4907 if (prog->aux->func_cnt) { 4908 for (i = 0; i < ulen; i++) { 4909 if (copy_to_user(user_prog_tags[i], 4910 prog->aux->func[i]->tag, 4911 BPF_TAG_SIZE)) 4912 return -EFAULT; 4913 } 4914 } else { 4915 if (copy_to_user(user_prog_tags[0], 4916 prog->tag, BPF_TAG_SIZE)) 4917 return -EFAULT; 4918 } 4919 } 4920 4921 done: 4922 if (copy_to_user(uinfo, &info, info_len) || 4923 put_user(info_len, &uattr->info.info_len)) 4924 return -EFAULT; 4925 4926 return 0; 4927 } 4928 4929 static int bpf_map_get_info_by_fd(struct file *file, 4930 struct bpf_map *map, 4931 const union bpf_attr *attr, 4932 union bpf_attr __user *uattr) 4933 { 4934 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4935 struct bpf_map_info info; 4936 u32 info_len = attr->info.info_len; 4937 int err; 4938 4939 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4940 if (err) 4941 return err; 4942 info_len = min_t(u32, sizeof(info), info_len); 4943 4944 memset(&info, 0, sizeof(info)); 4945 info.type = map->map_type; 4946 info.id = map->id; 4947 info.key_size = map->key_size; 4948 info.value_size = map->value_size; 4949 info.max_entries = map->max_entries; 4950 info.map_flags = map->map_flags; 4951 info.map_extra = map->map_extra; 4952 memcpy(info.name, map->name, sizeof(map->name)); 4953 4954 if (map->btf) { 4955 info.btf_id = btf_obj_id(map->btf); 4956 info.btf_key_type_id = map->btf_key_type_id; 4957 info.btf_value_type_id = map->btf_value_type_id; 4958 } 4959 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 4960 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) 4961 bpf_map_struct_ops_info_fill(&info, map); 4962 4963 if (bpf_map_is_offloaded(map)) { 4964 err = bpf_map_offload_info_fill(&info, map); 4965 if (err) 4966 return err; 4967 } 4968 4969 if (copy_to_user(uinfo, &info, info_len) || 4970 put_user(info_len, &uattr->info.info_len)) 4971 return -EFAULT; 4972 4973 return 0; 4974 } 4975 4976 static int bpf_btf_get_info_by_fd(struct file *file, 4977 struct btf *btf, 4978 const union bpf_attr *attr, 4979 union bpf_attr __user *uattr) 4980 { 4981 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4982 u32 info_len = attr->info.info_len; 4983 int err; 4984 4985 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); 4986 if (err) 4987 return err; 4988 4989 return btf_get_info_by_fd(btf, attr, uattr); 4990 } 4991 4992 static int bpf_link_get_info_by_fd(struct file *file, 4993 struct bpf_link *link, 4994 const union bpf_attr *attr, 4995 union bpf_attr __user *uattr) 4996 { 4997 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4998 struct bpf_link_info info; 4999 u32 info_len = attr->info.info_len; 5000 int err; 5001 5002 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 5003 if (err) 5004 return err; 5005 info_len = min_t(u32, sizeof(info), info_len); 5006 5007 memset(&info, 0, sizeof(info)); 5008 if (copy_from_user(&info, uinfo, info_len)) 5009 return -EFAULT; 5010 5011 info.type = link->type; 5012 info.id = link->id; 5013 if (link->prog) 5014 info.prog_id = link->prog->aux->id; 5015 5016 if (link->ops->fill_link_info) { 5017 err = link->ops->fill_link_info(link, &info); 5018 if (err) 5019 return err; 5020 } 5021 5022 if (copy_to_user(uinfo, &info, info_len) || 5023 put_user(info_len, 
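/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * The same BPF_OBJ_GET_INFO_BY_FD command also works on link fds: the
 * common header (type, id, prog_id) is filled right here and the
 * per-link-type union is filled by the link's ->fill_link_info()
 * callback. Minimal sketch, assuming link_fd is open and <stdio.h> is
 * available:
 *
 *      struct bpf_link_info linfo = {};
 *      union bpf_attr attr;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.info.bpf_fd   = link_fd;
 *      attr.info.info_len = sizeof(linfo);
 *      attr.info.info     = (__u64)(unsigned long)&linfo;
 *      if (!syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
 *              printf("link %u type %u prog %u\n",
 *                     linfo.id, linfo.type, linfo.prog_id);
 */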
&uattr->info.info_len)) 5024 return -EFAULT; 5025 5026 return 0; 5027 } 5028 5029 5030 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 5031 5032 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 5033 union bpf_attr __user *uattr) 5034 { 5035 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 5036 return -EINVAL; 5037 5038 CLASS(fd, f)(attr->info.bpf_fd); 5039 if (fd_empty(f)) 5040 return -EBADFD; 5041 5042 if (fd_file(f)->f_op == &bpf_prog_fops) 5043 return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, 5044 uattr); 5045 else if (fd_file(f)->f_op == &bpf_map_fops) 5046 return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, 5047 uattr); 5048 else if (fd_file(f)->f_op == &btf_fops) 5049 return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr); 5050 else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll) 5051 return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data, 5052 attr, uattr); 5053 return -EINVAL; 5054 } 5055 5056 #define BPF_BTF_LOAD_LAST_FIELD btf_token_fd 5057 5058 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) 5059 { 5060 struct bpf_token *token = NULL; 5061 5062 if (CHECK_ATTR(BPF_BTF_LOAD)) 5063 return -EINVAL; 5064 5065 if (attr->btf_flags & ~BPF_F_TOKEN_FD) 5066 return -EINVAL; 5067 5068 if (attr->btf_flags & BPF_F_TOKEN_FD) { 5069 token = bpf_token_get_from_fd(attr->btf_token_fd); 5070 if (IS_ERR(token)) 5071 return PTR_ERR(token); 5072 if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) { 5073 bpf_token_put(token); 5074 token = NULL; 5075 } 5076 } 5077 5078 if (!bpf_token_capable(token, CAP_BPF)) { 5079 bpf_token_put(token); 5080 return -EPERM; 5081 } 5082 5083 bpf_token_put(token); 5084 5085 return btf_new_fd(attr, uattr, uattr_size); 5086 } 5087 5088 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id 5089 5090 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) 5091 { 5092 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 5093 return -EINVAL; 5094 5095 if (!capable(CAP_SYS_ADMIN)) 5096 return -EPERM; 5097 5098 return btf_get_fd_by_id(attr->btf_id); 5099 } 5100 5101 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 5102 union bpf_attr __user *uattr, 5103 u32 prog_id, u32 fd_type, 5104 const char *buf, u64 probe_offset, 5105 u64 probe_addr) 5106 { 5107 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 5108 u32 len = buf ? strlen(buf) : 0, input_len; 5109 int err = 0; 5110 5111 if (put_user(len, &uattr->task_fd_query.buf_len)) 5112 return -EFAULT; 5113 input_len = attr->task_fd_query.buf_len; 5114 if (input_len && ubuf) { 5115 if (!len) { 5116 /* nothing to copy, just make ubuf NULL terminated */ 5117 char zero = '\0'; 5118 5119 if (put_user(zero, ubuf)) 5120 return -EFAULT; 5121 } else if (input_len >= len + 1) { 5122 /* ubuf can hold the string with NULL terminator */ 5123 if (copy_to_user(ubuf, buf, len + 1)) 5124 return -EFAULT; 5125 } else { 5126 /* ubuf cannot hold the string with NULL terminator, 5127 * do a partial copy with NULL terminator. 
5128 */ 5129 char zero = '\0'; 5130 5131 err = -ENOSPC; 5132 if (copy_to_user(ubuf, buf, input_len - 1)) 5133 return -EFAULT; 5134 if (put_user(zero, ubuf + input_len - 1)) 5135 return -EFAULT; 5136 } 5137 } 5138 5139 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 5140 put_user(fd_type, &uattr->task_fd_query.fd_type) || 5141 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 5142 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 5143 return -EFAULT; 5144 5145 return err; 5146 } 5147 5148 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 5149 5150 static int bpf_task_fd_query(const union bpf_attr *attr, 5151 union bpf_attr __user *uattr) 5152 { 5153 pid_t pid = attr->task_fd_query.pid; 5154 u32 fd = attr->task_fd_query.fd; 5155 const struct perf_event *event; 5156 struct task_struct *task; 5157 struct file *file; 5158 int err; 5159 5160 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 5161 return -EINVAL; 5162 5163 if (!capable(CAP_SYS_ADMIN)) 5164 return -EPERM; 5165 5166 if (attr->task_fd_query.flags != 0) 5167 return -EINVAL; 5168 5169 rcu_read_lock(); 5170 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 5171 rcu_read_unlock(); 5172 if (!task) 5173 return -ENOENT; 5174 5175 err = 0; 5176 file = fget_task(task, fd); 5177 put_task_struct(task); 5178 if (!file) 5179 return -EBADF; 5180 5181 if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) { 5182 struct bpf_link *link = file->private_data; 5183 5184 if (link->ops == &bpf_raw_tp_link_lops) { 5185 struct bpf_raw_tp_link *raw_tp = 5186 container_of(link, struct bpf_raw_tp_link, link); 5187 struct bpf_raw_event_map *btp = raw_tp->btp; 5188 5189 err = bpf_task_fd_query_copy(attr, uattr, 5190 raw_tp->link.prog->aux->id, 5191 BPF_FD_TYPE_RAW_TRACEPOINT, 5192 btp->tp->name, 0, 0); 5193 goto put_file; 5194 } 5195 goto out_not_supp; 5196 } 5197 5198 event = perf_get_event(file); 5199 if (!IS_ERR(event)) { 5200 u64 probe_offset, probe_addr; 5201 u32 prog_id, fd_type; 5202 const char *buf; 5203 5204 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 5205 &buf, &probe_offset, 5206 &probe_addr, NULL); 5207 if (!err) 5208 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 5209 fd_type, buf, 5210 probe_offset, 5211 probe_addr); 5212 goto put_file; 5213 } 5214 5215 out_not_supp: 5216 err = -ENOTSUPP; 5217 put_file: 5218 fput(file); 5219 return err; 5220 } 5221 5222 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 5223 5224 #define BPF_DO_BATCH(fn, ...) 
\ 5225 do { \ 5226 if (!fn) { \ 5227 err = -ENOTSUPP; \ 5228 goto err_put; \ 5229 } \ 5230 err = fn(__VA_ARGS__); \ 5231 } while (0) 5232 5233 static int bpf_map_do_batch(const union bpf_attr *attr, 5234 union bpf_attr __user *uattr, 5235 int cmd) 5236 { 5237 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH || 5238 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; 5239 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; 5240 struct bpf_map *map; 5241 int err; 5242 5243 if (CHECK_ATTR(BPF_MAP_BATCH)) 5244 return -EINVAL; 5245 5246 CLASS(fd, f)(attr->batch.map_fd); 5247 5248 map = __bpf_map_get(f); 5249 if (IS_ERR(map)) 5250 return PTR_ERR(map); 5251 if (has_write) 5252 bpf_map_write_active_inc(map); 5253 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 5254 err = -EPERM; 5255 goto err_put; 5256 } 5257 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 5258 err = -EPERM; 5259 goto err_put; 5260 } 5261 5262 if (cmd == BPF_MAP_LOOKUP_BATCH) 5263 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); 5264 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 5265 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); 5266 else if (cmd == BPF_MAP_UPDATE_BATCH) 5267 BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr); 5268 else 5269 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); 5270 err_put: 5271 if (has_write) { 5272 maybe_wait_bpf_programs(map); 5273 bpf_map_write_active_dec(map); 5274 } 5275 return err; 5276 } 5277 5278 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid 5279 static int link_create(union bpf_attr *attr, bpfptr_t uattr) 5280 { 5281 struct bpf_prog *prog; 5282 int ret; 5283 5284 if (CHECK_ATTR(BPF_LINK_CREATE)) 5285 return -EINVAL; 5286 5287 if (attr->link_create.attach_type == BPF_STRUCT_OPS) 5288 return bpf_struct_ops_link_create(attr); 5289 5290 prog = bpf_prog_get(attr->link_create.prog_fd); 5291 if (IS_ERR(prog)) 5292 return PTR_ERR(prog); 5293 5294 ret = bpf_prog_attach_check_attach_type(prog, 5295 attr->link_create.attach_type); 5296 if (ret) 5297 goto out; 5298 5299 switch (prog->type) { 5300 case BPF_PROG_TYPE_CGROUP_SKB: 5301 case BPF_PROG_TYPE_CGROUP_SOCK: 5302 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 5303 case BPF_PROG_TYPE_SOCK_OPS: 5304 case BPF_PROG_TYPE_CGROUP_DEVICE: 5305 case BPF_PROG_TYPE_CGROUP_SYSCTL: 5306 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 5307 ret = cgroup_bpf_link_attach(attr, prog); 5308 break; 5309 case BPF_PROG_TYPE_EXT: 5310 ret = bpf_tracing_prog_attach(prog, 5311 attr->link_create.target_fd, 5312 attr->link_create.target_btf_id, 5313 attr->link_create.tracing.cookie); 5314 break; 5315 case BPF_PROG_TYPE_LSM: 5316 case BPF_PROG_TYPE_TRACING: 5317 if (attr->link_create.attach_type != prog->expected_attach_type) { 5318 ret = -EINVAL; 5319 goto out; 5320 } 5321 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 5322 ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie); 5323 else if (prog->expected_attach_type == BPF_TRACE_ITER) 5324 ret = bpf_iter_link_attach(attr, uattr, prog); 5325 else if (prog->expected_attach_type == BPF_LSM_CGROUP) 5326 ret = cgroup_bpf_link_attach(attr, prog); 5327 else 5328 ret = bpf_tracing_prog_attach(prog, 5329 attr->link_create.target_fd, 5330 attr->link_create.target_btf_id, 5331 attr->link_create.tracing.cookie); 5332 break; 5333 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5334 case BPF_PROG_TYPE_SK_LOOKUP: 5335 ret = netns_bpf_link_create(attr, prog); 5336 break; 5337 case BPF_PROG_TYPE_SK_MSG: 5338 case BPF_PROG_TYPE_SK_SKB: 5339 ret = 
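/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * bpf_map_do_batch() above dispatches the four *_BATCH commands. A
 * lookup batch walks the map in chunks: out_batch returned by one call
 * is fed back as in_batch of the next, and the final (possibly partial)
 * chunk ends with -ENOENT. Sketch for a map with u32 keys and u64
 * values, starting from the beginning (in_batch left at 0):
 *
 *      __u32 out = 0;
 *      __u32 keys[64];
 *      __u64 vals[64];
 *      union bpf_attr attr;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.batch.map_fd    = map_fd;
 *      attr.batch.out_batch = (__u64)(unsigned long)&out;
 *      attr.batch.keys      = (__u64)(unsigned long)keys;
 *      attr.batch.values    = (__u64)(unsigned long)vals;
 *      attr.batch.count     = 64;
 *      int err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *      // attr.batch.count now holds the number of elements copied out;
 *      // to resume, point attr.batch.in_batch at out and call again.
 */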
sock_map_link_create(attr, prog); 5340 break; 5341 #ifdef CONFIG_NET 5342 case BPF_PROG_TYPE_XDP: 5343 ret = bpf_xdp_link_attach(attr, prog); 5344 break; 5345 case BPF_PROG_TYPE_SCHED_CLS: 5346 if (attr->link_create.attach_type == BPF_TCX_INGRESS || 5347 attr->link_create.attach_type == BPF_TCX_EGRESS) 5348 ret = tcx_link_attach(attr, prog); 5349 else 5350 ret = netkit_link_attach(attr, prog); 5351 break; 5352 case BPF_PROG_TYPE_NETFILTER: 5353 ret = bpf_nf_link_attach(attr, prog); 5354 break; 5355 #endif 5356 case BPF_PROG_TYPE_PERF_EVENT: 5357 case BPF_PROG_TYPE_TRACEPOINT: 5358 ret = bpf_perf_link_attach(attr, prog); 5359 break; 5360 case BPF_PROG_TYPE_KPROBE: 5361 if (attr->link_create.attach_type == BPF_PERF_EVENT) 5362 ret = bpf_perf_link_attach(attr, prog); 5363 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI || 5364 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION) 5365 ret = bpf_kprobe_multi_link_attach(attr, prog); 5366 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI || 5367 attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION) 5368 ret = bpf_uprobe_multi_link_attach(attr, prog); 5369 break; 5370 default: 5371 ret = -EINVAL; 5372 } 5373 5374 out: 5375 if (ret < 0) 5376 bpf_prog_put(prog); 5377 return ret; 5378 } 5379 5380 static int link_update_map(struct bpf_link *link, union bpf_attr *attr) 5381 { 5382 struct bpf_map *new_map, *old_map = NULL; 5383 int ret; 5384 5385 new_map = bpf_map_get(attr->link_update.new_map_fd); 5386 if (IS_ERR(new_map)) 5387 return PTR_ERR(new_map); 5388 5389 if (attr->link_update.flags & BPF_F_REPLACE) { 5390 old_map = bpf_map_get(attr->link_update.old_map_fd); 5391 if (IS_ERR(old_map)) { 5392 ret = PTR_ERR(old_map); 5393 goto out_put; 5394 } 5395 } else if (attr->link_update.old_map_fd) { 5396 ret = -EINVAL; 5397 goto out_put; 5398 } 5399 5400 ret = link->ops->update_map(link, new_map, old_map); 5401 5402 if (old_map) 5403 bpf_map_put(old_map); 5404 out_put: 5405 bpf_map_put(new_map); 5406 return ret; 5407 } 5408 5409 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd 5410 5411 static int link_update(union bpf_attr *attr) 5412 { 5413 struct bpf_prog *old_prog = NULL, *new_prog; 5414 struct bpf_link *link; 5415 u32 flags; 5416 int ret; 5417 5418 if (CHECK_ATTR(BPF_LINK_UPDATE)) 5419 return -EINVAL; 5420 5421 flags = attr->link_update.flags; 5422 if (flags & ~BPF_F_REPLACE) 5423 return -EINVAL; 5424 5425 link = bpf_link_get_from_fd(attr->link_update.link_fd); 5426 if (IS_ERR(link)) 5427 return PTR_ERR(link); 5428 5429 if (link->ops->update_map) { 5430 ret = link_update_map(link, attr); 5431 goto out_put_link; 5432 } 5433 5434 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); 5435 if (IS_ERR(new_prog)) { 5436 ret = PTR_ERR(new_prog); 5437 goto out_put_link; 5438 } 5439 5440 if (flags & BPF_F_REPLACE) { 5441 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); 5442 if (IS_ERR(old_prog)) { 5443 ret = PTR_ERR(old_prog); 5444 old_prog = NULL; 5445 goto out_put_progs; 5446 } 5447 } else if (attr->link_update.old_prog_fd) { 5448 ret = -EINVAL; 5449 goto out_put_progs; 5450 } 5451 5452 if (link->ops->update_prog) 5453 ret = link->ops->update_prog(link, new_prog, old_prog); 5454 else 5455 ret = -EINVAL; 5456 5457 out_put_progs: 5458 if (old_prog) 5459 bpf_prog_put(old_prog); 5460 if (ret) 5461 bpf_prog_put(new_prog); 5462 out_put_link: 5463 bpf_link_put_direct(link); 5464 return ret; 5465 } 5466 5467 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd 5468 5469 static int link_detach(union 
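/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * link_update() above atomically swaps the program behind an existing
 * link. With BPF_F_REPLACE the caller also names the program it expects
 * to be replacing, so a racing update is detected instead of silently
 * overwritten. Minimal sketch:
 *
 *      union bpf_attr attr;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.link_update.link_fd     = link_fd;
 *      attr.link_update.new_prog_fd = new_prog_fd;
 *      attr.link_update.old_prog_fd = old_prog_fd;
 *      attr.link_update.flags       = BPF_F_REPLACE;
 *      int err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 */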
bpf_attr *attr) 5470 { 5471 struct bpf_link *link; 5472 int ret; 5473 5474 if (CHECK_ATTR(BPF_LINK_DETACH)) 5475 return -EINVAL; 5476 5477 link = bpf_link_get_from_fd(attr->link_detach.link_fd); 5478 if (IS_ERR(link)) 5479 return PTR_ERR(link); 5480 5481 if (link->ops->detach) 5482 ret = link->ops->detach(link); 5483 else 5484 ret = -EOPNOTSUPP; 5485 5486 bpf_link_put_direct(link); 5487 return ret; 5488 } 5489 5490 struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) 5491 { 5492 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); 5493 } 5494 EXPORT_SYMBOL(bpf_link_inc_not_zero); 5495 5496 struct bpf_link *bpf_link_by_id(u32 id) 5497 { 5498 struct bpf_link *link; 5499 5500 if (!id) 5501 return ERR_PTR(-ENOENT); 5502 5503 spin_lock_bh(&link_idr_lock); 5504 /* before link is "settled", ID is 0, pretend it doesn't exist yet */ 5505 link = idr_find(&link_idr, id); 5506 if (link) { 5507 if (link->id) 5508 link = bpf_link_inc_not_zero(link); 5509 else 5510 link = ERR_PTR(-EAGAIN); 5511 } else { 5512 link = ERR_PTR(-ENOENT); 5513 } 5514 spin_unlock_bh(&link_idr_lock); 5515 return link; 5516 } 5517 5518 struct bpf_link *bpf_link_get_curr_or_next(u32 *id) 5519 { 5520 struct bpf_link *link; 5521 5522 spin_lock_bh(&link_idr_lock); 5523 again: 5524 link = idr_get_next(&link_idr, id); 5525 if (link) { 5526 link = bpf_link_inc_not_zero(link); 5527 if (IS_ERR(link)) { 5528 (*id)++; 5529 goto again; 5530 } 5531 } 5532 spin_unlock_bh(&link_idr_lock); 5533 5534 return link; 5535 } 5536 5537 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id 5538 5539 static int bpf_link_get_fd_by_id(const union bpf_attr *attr) 5540 { 5541 struct bpf_link *link; 5542 u32 id = attr->link_id; 5543 int fd; 5544 5545 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) 5546 return -EINVAL; 5547 5548 if (!capable(CAP_SYS_ADMIN)) 5549 return -EPERM; 5550 5551 link = bpf_link_by_id(id); 5552 if (IS_ERR(link)) 5553 return PTR_ERR(link); 5554 5555 fd = bpf_link_new_fd(link); 5556 if (fd < 0) 5557 bpf_link_put_direct(link); 5558 5559 return fd; 5560 } 5561 5562 DEFINE_MUTEX(bpf_stats_enabled_mutex); 5563 5564 static int bpf_stats_release(struct inode *inode, struct file *file) 5565 { 5566 mutex_lock(&bpf_stats_enabled_mutex); 5567 static_key_slow_dec(&bpf_stats_enabled_key.key); 5568 mutex_unlock(&bpf_stats_enabled_mutex); 5569 return 0; 5570 } 5571 5572 static const struct file_operations bpf_stats_fops = { 5573 .release = bpf_stats_release, 5574 }; 5575 5576 static int bpf_enable_runtime_stats(void) 5577 { 5578 int fd; 5579 5580 mutex_lock(&bpf_stats_enabled_mutex); 5581 5582 /* Set a very high limit to avoid overflow */ 5583 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { 5584 mutex_unlock(&bpf_stats_enabled_mutex); 5585 return -EBUSY; 5586 } 5587 5588 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); 5589 if (fd >= 0) 5590 static_key_slow_inc(&bpf_stats_enabled_key.key); 5591 5592 mutex_unlock(&bpf_stats_enabled_mutex); 5593 return fd; 5594 } 5595 5596 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type 5597 5598 static int bpf_enable_stats(union bpf_attr *attr) 5599 { 5600 5601 if (CHECK_ATTR(BPF_ENABLE_STATS)) 5602 return -EINVAL; 5603 5604 if (!capable(CAP_SYS_ADMIN)) 5605 return -EPERM; 5606 5607 switch (attr->enable_stats.type) { 5608 case BPF_STATS_RUN_TIME: 5609 return bpf_enable_runtime_stats(); 5610 default: 5611 break; 5612 } 5613 return -EINVAL; 5614 } 5615 5616 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags 5617 5618 static int bpf_iter_create(union 
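/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * bpf_enable_runtime_stats() above returns an anonymous fd; run-time
 * statistics stay enabled while at least one such fd (or the
 * bpf_stats_enabled sysctl) holds a reference, and bpf_stats_release()
 * drops that reference on close. Minimal sketch:
 *
 *      union bpf_attr attr;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *      int stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *      // ... run the workload, read run_time_ns / run_cnt via prog info ...
 *      close(stats_fd);        // collection stops when the last user goes away
 */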
bpf_attr *attr) 5619 { 5620 struct bpf_link *link; 5621 int err; 5622 5623 if (CHECK_ATTR(BPF_ITER_CREATE)) 5624 return -EINVAL; 5625 5626 if (attr->iter_create.flags) 5627 return -EINVAL; 5628 5629 link = bpf_link_get_from_fd(attr->iter_create.link_fd); 5630 if (IS_ERR(link)) 5631 return PTR_ERR(link); 5632 5633 err = bpf_iter_new_fd(link); 5634 bpf_link_put_direct(link); 5635 5636 return err; 5637 } 5638 5639 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags 5640 5641 static int bpf_prog_bind_map(union bpf_attr *attr) 5642 { 5643 struct bpf_prog *prog; 5644 struct bpf_map *map; 5645 struct bpf_map **used_maps_old, **used_maps_new; 5646 int i, ret = 0; 5647 5648 if (CHECK_ATTR(BPF_PROG_BIND_MAP)) 5649 return -EINVAL; 5650 5651 if (attr->prog_bind_map.flags) 5652 return -EINVAL; 5653 5654 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); 5655 if (IS_ERR(prog)) 5656 return PTR_ERR(prog); 5657 5658 map = bpf_map_get(attr->prog_bind_map.map_fd); 5659 if (IS_ERR(map)) { 5660 ret = PTR_ERR(map); 5661 goto out_prog_put; 5662 } 5663 5664 mutex_lock(&prog->aux->used_maps_mutex); 5665 5666 used_maps_old = prog->aux->used_maps; 5667 5668 for (i = 0; i < prog->aux->used_map_cnt; i++) 5669 if (used_maps_old[i] == map) { 5670 bpf_map_put(map); 5671 goto out_unlock; 5672 } 5673 5674 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, 5675 sizeof(used_maps_new[0]), 5676 GFP_KERNEL); 5677 if (!used_maps_new) { 5678 ret = -ENOMEM; 5679 goto out_unlock; 5680 } 5681 5682 /* The bpf program will not access the bpf map, but for the sake of 5683 * simplicity, increase sleepable_refcnt for sleepable program as well. 5684 */ 5685 if (prog->sleepable) 5686 atomic64_inc(&map->sleepable_refcnt); 5687 memcpy(used_maps_new, used_maps_old, 5688 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); 5689 used_maps_new[prog->aux->used_map_cnt] = map; 5690 5691 prog->aux->used_map_cnt++; 5692 prog->aux->used_maps = used_maps_new; 5693 5694 kfree(used_maps_old); 5695 5696 out_unlock: 5697 mutex_unlock(&prog->aux->used_maps_mutex); 5698 5699 if (ret) 5700 bpf_map_put(map); 5701 out_prog_put: 5702 bpf_prog_put(prog); 5703 return ret; 5704 } 5705 5706 #define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd 5707 5708 static int token_create(union bpf_attr *attr) 5709 { 5710 if (CHECK_ATTR(BPF_TOKEN_CREATE)) 5711 return -EINVAL; 5712 5713 /* no flags are supported yet */ 5714 if (attr->token_create.flags) 5715 return -EINVAL; 5716 5717 return bpf_token_create(attr); 5718 } 5719 5720 static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size) 5721 { 5722 union bpf_attr attr; 5723 int err; 5724 5725 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 5726 if (err) 5727 return err; 5728 size = min_t(u32, size, sizeof(attr)); 5729 5730 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 5731 memset(&attr, 0, sizeof(attr)); 5732 if (copy_from_bpfptr(&attr, uattr, size) != 0) 5733 return -EFAULT; 5734 5735 err = security_bpf(cmd, &attr, size); 5736 if (err < 0) 5737 return err; 5738 5739 switch (cmd) { 5740 case BPF_MAP_CREATE: 5741 err = map_create(&attr); 5742 break; 5743 case BPF_MAP_LOOKUP_ELEM: 5744 err = map_lookup_elem(&attr); 5745 break; 5746 case BPF_MAP_UPDATE_ELEM: 5747 err = map_update_elem(&attr, uattr); 5748 break; 5749 case BPF_MAP_DELETE_ELEM: 5750 err = map_delete_elem(&attr, uattr); 5751 break; 5752 case BPF_MAP_GET_NEXT_KEY: 5753 err = map_get_next_key(&attr); 5754 break; 5755 case BPF_MAP_FREEZE: 5756 err = map_freeze(&attr); 5757 break; 5758 case 
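/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * bpf_prog_bind_map() above lets a loader tie a map's lifetime to a
 * program that never references it in its instructions (e.g. a map
 * carrying skeleton metadata), by appending it to the program's
 * used_maps array. Minimal sketch:
 *
 *      union bpf_attr attr;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.prog_bind_map.prog_fd = prog_fd;
 *      attr.prog_bind_map.map_fd  = map_fd;
 *      int err = syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 *      // on success the map stays alive at least as long as the program
 */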
BPF_PROG_LOAD: 5759 err = bpf_prog_load(&attr, uattr, size); 5760 break; 5761 case BPF_OBJ_PIN: 5762 err = bpf_obj_pin(&attr); 5763 break; 5764 case BPF_OBJ_GET: 5765 err = bpf_obj_get(&attr); 5766 break; 5767 case BPF_PROG_ATTACH: 5768 err = bpf_prog_attach(&attr); 5769 break; 5770 case BPF_PROG_DETACH: 5771 err = bpf_prog_detach(&attr); 5772 break; 5773 case BPF_PROG_QUERY: 5774 err = bpf_prog_query(&attr, uattr.user); 5775 break; 5776 case BPF_PROG_TEST_RUN: 5777 err = bpf_prog_test_run(&attr, uattr.user); 5778 break; 5779 case BPF_PROG_GET_NEXT_ID: 5780 err = bpf_obj_get_next_id(&attr, uattr.user, 5781 &prog_idr, &prog_idr_lock); 5782 break; 5783 case BPF_MAP_GET_NEXT_ID: 5784 err = bpf_obj_get_next_id(&attr, uattr.user, 5785 &map_idr, &map_idr_lock); 5786 break; 5787 case BPF_BTF_GET_NEXT_ID: 5788 err = bpf_obj_get_next_id(&attr, uattr.user, 5789 &btf_idr, &btf_idr_lock); 5790 break; 5791 case BPF_PROG_GET_FD_BY_ID: 5792 err = bpf_prog_get_fd_by_id(&attr); 5793 break; 5794 case BPF_MAP_GET_FD_BY_ID: 5795 err = bpf_map_get_fd_by_id(&attr); 5796 break; 5797 case BPF_OBJ_GET_INFO_BY_FD: 5798 err = bpf_obj_get_info_by_fd(&attr, uattr.user); 5799 break; 5800 case BPF_RAW_TRACEPOINT_OPEN: 5801 err = bpf_raw_tracepoint_open(&attr); 5802 break; 5803 case BPF_BTF_LOAD: 5804 err = bpf_btf_load(&attr, uattr, size); 5805 break; 5806 case BPF_BTF_GET_FD_BY_ID: 5807 err = bpf_btf_get_fd_by_id(&attr); 5808 break; 5809 case BPF_TASK_FD_QUERY: 5810 err = bpf_task_fd_query(&attr, uattr.user); 5811 break; 5812 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 5813 err = map_lookup_and_delete_elem(&attr); 5814 break; 5815 case BPF_MAP_LOOKUP_BATCH: 5816 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH); 5817 break; 5818 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 5819 err = bpf_map_do_batch(&attr, uattr.user, 5820 BPF_MAP_LOOKUP_AND_DELETE_BATCH); 5821 break; 5822 case BPF_MAP_UPDATE_BATCH: 5823 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH); 5824 break; 5825 case BPF_MAP_DELETE_BATCH: 5826 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH); 5827 break; 5828 case BPF_LINK_CREATE: 5829 err = link_create(&attr, uattr); 5830 break; 5831 case BPF_LINK_UPDATE: 5832 err = link_update(&attr); 5833 break; 5834 case BPF_LINK_GET_FD_BY_ID: 5835 err = bpf_link_get_fd_by_id(&attr); 5836 break; 5837 case BPF_LINK_GET_NEXT_ID: 5838 err = bpf_obj_get_next_id(&attr, uattr.user, 5839 &link_idr, &link_idr_lock); 5840 break; 5841 case BPF_ENABLE_STATS: 5842 err = bpf_enable_stats(&attr); 5843 break; 5844 case BPF_ITER_CREATE: 5845 err = bpf_iter_create(&attr); 5846 break; 5847 case BPF_LINK_DETACH: 5848 err = link_detach(&attr); 5849 break; 5850 case BPF_PROG_BIND_MAP: 5851 err = bpf_prog_bind_map(&attr); 5852 break; 5853 case BPF_TOKEN_CREATE: 5854 err = token_create(&attr); 5855 break; 5856 default: 5857 err = -EINVAL; 5858 break; 5859 } 5860 5861 return err; 5862 } 5863 5864 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 5865 { 5866 return __sys_bpf(cmd, USER_BPFPTR(uattr), size); 5867 } 5868 5869 static bool syscall_prog_is_valid_access(int off, int size, 5870 enum bpf_access_type type, 5871 const struct bpf_prog *prog, 5872 struct bpf_insn_access_aux *info) 5873 { 5874 if (off < 0 || off >= U16_MAX) 5875 return false; 5876 if (off % size != 0) 5877 return false; 5878 return true; 5879 } 5880 5881 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size) 5882 { 5883 switch (cmd) { 5884 case BPF_MAP_CREATE: 5885 case BPF_MAP_DELETE_ELEM: 5886 
case BPF_MAP_UPDATE_ELEM: 5887 case BPF_MAP_FREEZE: 5888 case BPF_MAP_GET_FD_BY_ID: 5889 case BPF_PROG_LOAD: 5890 case BPF_BTF_LOAD: 5891 case BPF_LINK_CREATE: 5892 case BPF_RAW_TRACEPOINT_OPEN: 5893 break; 5894 default: 5895 return -EINVAL; 5896 } 5897 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size); 5898 } 5899 5900 5901 /* To shut up -Wmissing-prototypes. 5902 * This function is used by the kernel light skeleton 5903 * to load bpf programs when modules are loaded or during kernel boot. 5904 * See tools/lib/bpf/skel_internal.h 5905 */ 5906 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); 5907 5908 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size) 5909 { 5910 struct bpf_prog * __maybe_unused prog; 5911 struct bpf_tramp_run_ctx __maybe_unused run_ctx; 5912 5913 switch (cmd) { 5914 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */ 5915 case BPF_PROG_TEST_RUN: 5916 if (attr->test.data_in || attr->test.data_out || 5917 attr->test.ctx_out || attr->test.duration || 5918 attr->test.repeat || attr->test.flags) 5919 return -EINVAL; 5920 5921 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL); 5922 if (IS_ERR(prog)) 5923 return PTR_ERR(prog); 5924 5925 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset || 5926 attr->test.ctx_size_in > U16_MAX) { 5927 bpf_prog_put(prog); 5928 return -EINVAL; 5929 } 5930 5931 run_ctx.bpf_cookie = 0; 5932 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) { 5933 /* recursion detected */ 5934 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx); 5935 bpf_prog_put(prog); 5936 return -EBUSY; 5937 } 5938 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in); 5939 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */, 5940 &run_ctx); 5941 bpf_prog_put(prog); 5942 return 0; 5943 #endif 5944 default: 5945 return ____bpf_sys_bpf(cmd, attr, size); 5946 } 5947 } 5948 EXPORT_SYMBOL(kern_sys_bpf); 5949 5950 static const struct bpf_func_proto bpf_sys_bpf_proto = { 5951 .func = bpf_sys_bpf, 5952 .gpl_only = false, 5953 .ret_type = RET_INTEGER, 5954 .arg1_type = ARG_ANYTHING, 5955 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5956 .arg3_type = ARG_CONST_SIZE, 5957 }; 5958 5959 const struct bpf_func_proto * __weak 5960 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5961 { 5962 return bpf_base_func_proto(func_id, prog); 5963 } 5964 5965 BPF_CALL_1(bpf_sys_close, u32, fd) 5966 { 5967 /* When bpf program calls this helper there should not be 5968 * an fdget() without matching completed fdput(). 5969 * This helper is allowed in the following callchain only: 5970 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close 5971 */ 5972 return close_fd(fd); 5973 } 5974 5975 static const struct bpf_func_proto bpf_sys_close_proto = { 5976 .func = bpf_sys_close, 5977 .gpl_only = false, 5978 .ret_type = RET_INTEGER, 5979 .arg1_type = ARG_ANYTHING, 5980 }; 5981 5982 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) 5983 { 5984 *res = 0; 5985 if (flags) 5986 return -EINVAL; 5987 5988 if (name_sz <= 1 || name[name_sz - 1]) 5989 return -EINVAL; 5990 5991 if (!bpf_dump_raw_ok(current_cred())) 5992 return -EPERM; 5993 5994 *res = kallsyms_lookup_name(name); 5995 return *res ? 
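/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * The helpers above (bpf_sys_bpf, bpf_sys_close, bpf_kallsyms_lookup_name)
 * back BPF_PROG_TYPE_SYSCALL programs, which is how light skeletons load
 * further BPF objects from inside BPF. A rough BPF-side sketch in BPF C
 * (vmlinux.h + bpf_helpers.h assumed; "struct args" is the loader's own
 * context layout and the attr setup is elided):
 *
 *      SEC("syscall")
 *      int load_prog(struct args *ctx)
 *      {
 *              union bpf_attr attr = {};
 *
 *              // ... fill attr.insns, attr.insn_cnt, attr.license ...
 *              int fd = bpf_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 *              if (fd < 0)
 *                      return fd;
 *              ctx->prog_fd = fd;      // or bpf_sys_close(fd) when done with it
 *              return 0;
 *      }
 */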
0 : -ENOENT; 5996 } 5997 5998 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { 5999 .func = bpf_kallsyms_lookup_name, 6000 .gpl_only = false, 6001 .ret_type = RET_INTEGER, 6002 .arg1_type = ARG_PTR_TO_MEM, 6003 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 6004 .arg3_type = ARG_ANYTHING, 6005 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, 6006 .arg4_size = sizeof(u64), 6007 }; 6008 6009 static const struct bpf_func_proto * 6010 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 6011 { 6012 switch (func_id) { 6013 case BPF_FUNC_sys_bpf: 6014 return !bpf_token_capable(prog->aux->token, CAP_PERFMON) 6015 ? NULL : &bpf_sys_bpf_proto; 6016 case BPF_FUNC_btf_find_by_name_kind: 6017 return &bpf_btf_find_by_name_kind_proto; 6018 case BPF_FUNC_sys_close: 6019 return &bpf_sys_close_proto; 6020 case BPF_FUNC_kallsyms_lookup_name: 6021 return &bpf_kallsyms_lookup_name_proto; 6022 default: 6023 return tracing_prog_func_proto(func_id, prog); 6024 } 6025 } 6026 6027 const struct bpf_verifier_ops bpf_syscall_verifier_ops = { 6028 .get_func_proto = syscall_prog_func_proto, 6029 .is_valid_access = syscall_prog_is_valid_access, 6030 }; 6031 6032 const struct bpf_prog_ops bpf_syscall_prog_ops = { 6033 .test_run = bpf_prog_test_run_syscall, 6034 }; 6035 6036 #ifdef CONFIG_SYSCTL 6037 static int bpf_stats_handler(const struct ctl_table *table, int write, 6038 void *buffer, size_t *lenp, loff_t *ppos) 6039 { 6040 struct static_key *key = (struct static_key *)table->data; 6041 static int saved_val; 6042 int val, ret; 6043 struct ctl_table tmp = { 6044 .data = &val, 6045 .maxlen = sizeof(val), 6046 .mode = table->mode, 6047 .extra1 = SYSCTL_ZERO, 6048 .extra2 = SYSCTL_ONE, 6049 }; 6050 6051 if (write && !capable(CAP_SYS_ADMIN)) 6052 return -EPERM; 6053 6054 mutex_lock(&bpf_stats_enabled_mutex); 6055 val = saved_val; 6056 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 6057 if (write && !ret && val != saved_val) { 6058 if (val) 6059 static_key_slow_inc(key); 6060 else 6061 static_key_slow_dec(key); 6062 saved_val = val; 6063 } 6064 mutex_unlock(&bpf_stats_enabled_mutex); 6065 return ret; 6066 } 6067 6068 void __weak unpriv_ebpf_notify(int new_state) 6069 { 6070 } 6071 6072 static int bpf_unpriv_handler(const struct ctl_table *table, int write, 6073 void *buffer, size_t *lenp, loff_t *ppos) 6074 { 6075 int ret, unpriv_enable = *(int *)table->data; 6076 bool locked_state = unpriv_enable == 1; 6077 struct ctl_table tmp = *table; 6078 6079 if (write && !capable(CAP_SYS_ADMIN)) 6080 return -EPERM; 6081 6082 tmp.data = &unpriv_enable; 6083 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 6084 if (write && !ret) { 6085 if (locked_state && unpriv_enable != 1) 6086 return -EPERM; 6087 *(int *)table->data = unpriv_enable; 6088 } 6089 6090 if (write) 6091 unpriv_ebpf_notify(unpriv_enable); 6092 6093 return ret; 6094 } 6095 6096 static struct ctl_table bpf_syscall_table[] = { 6097 { 6098 .procname = "unprivileged_bpf_disabled", 6099 .data = &sysctl_unprivileged_bpf_disabled, 6100 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), 6101 .mode = 0644, 6102 .proc_handler = bpf_unpriv_handler, 6103 .extra1 = SYSCTL_ZERO, 6104 .extra2 = SYSCTL_TWO, 6105 }, 6106 { 6107 .procname = "bpf_stats_enabled", 6108 .data = &bpf_stats_enabled_key.key, 6109 .mode = 0644, 6110 .proc_handler = bpf_stats_handler, 6111 }, 6112 }; 6113 6114 static int __init bpf_syscall_sysctl_init(void) 6115 { 6116 register_sysctl_init("kernel", bpf_syscall_table); 
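/*
 * Editor's note -- illustrative sketch, not part of the kernel sources.
 * This registers /proc/sys/kernel/unprivileged_bpf_disabled (0 = allow
 * unprivileged bpf(), 1 = disabled and locked until reboot, 2 = disabled
 * but still switchable) and /proc/sys/kernel/bpf_stats_enabled. A
 * privileged process can toggle run-time stats without the syscall,
 * assuming <fcntl.h> and <unistd.h>:
 *
 *      int fd = open("/proc/sys/kernel/bpf_stats_enabled", O_WRONLY);
 *      if (fd >= 0) {
 *              write(fd, "1", 1);      // bpf_stats_handler() flips the static key
 *              close(fd);
 *      }
 */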
6117 return 0; 6118 } 6119 late_initcall(bpf_syscall_sysctl_init); 6120 #endif /* CONFIG_SYSCTL */ 6121