// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>
#include <linux/tracepoint.h>

#include <net/netfilter/nf_bpf_link.h>
#include <net/netkit.h>
#include <net/tcx.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
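
/*
 * Example (editor's illustrative sketch, not part of this file): a typical
 * caller pairs bpf_check_uarg_tail_zero() with a bounded copy of the
 * user-supplied attribute, so that a newer userspace passing a larger
 * struct is accepted only if the unknown tail is all zero. This mirrors
 * the pattern used by __sys_bpf(); "my_attr_size" is hypothetical:
 *
 *	union bpf_attr attr = {};
 *	size_t my_attr_size = min_t(u32, uattr_size, sizeof(attr));
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), uattr_size);
 *	if (err)
 *		return err;
 *	// copy only as much as this kernel knows about
 *	if (copy_from_bpfptr(&attr, uattr, my_attr_size) != 0)
 *		return -EFAULT;
 */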

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = bpf_map_offload_map_mem_usage,
};

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running non-sleepable BPF programs to complete so that
	 * userspace, when we return to it, knows that all non-sleepable
	 * programs that could be running use the new map value. For sleepable
	 * BPF programs, synchronize_rcu_tasks_trace() would be needed to wait
	 * for their completion, but that wait can be very long and userspace
	 * may think the syscall hangs, so sleepable BPF programs are not
	 * handled here for now.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}
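
/*
 * Example (editor's illustrative sketch): from the syscall's point of view
 * a per-CPU map element is a flat array of per-CPU copies, so userspace
 * must size its lookup/update buffer accordingly. Assuming libbpf for
 * libbpf_num_possible_cpus():
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	size_t stride = (value_size + 7) & ~7UL;   // round_up(value_size, 8)
 *	void *buf = calloc(ncpus, stride);         // one slot per possible CPU
 *
 *	bpf_map_lookup_elem(map_fd, &key, buf);
 *	// the value of CPU c lives at buf + c * stride
 */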

static void unpin_uptr_kaddr(void *kaddr)
{
	if (kaddr)
		unpin_user_page(virt_to_page(kaddr));
}

static void __bpf_obj_unpin_uptrs(struct btf_record *rec, u32 cnt, void *obj)
{
	const struct btf_field *field;
	void **uptr_addr;
	int i;

	for (i = 0, field = rec->fields; i < cnt; i++, field++) {
		if (field->type != BPF_UPTR)
			continue;

		uptr_addr = obj + field->offset;
		unpin_uptr_kaddr(*uptr_addr);
	}
}

static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *obj)
{
	if (!btf_record_has_field(rec, BPF_UPTR))
		return;

	__bpf_obj_unpin_uptrs(rec, rec->cnt, obj);
}

static int bpf_obj_pin_uptrs(struct btf_record *rec, void *obj)
{
	const struct btf_field *field;
	const struct btf_type *t;
	unsigned long start, end;
	struct page *page;
	void **uptr_addr;
	int i, err;

	if (!btf_record_has_field(rec, BPF_UPTR))
		return 0;

	for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
		if (field->type != BPF_UPTR)
			continue;

		uptr_addr = obj + field->offset;
		start = *(unsigned long *)uptr_addr;
		if (!start)
			continue;

		t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id);
		/* t->size was checked for zero before */
		if (check_add_overflow(start, t->size - 1, &end)) {
			err = -EFAULT;
			goto unpin_all;
		}

		/* The uptr's struct cannot span across two pages */
		if ((start & PAGE_MASK) != (end & PAGE_MASK)) {
			err = -EOPNOTSUPP;
			goto unpin_all;
		}

		err = pin_user_pages_fast(start, 1, FOLL_LONGTERM | FOLL_WRITE, &page);
		if (err != 1)
			goto unpin_all;

		if (PageHighMem(page)) {
			err = -EOPNOTSUPP;
			unpin_user_page(page);
			goto unpin_all;
		}

		*uptr_addr = page_address(page) + offset_in_page(start);
	}

	return 0;

unpin_all:
	__bpf_obj_unpin_uptrs(rec, i, obj);
	return err;
}
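
/*
 * Example (editor's illustrative sketch): a BPF-side map value carrying a
 * uptr field that the pinning logic above would handle on map update. The
 * struct "user_data" and the map name are hypothetical, and the __uptr
 * type-tag macro is assumed to be available as in the kernel selftests.
 * The constraint enforced above is that the pointed-to object must not
 * cross a page boundary:
 *
 *	struct user_data {
 *		int counter;
 *	};
 *
 *	struct value_type {
 *		struct user_data __uptr *udata;	// pinned on map update
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct value_type);
 *	} datamap SEC(".maps");
 */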

static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_offloaded(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_ARENA ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, map_file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
						   flags);
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
						  flags);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		err = bpf_obj_pin_uptrs(map->record, value);
		if (!err) {
			rcu_read_lock();
			err = map->ops->map_update_elem(map, key, value, flags);
			rcu_read_unlock();
			if (err)
				bpf_obj_unpin_uptrs(map->record, value);
		}
	}
	bpf_enable_instrumentation();

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_offloaded(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();

	return err;
}
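
/*
 * Example (editor's illustrative sketch): the BPF_F_LOCK path above is what
 * userspace hits when reading a spin-locked value atomically. Assuming a
 * map whose value embeds a struct bpf_spin_lock and libbpf's
 * bpf_map_lookup_elem_flags():
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		long payload;
 *	} v;
 *
 *	// copies "payload" under the element's lock; "lock" itself is
 *	// masked to zero in the copy by check_and_init_map_value()
 *	err = bpf_map_lookup_elem_flags(map_fd, &key, &v, BPF_F_LOCK);
 */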

/* Please do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	spin_lock_irqsave(&map_idr_lock, flags);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	spin_unlock_irqrestore(&map_idr_lock, flags);
}
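
/*
 * Example (editor's illustrative sketch): per the warning above, a map
 * implementation allocating element memory outside of map creation should
 * charge the map's memory cgroup via the helpers below rather than calling
 * kmalloc_node() directly. "my_elem" and "my_map_insert" are hypothetical:
 *
 *	static int my_map_insert(struct bpf_map *map)
 *	{
 *		struct my_elem *e;
 *
 *		e = bpf_map_kmalloc_node(map, sizeof(*e),
 *					 GFP_ATOMIC | __GFP_NOWARN,
 *					 map->numa_node);
 *		if (!e)
 *			return -ENOMEM;
 *		// ... insert e ...
 *		return 0;
 *	}
 */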

#ifdef CONFIG_MEMCG
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	if (memcg_bpf_enabled())
		map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
			unsigned long nr_pages, struct page **pages)
{
	unsigned long i, j;
	struct page *pg;
	int ret = 0;
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg, *old_memcg;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
#endif
	for (i = 0; i < nr_pages; i++) {
		pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);

		if (pg) {
			pages[i] = pg;
			continue;
		}
		for (j = 0; j < i; j++)
			__free_page(pages[j]);
		ret = -ENOMEM;
		break;
	}

#ifdef CONFIG_MEMCG
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
#endif
	return ret;
}

static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
		return NULL;
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & field_mask))
		return NULL;
	return field;
}
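
/*
 * Example (editor's illustrative sketch): btf_record_find() answers "is
 * there a special field of one of these kinds at exactly this offset?".
 * The verifier-style check below is hypothetical:
 *
 *	const struct btf_field *field;
 *
 *	// does the map value have a kptr at byte offset 'off'?
 *	field = btf_record_find(map->record, off, BPF_KPTR);
 *	if (field)
 *		// field->kptr.btf_id identifies the pointee type
 *		...;
 *
 * Note the bsearch() above relies on rec->fields being kept sorted by
 * offset when the record is parsed.
 */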

void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
		case BPF_UPTR:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			if (btf_is_kernel(rec->fields[i].kptr.btf))
				btf_put(rec->fields[i].kptr.btf);
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
		case BPF_WORKQUEUE:
			/* Nothing to release */
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}

struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = offsetof(struct btf_record, fields[rec->cnt]);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
		case BPF_UPTR:
			if (btf_is_kernel(fields[i].kptr.btf))
				btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
		case BPF_WORKQUEUE:
			/* Nothing to acquire */
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = offsetof(struct btf_record, fields[rec_a->cnt]);
	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
	 * members are zeroed out. So memcmp is safe to do without worrying
	 * about padding/unused fields.
	 *
	 * While spin_lock, timer, and kptr have no relation to map BTF,
	 * list_head metadata is specific to map BTF, the btf and value_rec
	 * members in particular. btf is the map BTF, while value_rec points to
	 * btf_record in that map BTF.
	 *
	 * So while by default, we don't rely on the map BTF (which the records
	 * were parsed from) matching for both records, which is not backwards
	 * compatible, in case list_head is part of it, we implicitly rely on
	 * that by way of depending on memcmp succeeding for it.
	 */
	return !memcmp(rec_a, rec_b, size);
}

void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE)))
		return;
	bpf_wq_cancel_and_free(obj + rec->wq_off);
}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_struct_meta *pointee_struct_meta;
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;
		void *xchgd_field;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_WORKQUEUE:
			bpf_wq_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
			if (!xchgd_field)
				break;

			if (!btf_is_kernel(field->kptr.btf)) {
				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
									   field->kptr.btf_id);
				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
								 pointee_struct_meta->record : NULL,
								 fields[i].type == BPF_KPTR_PERCPU);
			} else {
				field->kptr.dtor(xchgd_field);
			}
			break;
		case BPF_UPTR:
			/* The caller ensured that no one is using the uptr */
			unpin_uptr_kaddr(*(void **)field_ptr);
			break;
		case BPF_LIST_HEAD:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_RB_ROOT:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_LIST_NODE:
		case BPF_RB_NODE:
		case BPF_REFCOUNT:
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}

static void bpf_map_free(struct bpf_map *map)
{
	struct btf_record *rec = map->record;
	struct btf *btf = map->btf;

	/* implementation dependent freeing. Disabling migration to simplify
	 * the free of values or special fields allocated from bpf memory
	 * allocator.
	 */
	migrate_disable();
	map->ops->map_free(map);
	migrate_enable();

	/* Delay freeing of btf_record for maps, as map_free
	 * callback usually needs access to them. It is better to do it here
	 * than require each callback to do the free itself manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record was
	 * already freed using the map_free callback for map in map case which
	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
	 * template bpf_map struct used during verification.
	 */
	btf_record_free(rec);
	/* Delay freeing of btf for maps, as map_free callback may need
	 * struct_meta info which will be freed with btf_put().
	 */
	btf_put(btf);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	bpf_map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

static void bpf_map_free_in_work(struct bpf_map *map)
{
	INIT_WORK(&map->work, bpf_map_free_deferred);
	/* Avoid spawning kworkers, since they all might contend
	 * for the same mutex like slab_mutex.
	 */
	queue_work(system_unbound_wq, &map->work);
}

static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{
	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}

static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_map_free_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_map_free_rcu_gp);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);

		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
		if (READ_ONCE(map->free_after_mult_rcu_gp))
			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
		else if (READ_ONCE(map->free_after_rcu_gp))
			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
		else
			bpf_map_free_in_work(map);
	}
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = fd_file(f)->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type  = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_usage(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif
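
/*
 * Example (editor's illustrative sketch): the fdinfo emitted above can be
 * inspected through procfs; the numbers below are made up:
 *
 *	$ cat /proc/self/fdinfo/<map-fd>
 *	...
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	1024
 *	map_flags:	0x0
 *	map_extra:	0x0
 *	memlock:	69632
 *	map_id:	57
 *	frozen:	0
 */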

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory region (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open		= bpf_map_mmap_open,
	.close		= bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allow user-space to still modify the map
		 * after freezing, while the verifier assumes the contents
		 * do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vm_flags_clear(vma, VM_MAYEXEC);
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vm_flags_clear(vma, VM_MAYWRITE);

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
					   unsigned long len, unsigned long pgoff,
					   unsigned long flags)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_get_unmapped_area)
		return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
#ifdef CONFIG_MMU
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
#else
	return addr;
#endif
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
	.get_unmapped_area = bpf_get_unmapped_area,
};
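
/*
 * Example (editor's illustrative sketch): userspace can map an array map
 * created with BPF_F_MMAPABLE and access values without syscalls; mapping
 * it PROT_WRITE is refused once the map is frozen, per bpf_map_mmap()
 * above. Assuming libbpf:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_demo",
 *				sizeof(__u32), sizeof(__u64), 256, &opts);
 *	__u64 *vals = mmap(NULL, 256 * sizeof(__u64),
 *			   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	vals[0] = 42;	// no BPF_MAP_UPDATE_ELEM syscall needed
 */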

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
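
/*
 * Example (editor's illustrative note): object names accepted by
 * bpf_obj_name_cpy() may only contain alphanumerics, '_' and '.', and
 * must be NUL-terminated within the buffer:
 *
 *	"my_map.v2"  -> accepted, returns 9
 *	"my-map"     -> rejected with -EINVAL ('-' is not allowed)
 *	a name of BPF_OBJ_NAME_LEN (16) non-NUL bytes -> rejected (-EINVAL)
 */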

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type,
				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
				       BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_token_capable(token, CAP_BPF)) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
			case BPF_WORKQUEUE:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_KPTR_PERCPU:
			case BPF_REFCOUNT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_UPTR:
				if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_LIST_HEAD:
			case BPF_RB_ROOT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	ret = btf_check_and_fixup_fields(btf, map->record);
	if (ret < 0)
		goto free_map_tab;

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}

static bool bpf_net_capable(void)
{
	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
}
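
/*
 * Example (editor's illustrative sketch): a BPF-side map definition whose
 * value passes the BPF_SPIN_LOCK branch of map_check_btf() above; the map
 * and struct names are hypothetical:
 *
 *	struct locked_val {
 *		struct bpf_spin_lock lock;
 *		long data;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 128);
 *		__type(key, __u32);
 *		__type(value, struct locked_val);
 *	} locked_map SEC(".maps");
 *
 * Declaring the same map with BPF_F_RDONLY_PROG would be rejected with
 * -EACCES by the check above.
 */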

#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	struct bpf_token *token = NULL;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 map_type = attr->map_type;
	struct bpf_map *map;
	bool token_flag;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
	 * to avoid per-map type checks tripping on unknown flag
	 */
	token_flag = attr->map_flags & BPF_F_TOKEN_FD;
	attr->map_flags &= ~BPF_F_TOKEN_FD;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_type != BPF_MAP_TYPE_ARENA &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map_type = attr->map_type;
	if (map_type >= ARRAY_SIZE(bpf_map_types))
		return -EINVAL;
	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[map_type];
	if (!ops)
		return -EINVAL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return err;
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	if (!ops->map_mem_usage)
		return -EINVAL;

	if (token_flag) {
		token = bpf_token_get_from_fd(attr->map_token_fd);
		if (IS_ERR(token))
			return PTR_ERR(token);

		/* if current token doesn't grant map creation permissions,
		 * then we can't use this token, so ignore it and rely on
		 * system-wide capabilities checks
		 */
		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
		    !bpf_token_allow_map_type(token, attr->map_type)) {
			bpf_token_put(token);
			token = NULL;
		}
	}

	err = -EPERM;

	/* Intent here is for unprivileged_bpf_disabled to block BPF map
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
		goto put_token;

	/* check privileged map type permissions */
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		/* unprivileged */
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
	case BPF_MAP_TYPE_BLOOM_FILTER:
	case BPF_MAP_TYPE_LPM_TRIE:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_STRUCT_OPS:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_ARENA:
		if (!bpf_token_capable(token, CAP_BPF))
			goto put_token;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_XSKMAP:
		if (!bpf_token_capable(token, CAP_NET_ADMIN))
			goto put_token;
		break;
	default:
		WARN(1, "unsupported map type %d", map_type);
		goto put_token;
	}

	map = ops->map_alloc(attr);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto put_token;
	}
	map->ops = ops;
	map->map_type = map_type;

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct, the bpf_prog.o
	     * must have BTF to begin with, to figure out the corresponding
	     * kernel counterpart. Thus, attr->btf_fd has to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_create(map, attr, token);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);
	bpf_token_put(token);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	bpf_map_free(map);
put_token:
	bpf_token_put(token);
	return err;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	CLASS(fd, f)(ufd);
	struct bpf_map *map = __bpf_map_get(f);

	if (!IS_ERR(map))
		bpf_map_inc(map);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	CLASS(fd, f)(ufd);
	struct bpf_map *map = __bpf_map_get(f);

	if (!IS_ERR(map))
		bpf_map_inc_with_uref(map);

	return map;
}
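
/*
 * Example (editor's illustrative sketch): the raw syscall interface that
 * lands in map_create() above. Real code would typically go through
 * libbpf's bpf_map_create() instead:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *	};
 *	memcpy(attr.map_name, "demo_hash", sizeof("demo_hash"));
 *
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */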

/* map_idr_lock should have been held or the map should have been
 * protected by rcu read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
		return -EPERM;

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key))
		return PTR_ERR(key);

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);
	value = kvmemdup_bpfptr(uvalue, value_size);
	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}

	err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags);
	if (!err)
		maybe_wait_bpf_programs(map);

	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	struct bpf_map *map;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	if (!err)
		maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	struct bpf_map *map;
	void *key, *next_key;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
		return -EPERM;

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key))
			return PTR_ERR(key);
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
	return err;
}
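
/*
 * Example (editor's illustrative sketch): userspace iterates a map by
 * chaining BPF_MAP_GET_NEXT_KEY calls, starting from a NULL key; libbpf
 * wraps this as bpf_map_get_next_key():
 *
 *	__u32 key, next_key;
 *	void *prev = NULL;	// NULL means "give me the first key"
 *
 *	while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
 *		key = next_key;
 *		// ... look up / process 'key' ...
 *		prev = &key;
 *	}
 *	// the loop ends with -ENOENT past the last key
 */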

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_offloaded(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	return err;
}

int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	void *key, *value;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, map_file, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);

	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}
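
/*
 * Example (editor's illustrative sketch): driving the batched lookup above
 * through libbpf's bpf_map_lookup_batch(). "count" is both an input
 * (buffer capacity) and an output (elements returned); the batch tokens
 * are key-sized resume cookies:
 *
 *	__u32 in_batch, out_batch, count;
 *	__u32 keys[64];
 *	__u64 vals[64];
 *	bool first = true;
 *	int err;
 *
 *	do {
 *		count = 64;
 *		err = bpf_map_lookup_batch(map_fd,
 *					   first ? NULL : &in_batch,
 *					   &out_batch, keys, vals, &count,
 *					   NULL);
 *		// ... process 'count' key/value pairs ...
 *		in_batch = out_batch;
 *		first = false;
 *	} while (!err);
 *	// err is -ENOENT once the final batch has been returned
 */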
2136 bpf_disable_instrumentation(); 2137 rcu_read_lock(); 2138 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); 2139 rcu_read_unlock(); 2140 bpf_enable_instrumentation(); 2141 } 2142 } 2143 2144 if (err) 2145 goto free_value; 2146 2147 if (copy_to_user(uvalue, value, value_size) != 0) { 2148 err = -EFAULT; 2149 goto free_value; 2150 } 2151 2152 err = 0; 2153 2154 free_value: 2155 kvfree(value); 2156 free_key: 2157 kvfree(key); 2158 err_put: 2159 bpf_map_write_active_dec(map); 2160 return err; 2161 } 2162 2163 #define BPF_MAP_FREEZE_LAST_FIELD map_fd 2164 2165 static int map_freeze(const union bpf_attr *attr) 2166 { 2167 int err = 0; 2168 struct bpf_map *map; 2169 2170 if (CHECK_ATTR(BPF_MAP_FREEZE)) 2171 return -EINVAL; 2172 2173 CLASS(fd, f)(attr->map_fd); 2174 map = __bpf_map_get(f); 2175 if (IS_ERR(map)) 2176 return PTR_ERR(map); 2177 2178 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) 2179 return -ENOTSUPP; 2180 2181 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) 2182 return -EPERM; 2183 2184 mutex_lock(&map->freeze_mutex); 2185 if (bpf_map_write_active(map)) { 2186 err = -EBUSY; 2187 goto err_put; 2188 } 2189 if (READ_ONCE(map->frozen)) { 2190 err = -EBUSY; 2191 goto err_put; 2192 } 2193 2194 WRITE_ONCE(map->frozen, true); 2195 err_put: 2196 mutex_unlock(&map->freeze_mutex); 2197 return err; 2198 } 2199 2200 static const struct bpf_prog_ops * const bpf_prog_types[] = { 2201 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 2202 [_id] = & _name ## _prog_ops, 2203 #define BPF_MAP_TYPE(_id, _ops) 2204 #define BPF_LINK_TYPE(_id, _name) 2205 #include <linux/bpf_types.h> 2206 #undef BPF_PROG_TYPE 2207 #undef BPF_MAP_TYPE 2208 #undef BPF_LINK_TYPE 2209 }; 2210 2211 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) 2212 { 2213 const struct bpf_prog_ops *ops; 2214 2215 if (type >= ARRAY_SIZE(bpf_prog_types)) 2216 return -EINVAL; 2217 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); 2218 ops = bpf_prog_types[type]; 2219 if (!ops) 2220 return -EINVAL; 2221 2222 if (!bpf_prog_is_offloaded(prog->aux)) 2223 prog->aux->ops = ops; 2224 else 2225 prog->aux->ops = &bpf_offload_prog_ops; 2226 prog->type = type; 2227 return 0; 2228 } 2229 2230 enum bpf_audit { 2231 BPF_AUDIT_LOAD, 2232 BPF_AUDIT_UNLOAD, 2233 BPF_AUDIT_MAX, 2234 }; 2235 2236 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = { 2237 [BPF_AUDIT_LOAD] = "LOAD", 2238 [BPF_AUDIT_UNLOAD] = "UNLOAD", 2239 }; 2240 2241 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op) 2242 { 2243 struct audit_context *ctx = NULL; 2244 struct audit_buffer *ab; 2245 2246 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX)) 2247 return; 2248 if (audit_enabled == AUDIT_OFF) 2249 return; 2250 if (!in_irq() && !irqs_disabled()) 2251 ctx = audit_context(); 2252 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); 2253 if (unlikely(!ab)) 2254 return; 2255 audit_log_format(ab, "prog-id=%u op=%s", 2256 prog->aux->id, bpf_audit_str[op]); 2257 audit_log_end(ab); 2258 } 2259 2260 static int bpf_prog_alloc_id(struct bpf_prog *prog) 2261 { 2262 int id; 2263 2264 idr_preload(GFP_KERNEL); 2265 spin_lock_bh(&prog_idr_lock); 2266 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); 2267 if (id > 0) 2268 prog->aux->id = id; 2269 spin_unlock_bh(&prog_idr_lock); 2270 idr_preload_end(); 2271 2272 /* id is in [1, INT_MAX) */ 2273 if (WARN_ON_ONCE(!id)) 2274 return -ENOSPC; 2275 2276 return id > 0 ? 
0 : id; 2277 } 2278 2279 void bpf_prog_free_id(struct bpf_prog *prog) 2280 { 2281 unsigned long flags; 2282 2283 /* cBPF to eBPF migrations are currently not in the idr store. 2284 * Offloaded programs are removed from the store when their device 2285 * disappears - even if someone grabs an fd to them they are unusable, 2286 * simply waiting for refcnt to drop to be freed. 2287 */ 2288 if (!prog->aux->id) 2289 return; 2290 2291 spin_lock_irqsave(&prog_idr_lock, flags); 2292 idr_remove(&prog_idr, prog->aux->id); 2293 prog->aux->id = 0; 2294 spin_unlock_irqrestore(&prog_idr_lock, flags); 2295 } 2296 2297 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 2298 { 2299 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 2300 2301 kvfree(aux->func_info); 2302 kfree(aux->func_info_aux); 2303 free_uid(aux->user); 2304 security_bpf_prog_free(aux->prog); 2305 bpf_prog_free(aux->prog); 2306 } 2307 2308 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 2309 { 2310 bpf_prog_kallsyms_del_all(prog); 2311 btf_put(prog->aux->btf); 2312 module_put(prog->aux->mod); 2313 kvfree(prog->aux->jited_linfo); 2314 kvfree(prog->aux->linfo); 2315 kfree(prog->aux->kfunc_tab); 2316 if (prog->aux->attach_btf) 2317 btf_put(prog->aux->attach_btf); 2318 2319 if (deferred) { 2320 if (prog->sleepable) 2321 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); 2322 else 2323 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 2324 } else { 2325 __bpf_prog_put_rcu(&prog->aux->rcu); 2326 } 2327 } 2328 2329 static void bpf_prog_put_deferred(struct work_struct *work) 2330 { 2331 struct bpf_prog_aux *aux; 2332 struct bpf_prog *prog; 2333 2334 aux = container_of(work, struct bpf_prog_aux, work); 2335 prog = aux->prog; 2336 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2337 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2338 bpf_prog_free_id(prog); 2339 __bpf_prog_put_noref(prog, true); 2340 } 2341 2342 static void __bpf_prog_put(struct bpf_prog *prog) 2343 { 2344 struct bpf_prog_aux *aux = prog->aux; 2345 2346 if (atomic64_dec_and_test(&aux->refcnt)) { 2347 if (in_irq() || irqs_disabled()) { 2348 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2349 schedule_work(&aux->work); 2350 } else { 2351 bpf_prog_put_deferred(&aux->work); 2352 } 2353 } 2354 } 2355 2356 void bpf_prog_put(struct bpf_prog *prog) 2357 { 2358 __bpf_prog_put(prog); 2359 } 2360 EXPORT_SYMBOL_GPL(bpf_prog_put); 2361 2362 static int bpf_prog_release(struct inode *inode, struct file *filp) 2363 { 2364 struct bpf_prog *prog = filp->private_data; 2365 2366 bpf_prog_put(prog); 2367 return 0; 2368 } 2369 2370 struct bpf_prog_kstats { 2371 u64 nsecs; 2372 u64 cnt; 2373 u64 misses; 2374 }; 2375 2376 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2377 { 2378 struct bpf_prog_stats *stats; 2379 unsigned int flags; 2380 2381 stats = this_cpu_ptr(prog->stats); 2382 flags = u64_stats_update_begin_irqsave(&stats->syncp); 2383 u64_stats_inc(&stats->misses); 2384 u64_stats_update_end_irqrestore(&stats->syncp, flags); 2385 } 2386 2387 static void bpf_prog_get_stats(const struct bpf_prog *prog, 2388 struct bpf_prog_kstats *stats) 2389 { 2390 u64 nsecs = 0, cnt = 0, misses = 0; 2391 int cpu; 2392 2393 for_each_possible_cpu(cpu) { 2394 const struct bpf_prog_stats *st; 2395 unsigned int start; 2396 u64 tnsecs, tcnt, tmisses; 2397 2398 st = per_cpu_ptr(prog->stats, cpu); 2399 do { 2400 start = u64_stats_fetch_begin(&st->syncp); 2401 tnsecs = u64_stats_read(&st->nsecs); 2402 tcnt = u64_stats_read(&st->cnt); 2403 tmisses = 
u64_stats_read(&st->misses); 2404 } while (u64_stats_fetch_retry(&st->syncp, start)); 2405 nsecs += tnsecs; 2406 cnt += tcnt; 2407 misses += tmisses; 2408 } 2409 stats->nsecs = nsecs; 2410 stats->cnt = cnt; 2411 stats->misses = misses; 2412 } 2413 2414 #ifdef CONFIG_PROC_FS 2415 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 2416 { 2417 const struct bpf_prog *prog = filp->private_data; 2418 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2419 struct bpf_prog_kstats stats; 2420 2421 bpf_prog_get_stats(prog, &stats); 2422 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2423 seq_printf(m, 2424 "prog_type:\t%u\n" 2425 "prog_jited:\t%u\n" 2426 "prog_tag:\t%s\n" 2427 "memlock:\t%llu\n" 2428 "prog_id:\t%u\n" 2429 "run_time_ns:\t%llu\n" 2430 "run_cnt:\t%llu\n" 2431 "recursion_misses:\t%llu\n" 2432 "verified_insns:\t%u\n", 2433 prog->type, 2434 prog->jited, 2435 prog_tag, 2436 prog->pages * 1ULL << PAGE_SHIFT, 2437 prog->aux->id, 2438 stats.nsecs, 2439 stats.cnt, 2440 stats.misses, 2441 prog->aux->verified_insns); 2442 } 2443 #endif 2444 2445 const struct file_operations bpf_prog_fops = { 2446 #ifdef CONFIG_PROC_FS 2447 .show_fdinfo = bpf_prog_show_fdinfo, 2448 #endif 2449 .release = bpf_prog_release, 2450 .read = bpf_dummy_read, 2451 .write = bpf_dummy_write, 2452 }; 2453 2454 int bpf_prog_new_fd(struct bpf_prog *prog) 2455 { 2456 int ret; 2457 2458 ret = security_bpf_prog(prog); 2459 if (ret < 0) 2460 return ret; 2461 2462 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 2463 O_RDWR | O_CLOEXEC); 2464 } 2465 2466 void bpf_prog_add(struct bpf_prog *prog, int i) 2467 { 2468 atomic64_add(i, &prog->aux->refcnt); 2469 } 2470 EXPORT_SYMBOL_GPL(bpf_prog_add); 2471 2472 void bpf_prog_sub(struct bpf_prog *prog, int i) 2473 { 2474 /* Only to be used for undoing previous bpf_prog_add() in some 2475 * error path. We still know that another entity in our call 2476 * path holds a reference to the program, thus atomic_sub() can 2477 * be safely used in such cases! 
2478 */ 2479 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 2480 } 2481 EXPORT_SYMBOL_GPL(bpf_prog_sub); 2482 2483 void bpf_prog_inc(struct bpf_prog *prog) 2484 { 2485 atomic64_inc(&prog->aux->refcnt); 2486 } 2487 EXPORT_SYMBOL_GPL(bpf_prog_inc); 2488 2489 /* prog_idr_lock should have been held */ 2490 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 2491 { 2492 int refold; 2493 2494 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 2495 2496 if (!refold) 2497 return ERR_PTR(-ENOENT); 2498 2499 return prog; 2500 } 2501 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 2502 2503 bool bpf_prog_get_ok(struct bpf_prog *prog, 2504 enum bpf_prog_type *attach_type, bool attach_drv) 2505 { 2506 /* not an attachment, just a refcount inc, always allow */ 2507 if (!attach_type) 2508 return true; 2509 2510 if (prog->type != *attach_type) 2511 return false; 2512 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) 2513 return false; 2514 2515 return true; 2516 } 2517 2518 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 2519 bool attach_drv) 2520 { 2521 CLASS(fd, f)(ufd); 2522 struct bpf_prog *prog; 2523 2524 if (fd_empty(f)) 2525 return ERR_PTR(-EBADF); 2526 if (fd_file(f)->f_op != &bpf_prog_fops) 2527 return ERR_PTR(-EINVAL); 2528 2529 prog = fd_file(f)->private_data; 2530 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) 2531 return ERR_PTR(-EINVAL); 2532 2533 bpf_prog_inc(prog); 2534 return prog; 2535 } 2536 2537 struct bpf_prog *bpf_prog_get(u32 ufd) 2538 { 2539 return __bpf_prog_get(ufd, NULL, false); 2540 } 2541 2542 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2543 bool attach_drv) 2544 { 2545 return __bpf_prog_get(ufd, &type, attach_drv); 2546 } 2547 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 2548 2549 /* Initially all BPF programs could be loaded w/o specifying 2550 * expected_attach_type. Later for some of them specifying expected_attach_type 2551 * at load time became required so that program could be validated properly. 2552 * Programs of types that are allowed to be loaded both w/ and w/o (for 2553 * backward compatibility) expected_attach_type, should have the default attach 2554 * type assigned to expected_attach_type for the latter case, so that it can be 2555 * validated later at attach time. 2556 * 2557 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 2558 * prog type requires it but has some attach types that have to be backward 2559 * compatible. 2560 */ 2561 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 2562 { 2563 switch (attr->prog_type) { 2564 case BPF_PROG_TYPE_CGROUP_SOCK: 2565 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 2566 * exist so checking for non-zero is the way to go here. 
2567 */ 2568 if (!attr->expected_attach_type) 2569 attr->expected_attach_type = 2570 BPF_CGROUP_INET_SOCK_CREATE; 2571 break; 2572 case BPF_PROG_TYPE_SK_REUSEPORT: 2573 if (!attr->expected_attach_type) 2574 attr->expected_attach_type = 2575 BPF_SK_REUSEPORT_SELECT; 2576 break; 2577 } 2578 } 2579 2580 static int 2581 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 2582 enum bpf_attach_type expected_attach_type, 2583 struct btf *attach_btf, u32 btf_id, 2584 struct bpf_prog *dst_prog) 2585 { 2586 if (btf_id) { 2587 if (btf_id > BTF_MAX_TYPE) 2588 return -EINVAL; 2589 2590 if (!attach_btf && !dst_prog) 2591 return -EINVAL; 2592 2593 switch (prog_type) { 2594 case BPF_PROG_TYPE_TRACING: 2595 case BPF_PROG_TYPE_LSM: 2596 case BPF_PROG_TYPE_STRUCT_OPS: 2597 case BPF_PROG_TYPE_EXT: 2598 break; 2599 default: 2600 return -EINVAL; 2601 } 2602 } 2603 2604 if (attach_btf && (!btf_id || dst_prog)) 2605 return -EINVAL; 2606 2607 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING && 2608 prog_type != BPF_PROG_TYPE_EXT) 2609 return -EINVAL; 2610 2611 switch (prog_type) { 2612 case BPF_PROG_TYPE_CGROUP_SOCK: 2613 switch (expected_attach_type) { 2614 case BPF_CGROUP_INET_SOCK_CREATE: 2615 case BPF_CGROUP_INET_SOCK_RELEASE: 2616 case BPF_CGROUP_INET4_POST_BIND: 2617 case BPF_CGROUP_INET6_POST_BIND: 2618 return 0; 2619 default: 2620 return -EINVAL; 2621 } 2622 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2623 switch (expected_attach_type) { 2624 case BPF_CGROUP_INET4_BIND: 2625 case BPF_CGROUP_INET6_BIND: 2626 case BPF_CGROUP_INET4_CONNECT: 2627 case BPF_CGROUP_INET6_CONNECT: 2628 case BPF_CGROUP_UNIX_CONNECT: 2629 case BPF_CGROUP_INET4_GETPEERNAME: 2630 case BPF_CGROUP_INET6_GETPEERNAME: 2631 case BPF_CGROUP_UNIX_GETPEERNAME: 2632 case BPF_CGROUP_INET4_GETSOCKNAME: 2633 case BPF_CGROUP_INET6_GETSOCKNAME: 2634 case BPF_CGROUP_UNIX_GETSOCKNAME: 2635 case BPF_CGROUP_UDP4_SENDMSG: 2636 case BPF_CGROUP_UDP6_SENDMSG: 2637 case BPF_CGROUP_UNIX_SENDMSG: 2638 case BPF_CGROUP_UDP4_RECVMSG: 2639 case BPF_CGROUP_UDP6_RECVMSG: 2640 case BPF_CGROUP_UNIX_RECVMSG: 2641 return 0; 2642 default: 2643 return -EINVAL; 2644 } 2645 case BPF_PROG_TYPE_CGROUP_SKB: 2646 switch (expected_attach_type) { 2647 case BPF_CGROUP_INET_INGRESS: 2648 case BPF_CGROUP_INET_EGRESS: 2649 return 0; 2650 default: 2651 return -EINVAL; 2652 } 2653 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2654 switch (expected_attach_type) { 2655 case BPF_CGROUP_SETSOCKOPT: 2656 case BPF_CGROUP_GETSOCKOPT: 2657 return 0; 2658 default: 2659 return -EINVAL; 2660 } 2661 case BPF_PROG_TYPE_SK_LOOKUP: 2662 if (expected_attach_type == BPF_SK_LOOKUP) 2663 return 0; 2664 return -EINVAL; 2665 case BPF_PROG_TYPE_SK_REUSEPORT: 2666 switch (expected_attach_type) { 2667 case BPF_SK_REUSEPORT_SELECT: 2668 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2669 return 0; 2670 default: 2671 return -EINVAL; 2672 } 2673 case BPF_PROG_TYPE_NETFILTER: 2674 if (expected_attach_type == BPF_NETFILTER) 2675 return 0; 2676 return -EINVAL; 2677 case BPF_PROG_TYPE_SYSCALL: 2678 case BPF_PROG_TYPE_EXT: 2679 if (expected_attach_type) 2680 return -EINVAL; 2681 fallthrough; 2682 default: 2683 return 0; 2684 } 2685 } 2686 2687 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2688 { 2689 switch (prog_type) { 2690 case BPF_PROG_TYPE_SCHED_CLS: 2691 case BPF_PROG_TYPE_SCHED_ACT: 2692 case BPF_PROG_TYPE_XDP: 2693 case BPF_PROG_TYPE_LWT_IN: 2694 case BPF_PROG_TYPE_LWT_OUT: 2695 case BPF_PROG_TYPE_LWT_XMIT: 2696 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2697 case BPF_PROG_TYPE_SK_SKB: 2698 case 
BPF_PROG_TYPE_SK_MSG: 2699 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2700 case BPF_PROG_TYPE_CGROUP_DEVICE: 2701 case BPF_PROG_TYPE_CGROUP_SOCK: 2702 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2703 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2704 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2705 case BPF_PROG_TYPE_SOCK_OPS: 2706 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2707 case BPF_PROG_TYPE_NETFILTER: 2708 return true; 2709 case BPF_PROG_TYPE_CGROUP_SKB: 2710 /* always unpriv */ 2711 case BPF_PROG_TYPE_SK_REUSEPORT: 2712 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2713 default: 2714 return false; 2715 } 2716 } 2717 2718 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2719 { 2720 switch (prog_type) { 2721 case BPF_PROG_TYPE_KPROBE: 2722 case BPF_PROG_TYPE_TRACEPOINT: 2723 case BPF_PROG_TYPE_PERF_EVENT: 2724 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2725 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2726 case BPF_PROG_TYPE_TRACING: 2727 case BPF_PROG_TYPE_LSM: 2728 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2729 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2730 return true; 2731 default: 2732 return false; 2733 } 2734 } 2735 2736 /* last field in 'union bpf_attr' used by this command */ 2737 #define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt 2738 2739 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) 2740 { 2741 enum bpf_prog_type type = attr->prog_type; 2742 struct bpf_prog *prog, *dst_prog = NULL; 2743 struct btf *attach_btf = NULL; 2744 struct bpf_token *token = NULL; 2745 bool bpf_cap; 2746 int err; 2747 char license[128]; 2748 2749 if (CHECK_ATTR(BPF_PROG_LOAD)) 2750 return -EINVAL; 2751 2752 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2753 BPF_F_ANY_ALIGNMENT | 2754 BPF_F_TEST_STATE_FREQ | 2755 BPF_F_SLEEPABLE | 2756 BPF_F_TEST_RND_HI32 | 2757 BPF_F_XDP_HAS_FRAGS | 2758 BPF_F_XDP_DEV_BOUND_ONLY | 2759 BPF_F_TEST_REG_INVARIANTS | 2760 BPF_F_TOKEN_FD)) 2761 return -EINVAL; 2762 2763 bpf_prog_load_fixup_attach_type(attr); 2764 2765 if (attr->prog_flags & BPF_F_TOKEN_FD) { 2766 token = bpf_token_get_from_fd(attr->prog_token_fd); 2767 if (IS_ERR(token)) 2768 return PTR_ERR(token); 2769 /* if current token doesn't grant prog loading permissions, 2770 * then we can't use this token, so ignore it and rely on 2771 * system-wide capabilities checks 2772 */ 2773 if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) || 2774 !bpf_token_allow_prog_type(token, attr->prog_type, 2775 attr->expected_attach_type)) { 2776 bpf_token_put(token); 2777 token = NULL; 2778 } 2779 } 2780 2781 bpf_cap = bpf_token_capable(token, CAP_BPF); 2782 err = -EPERM; 2783 2784 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2785 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2786 !bpf_cap) 2787 goto put_token; 2788 2789 /* Intent here is for unprivileged_bpf_disabled to block BPF program 2790 * creation for unprivileged users; other actions depend 2791 * on fd availability and access to bpffs, so are dependent on 2792 * object creation success. Even with unprivileged BPF disabled, 2793 * capability checks are still carried out for these 2794 * and other operations. 2795 */ 2796 if (sysctl_unprivileged_bpf_disabled && !bpf_cap) 2797 goto put_token; 2798 2799 if (attr->insn_cnt == 0 || 2800 attr->insn_cnt > (bpf_cap ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
                err = -E2BIG;
                goto put_token;
        }
        if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
            type != BPF_PROG_TYPE_CGROUP_SKB &&
            !bpf_cap)
                goto put_token;

        if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
                goto put_token;
        if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
                goto put_token;

        /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
         * or btf, we need to check which one it is
         */
        if (attr->attach_prog_fd) {
                dst_prog = bpf_prog_get(attr->attach_prog_fd);
                if (IS_ERR(dst_prog)) {
                        dst_prog = NULL;
                        attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
                        if (IS_ERR(attach_btf)) {
                                err = -EINVAL;
                                goto put_token;
                        }
                        if (!btf_is_kernel(attach_btf)) {
                                /* attaching through specifying bpf_prog's BTF
                                 * objects directly might be supported eventually
                                 */
                                btf_put(attach_btf);
                                err = -ENOTSUPP;
                                goto put_token;
                        }
                }
        } else if (attr->attach_btf_id) {
                /* fall back to vmlinux BTF, if BTF type ID is specified */
                attach_btf = bpf_get_btf_vmlinux();
                if (IS_ERR(attach_btf)) {
                        err = PTR_ERR(attach_btf);
                        goto put_token;
                }
                if (!attach_btf) {
                        err = -EINVAL;
                        goto put_token;
                }
                btf_get(attach_btf);
        }

        if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
                                       attach_btf, attr->attach_btf_id,
                                       dst_prog)) {
                if (dst_prog)
                        bpf_prog_put(dst_prog);
                if (attach_btf)
                        btf_put(attach_btf);
                err = -EINVAL;
                goto put_token;
        }

        /* plain bpf_prog allocation */
        prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
        if (!prog) {
                if (dst_prog)
                        bpf_prog_put(dst_prog);
                if (attach_btf)
                        btf_put(attach_btf);
                err = -ENOMEM;
                goto put_token;
        }

        prog->expected_attach_type = attr->expected_attach_type;
        prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
        prog->aux->attach_btf = attach_btf;
        prog->aux->attach_btf_id = attr->attach_btf_id;
        prog->aux->dst_prog = dst_prog;
        prog->aux->dev_bound = !!attr->prog_ifindex;
        prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;

        /* move token into prog->aux, reuse taken refcnt */
        prog->aux->token = token;
        token = NULL;

        prog->aux->user = get_current_user();
        prog->len = attr->insn_cnt;

        err = -EFAULT;
        if (copy_from_bpfptr(prog->insns,
                             make_bpfptr(attr->insns, uattr.is_kernel),
                             bpf_prog_insn_size(prog)) != 0)
                goto free_prog;
        /* copy eBPF program license from user space */
        if (strncpy_from_bpfptr(license,
                                make_bpfptr(attr->license, uattr.is_kernel),
                                sizeof(license) - 1) < 0)
                goto free_prog;
        license[sizeof(license) - 1] = 0;

        /* eBPF programs must be GPL compatible to use GPL-ed functions */
        prog->gpl_compatible = license_is_gpl_compatible(license) ?
                               1 : 0;

        prog->orig_prog = NULL;
        prog->jited = 0;

        atomic64_set(&prog->aux->refcnt, 1);

        if (bpf_prog_is_dev_bound(prog->aux)) {
                err = bpf_prog_dev_bound_init(prog, attr);
                if (err)
                        goto free_prog;
        }

        if (type == BPF_PROG_TYPE_EXT && dst_prog &&
            bpf_prog_is_dev_bound(dst_prog->aux)) {
                err = bpf_prog_dev_bound_inherit(prog, dst_prog);
                if (err)
                        goto free_prog;
        }

        /*
         * Bookkeeping for managing the program attachment chain.
         *
         * It might be tempting to set the attach_tracing_prog flag only at
         * attachment time, but that would not prevent loading a bunch of
         * tracing programs first and then attaching them to one another.
         *
         * The attach_tracing_prog flag is set for the whole program lifetime
         * and doesn't have to be cleared in bpf_tracing_link_release(), since
         * tracing programs cannot change their attachment target.
         */
        if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
            dst_prog->type == BPF_PROG_TYPE_TRACING) {
                prog->aux->attach_tracing_prog = true;
        }

        /* find program type: socket_filter vs tracing_filter */
        err = find_prog_type(type, prog);
        if (err < 0)
                goto free_prog;

        prog->aux->load_time = ktime_get_boottime_ns();
        err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
                               sizeof(attr->prog_name));
        if (err < 0)
                goto free_prog;

        err = security_bpf_prog_load(prog, attr, token);
        if (err)
                goto free_prog_sec;

        /* run eBPF verifier */
        err = bpf_check(&prog, attr, uattr, uattr_size);
        if (err < 0)
                goto free_used_maps;

        prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;

        err = bpf_prog_alloc_id(prog);
        if (err)
                goto free_used_maps;

        /* Upon success of bpf_prog_alloc_id(), the BPF prog is
         * effectively publicly exposed. However, retrieving via
         * bpf_prog_get_fd_by_id() will take another reference,
         * therefore it cannot be gone underneath us.
         *
         * Only for the time /after/ successful bpf_prog_new_fd()
         * and before returning to userspace, we might just hold
         * one reference and any parallel close on that fd could
         * rip everything out. Hence, below notifications must
         * happen before bpf_prog_new_fd().
         *
         * Also, any failure handling from this point onwards must
         * be using bpf_prog_put() given the program is exposed.
         */
        bpf_prog_kallsyms_add(prog);
        perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
        bpf_audit_prog(prog, BPF_AUDIT_LOAD);

        err = bpf_prog_new_fd(prog);
        if (err < 0)
                bpf_prog_put(prog);
        return err;

free_used_maps:
        /* In case we have subprogs, we need to wait for a grace
         * period before we can tear down JIT memory since symbols
         * are already exposed under kallsyms.
2990 */ 2991 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt); 2992 return err; 2993 2994 free_prog_sec: 2995 security_bpf_prog_free(prog); 2996 free_prog: 2997 free_uid(prog->aux->user); 2998 if (prog->aux->attach_btf) 2999 btf_put(prog->aux->attach_btf); 3000 bpf_prog_free(prog); 3001 put_token: 3002 bpf_token_put(token); 3003 return err; 3004 } 3005 3006 #define BPF_OBJ_LAST_FIELD path_fd 3007 3008 static int bpf_obj_pin(const union bpf_attr *attr) 3009 { 3010 int path_fd; 3011 3012 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) 3013 return -EINVAL; 3014 3015 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 3016 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 3017 return -EINVAL; 3018 3019 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 3020 return bpf_obj_pin_user(attr->bpf_fd, path_fd, 3021 u64_to_user_ptr(attr->pathname)); 3022 } 3023 3024 static int bpf_obj_get(const union bpf_attr *attr) 3025 { 3026 int path_fd; 3027 3028 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 3029 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) 3030 return -EINVAL; 3031 3032 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 3033 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 3034 return -EINVAL; 3035 3036 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 3037 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), 3038 attr->file_flags); 3039 } 3040 3041 /* bpf_link_init_sleepable() allows to specify whether BPF link itself has 3042 * "sleepable" semantics, which normally would mean that BPF link's attach 3043 * hook can dereference link or link's underlying program for some time after 3044 * detachment due to RCU Tasks Trace-based lifetime protection scheme. 3045 * BPF program itself can be non-sleepable, yet, because it's transitively 3046 * reachable through BPF link, its freeing has to be delayed until after RCU 3047 * Tasks Trace GP. 3048 */ 3049 void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, 3050 const struct bpf_link_ops *ops, struct bpf_prog *prog, 3051 bool sleepable) 3052 { 3053 WARN_ON(ops->dealloc && ops->dealloc_deferred); 3054 atomic64_set(&link->refcnt, 1); 3055 link->type = type; 3056 link->sleepable = sleepable; 3057 link->id = 0; 3058 link->ops = ops; 3059 link->prog = prog; 3060 } 3061 3062 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 3063 const struct bpf_link_ops *ops, struct bpf_prog *prog) 3064 { 3065 bpf_link_init_sleepable(link, type, ops, prog, false); 3066 } 3067 3068 static void bpf_link_free_id(int id) 3069 { 3070 if (!id) 3071 return; 3072 3073 spin_lock_bh(&link_idr_lock); 3074 idr_remove(&link_idr, id); 3075 spin_unlock_bh(&link_idr_lock); 3076 } 3077 3078 /* Clean up bpf_link and corresponding anon_inode file and FD. After 3079 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 3080 * anon_inode's release() call. This helper marks bpf_link as 3081 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt 3082 * is not decremented, it's the responsibility of a calling code that failed 3083 * to complete bpf_link initialization. 3084 * This helper eventually calls link's dealloc callback, but does not call 3085 * link's release callback. 
3086 */ 3087 void bpf_link_cleanup(struct bpf_link_primer *primer) 3088 { 3089 primer->link->prog = NULL; 3090 bpf_link_free_id(primer->id); 3091 fput(primer->file); 3092 put_unused_fd(primer->fd); 3093 } 3094 3095 void bpf_link_inc(struct bpf_link *link) 3096 { 3097 atomic64_inc(&link->refcnt); 3098 } 3099 3100 static void bpf_link_dealloc(struct bpf_link *link) 3101 { 3102 /* now that we know that bpf_link itself can't be reached, put underlying BPF program */ 3103 if (link->prog) 3104 bpf_prog_put(link->prog); 3105 3106 /* free bpf_link and its containing memory */ 3107 if (link->ops->dealloc_deferred) 3108 link->ops->dealloc_deferred(link); 3109 else 3110 link->ops->dealloc(link); 3111 } 3112 3113 static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu) 3114 { 3115 struct bpf_link *link = container_of(rcu, struct bpf_link, rcu); 3116 3117 bpf_link_dealloc(link); 3118 } 3119 3120 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu) 3121 { 3122 if (rcu_trace_implies_rcu_gp()) 3123 bpf_link_defer_dealloc_rcu_gp(rcu); 3124 else 3125 call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp); 3126 } 3127 3128 /* bpf_link_free is guaranteed to be called from process context */ 3129 static void bpf_link_free(struct bpf_link *link) 3130 { 3131 const struct bpf_link_ops *ops = link->ops; 3132 3133 bpf_link_free_id(link->id); 3134 /* detach BPF program, clean up used resources */ 3135 if (link->prog) 3136 ops->release(link); 3137 if (ops->dealloc_deferred) { 3138 /* Schedule BPF link deallocation, which will only then 3139 * trigger putting BPF program refcount. 3140 * If underlying BPF program is sleepable or BPF link's target 3141 * attach hookpoint is sleepable or otherwise requires RCU GPs 3142 * to ensure link and its underlying BPF program is not 3143 * reachable anymore, we need to first wait for RCU tasks 3144 * trace sync, and then go through "classic" RCU grace period 3145 */ 3146 if (link->sleepable || (link->prog && link->prog->sleepable)) 3147 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); 3148 else 3149 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); 3150 } else if (ops->dealloc) { 3151 bpf_link_dealloc(link); 3152 } 3153 } 3154 3155 static void bpf_link_put_deferred(struct work_struct *work) 3156 { 3157 struct bpf_link *link = container_of(work, struct bpf_link, work); 3158 3159 bpf_link_free(link); 3160 } 3161 3162 /* bpf_link_put might be called from atomic context. It needs to be called 3163 * from sleepable context in order to acquire sleeping locks during the process. 
 */
void bpf_link_put(struct bpf_link *link)
{
        if (!atomic64_dec_and_test(&link->refcnt))
                return;

        INIT_WORK(&link->work, bpf_link_put_deferred);
        schedule_work(&link->work);
}
EXPORT_SYMBOL(bpf_link_put);

static void bpf_link_put_direct(struct bpf_link *link)
{
        if (!atomic64_dec_and_test(&link->refcnt))
                return;
        bpf_link_free(link);
}

static int bpf_link_release(struct inode *inode, struct file *filp)
{
        struct bpf_link *link = filp->private_data;

        bpf_link_put_direct(link);
        return 0;
}

#ifdef CONFIG_PROC_FS
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
static const char *bpf_link_type_strs[] = {
        [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
#include <linux/bpf_types.h>
};
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
{
        const struct bpf_link *link = filp->private_data;
        const struct bpf_prog *prog = link->prog;
        enum bpf_link_type type = link->type;
        char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

        if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) {
                seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]);
        } else {
                WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type);
                seq_printf(m, "link_type:\t<%u>\n", type);
        }
        seq_printf(m, "link_id:\t%u\n", link->id);

        if (prog) {
                bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
                seq_printf(m,
                           "prog_tag:\t%s\n"
                           "prog_id:\t%u\n",
                           prog_tag,
                           prog->aux->id);
        }
        if (link->ops->show_fdinfo)
                link->ops->show_fdinfo(link, m);
}
#endif

static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
{
        struct bpf_link *link = file->private_data;

        return link->ops->poll(file, pts);
}

static const struct file_operations bpf_link_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo = bpf_link_show_fdinfo,
#endif
        .release = bpf_link_release,
        .read = bpf_dummy_read,
        .write = bpf_dummy_write,
};

static const struct file_operations bpf_link_fops_poll = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo = bpf_link_show_fdinfo,
#endif
        .release = bpf_link_release,
        .read = bpf_dummy_read,
        .write = bpf_dummy_write,
        .poll = bpf_link_poll,
};

static int bpf_link_alloc_id(struct bpf_link *link)
{
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock_bh(&link_idr_lock);
        id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
        spin_unlock_bh(&link_idr_lock);
        idr_preload_end();

        return id;
}

/* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
 * reserving unused FD and allocating ID from link_idr. This is to be paired
 * with bpf_link_settle() to install FD and ID and expose bpf_link to
 * user-space, if bpf_link is successfully attached. If not, bpf_link and
 * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
 * the transient state is passed around in struct bpf_link_primer.
 * This is the preferred way to create and initialize bpf_link, especially
 * when there are complicated and expensive operations in between creating
 * bpf_link itself and attaching it to the BPF hook. By using bpf_link_prime()
 * and bpf_link_settle(), kernel code using bpf_link doesn't have to perform
 * expensive (and potentially failing) roll back operations in the rare case
 * that file, FD, or ID can't be allocated.
 */
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
{
        struct file *file;
        int fd, id;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        id = bpf_link_alloc_id(link);
        if (id < 0) {
                put_unused_fd(fd);
                return id;
        }

        file = anon_inode_getfile("bpf_link",
                                  link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
                                  link, O_CLOEXEC);
        if (IS_ERR(file)) {
                bpf_link_free_id(id);
                put_unused_fd(fd);
                return PTR_ERR(file);
        }

        primer->link = link;
        primer->file = file;
        primer->fd = fd;
        primer->id = id;
        return 0;
}

int bpf_link_settle(struct bpf_link_primer *primer)
{
        /* make bpf_link fetchable by ID */
        spin_lock_bh(&link_idr_lock);
        primer->link->id = primer->id;
        spin_unlock_bh(&link_idr_lock);
        /* make bpf_link fetchable by FD */
        fd_install(primer->fd, primer->file);
        /* pass through installed FD */
        return primer->fd;
}

int bpf_link_new_fd(struct bpf_link *link)
{
        return anon_inode_getfd("bpf-link",
                                link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
                                link, O_CLOEXEC);
}

struct bpf_link *bpf_link_get_from_fd(u32 ufd)
{
        CLASS(fd, f)(ufd);
        struct bpf_link *link;

        if (fd_empty(f))
                return ERR_PTR(-EBADF);
        if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll)
                return ERR_PTR(-EINVAL);

        link = fd_file(f)->private_data;
        bpf_link_inc(link);
        return link;
}
EXPORT_SYMBOL(bpf_link_get_from_fd);

static void bpf_tracing_link_release(struct bpf_link *link)
{
        struct bpf_tracing_link *tr_link =
                container_of(link, struct bpf_tracing_link, link.link);

        WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
                                                tr_link->trampoline,
                                                tr_link->tgt_prog));

        bpf_trampoline_put(tr_link->trampoline);

        /* tgt_prog is NULL if target is a kernel function */
        if (tr_link->tgt_prog)
                bpf_prog_put(tr_link->tgt_prog);
}

static void bpf_tracing_link_dealloc(struct bpf_link *link)
{
        struct bpf_tracing_link *tr_link =
                container_of(link, struct bpf_tracing_link, link.link);

        kfree(tr_link);
}

static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
                                         struct seq_file *seq)
{
        struct bpf_tracing_link *tr_link =
                container_of(link, struct bpf_tracing_link, link.link);
        u32 target_btf_id, target_obj_id;

        bpf_trampoline_unpack_key(tr_link->trampoline->key,
                                  &target_obj_id, &target_btf_id);
        seq_printf(seq,
                   "attach_type:\t%d\n"
                   "target_obj_id:\t%u\n"
                   "target_btf_id:\t%u\n",
                   tr_link->attach_type,
                   target_obj_id,
                   target_btf_id);
}

static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
                                           struct bpf_link_info *info)
{
        struct bpf_tracing_link *tr_link =
                container_of(link, struct
bpf_tracing_link, link.link); 3396 3397 info->tracing.attach_type = tr_link->attach_type; 3398 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3399 &info->tracing.target_obj_id, 3400 &info->tracing.target_btf_id); 3401 3402 return 0; 3403 } 3404 3405 static const struct bpf_link_ops bpf_tracing_link_lops = { 3406 .release = bpf_tracing_link_release, 3407 .dealloc = bpf_tracing_link_dealloc, 3408 .show_fdinfo = bpf_tracing_link_show_fdinfo, 3409 .fill_link_info = bpf_tracing_link_fill_link_info, 3410 }; 3411 3412 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 3413 int tgt_prog_fd, 3414 u32 btf_id, 3415 u64 bpf_cookie) 3416 { 3417 struct bpf_link_primer link_primer; 3418 struct bpf_prog *tgt_prog = NULL; 3419 struct bpf_trampoline *tr = NULL; 3420 struct bpf_tracing_link *link; 3421 u64 key = 0; 3422 int err; 3423 3424 switch (prog->type) { 3425 case BPF_PROG_TYPE_TRACING: 3426 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 3427 prog->expected_attach_type != BPF_TRACE_FEXIT && 3428 prog->expected_attach_type != BPF_MODIFY_RETURN) { 3429 err = -EINVAL; 3430 goto out_put_prog; 3431 } 3432 break; 3433 case BPF_PROG_TYPE_EXT: 3434 if (prog->expected_attach_type != 0) { 3435 err = -EINVAL; 3436 goto out_put_prog; 3437 } 3438 break; 3439 case BPF_PROG_TYPE_LSM: 3440 if (prog->expected_attach_type != BPF_LSM_MAC) { 3441 err = -EINVAL; 3442 goto out_put_prog; 3443 } 3444 break; 3445 default: 3446 err = -EINVAL; 3447 goto out_put_prog; 3448 } 3449 3450 if (!!tgt_prog_fd != !!btf_id) { 3451 err = -EINVAL; 3452 goto out_put_prog; 3453 } 3454 3455 if (tgt_prog_fd) { 3456 /* 3457 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this 3458 * part would be changed to implement the same for 3459 * BPF_PROG_TYPE_TRACING, do not forget to update the way how 3460 * attach_tracing_prog flag is set. 3461 */ 3462 if (prog->type != BPF_PROG_TYPE_EXT) { 3463 err = -EINVAL; 3464 goto out_put_prog; 3465 } 3466 3467 tgt_prog = bpf_prog_get(tgt_prog_fd); 3468 if (IS_ERR(tgt_prog)) { 3469 err = PTR_ERR(tgt_prog); 3470 tgt_prog = NULL; 3471 goto out_put_prog; 3472 } 3473 3474 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 3475 } 3476 3477 link = kzalloc(sizeof(*link), GFP_USER); 3478 if (!link) { 3479 err = -ENOMEM; 3480 goto out_put_prog; 3481 } 3482 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, 3483 &bpf_tracing_link_lops, prog); 3484 link->attach_type = prog->expected_attach_type; 3485 link->link.cookie = bpf_cookie; 3486 3487 mutex_lock(&prog->aux->dst_mutex); 3488 3489 /* There are a few possible cases here: 3490 * 3491 * - if prog->aux->dst_trampoline is set, the program was just loaded 3492 * and not yet attached to anything, so we can use the values stored 3493 * in prog->aux 3494 * 3495 * - if prog->aux->dst_trampoline is NULL, the program has already been 3496 * attached to a target and its initial target was cleared (below) 3497 * 3498 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 3499 * target_btf_id using the link_create API. 3500 * 3501 * - if tgt_prog == NULL when this function was called using the old 3502 * raw_tracepoint_open API, and we need a target from prog->aux 3503 * 3504 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 3505 * was detached and is going for re-attachment. 
3506 * 3507 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf 3508 * are NULL, then program was already attached and user did not provide 3509 * tgt_prog_fd so we have no way to find out or create trampoline 3510 */ 3511 if (!prog->aux->dst_trampoline && !tgt_prog) { 3512 /* 3513 * Allow re-attach for TRACING and LSM programs. If it's 3514 * currently linked, bpf_trampoline_link_prog will fail. 3515 * EXT programs need to specify tgt_prog_fd, so they 3516 * re-attach in separate code path. 3517 */ 3518 if (prog->type != BPF_PROG_TYPE_TRACING && 3519 prog->type != BPF_PROG_TYPE_LSM) { 3520 err = -EINVAL; 3521 goto out_unlock; 3522 } 3523 /* We can allow re-attach only if we have valid attach_btf. */ 3524 if (!prog->aux->attach_btf) { 3525 err = -EINVAL; 3526 goto out_unlock; 3527 } 3528 btf_id = prog->aux->attach_btf_id; 3529 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 3530 } 3531 3532 if (!prog->aux->dst_trampoline || 3533 (key && key != prog->aux->dst_trampoline->key)) { 3534 /* If there is no saved target, or the specified target is 3535 * different from the destination specified at load time, we 3536 * need a new trampoline and a check for compatibility 3537 */ 3538 struct bpf_attach_target_info tgt_info = {}; 3539 3540 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 3541 &tgt_info); 3542 if (err) 3543 goto out_unlock; 3544 3545 if (tgt_info.tgt_mod) { 3546 module_put(prog->aux->mod); 3547 prog->aux->mod = tgt_info.tgt_mod; 3548 } 3549 3550 tr = bpf_trampoline_get(key, &tgt_info); 3551 if (!tr) { 3552 err = -ENOMEM; 3553 goto out_unlock; 3554 } 3555 } else { 3556 /* The caller didn't specify a target, or the target was the 3557 * same as the destination supplied during program load. This 3558 * means we can reuse the trampoline and reference from program 3559 * load time, and there is no need to allocate a new one. This 3560 * can only happen once for any program, as the saved values in 3561 * prog->aux are cleared below. 3562 */ 3563 tr = prog->aux->dst_trampoline; 3564 tgt_prog = prog->aux->dst_prog; 3565 } 3566 3567 err = bpf_link_prime(&link->link.link, &link_primer); 3568 if (err) 3569 goto out_unlock; 3570 3571 err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog); 3572 if (err) { 3573 bpf_link_cleanup(&link_primer); 3574 link = NULL; 3575 goto out_unlock; 3576 } 3577 3578 link->tgt_prog = tgt_prog; 3579 link->trampoline = tr; 3580 3581 /* Always clear the trampoline and target prog from prog->aux to make 3582 * sure the original attach destination is not kept alive after a 3583 * program is (re-)attached to another target. 
3584 */ 3585 if (prog->aux->dst_prog && 3586 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 3587 /* got extra prog ref from syscall, or attaching to different prog */ 3588 bpf_prog_put(prog->aux->dst_prog); 3589 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 3590 /* we allocated a new trampoline, so free the old one */ 3591 bpf_trampoline_put(prog->aux->dst_trampoline); 3592 3593 prog->aux->dst_prog = NULL; 3594 prog->aux->dst_trampoline = NULL; 3595 mutex_unlock(&prog->aux->dst_mutex); 3596 3597 return bpf_link_settle(&link_primer); 3598 out_unlock: 3599 if (tr && tr != prog->aux->dst_trampoline) 3600 bpf_trampoline_put(tr); 3601 mutex_unlock(&prog->aux->dst_mutex); 3602 kfree(link); 3603 out_put_prog: 3604 if (tgt_prog_fd && tgt_prog) 3605 bpf_prog_put(tgt_prog); 3606 return err; 3607 } 3608 3609 static void bpf_raw_tp_link_release(struct bpf_link *link) 3610 { 3611 struct bpf_raw_tp_link *raw_tp = 3612 container_of(link, struct bpf_raw_tp_link, link); 3613 3614 bpf_probe_unregister(raw_tp->btp, raw_tp); 3615 bpf_put_raw_tracepoint(raw_tp->btp); 3616 } 3617 3618 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 3619 { 3620 struct bpf_raw_tp_link *raw_tp = 3621 container_of(link, struct bpf_raw_tp_link, link); 3622 3623 kfree(raw_tp); 3624 } 3625 3626 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 3627 struct seq_file *seq) 3628 { 3629 struct bpf_raw_tp_link *raw_tp_link = 3630 container_of(link, struct bpf_raw_tp_link, link); 3631 3632 seq_printf(seq, 3633 "tp_name:\t%s\n", 3634 raw_tp_link->btp->tp->name); 3635 } 3636 3637 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen, 3638 u32 len) 3639 { 3640 if (ulen >= len + 1) { 3641 if (copy_to_user(ubuf, buf, len + 1)) 3642 return -EFAULT; 3643 } else { 3644 char zero = '\0'; 3645 3646 if (copy_to_user(ubuf, buf, ulen - 1)) 3647 return -EFAULT; 3648 if (put_user(zero, ubuf + ulen - 1)) 3649 return -EFAULT; 3650 return -ENOSPC; 3651 } 3652 3653 return 0; 3654 } 3655 3656 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 3657 struct bpf_link_info *info) 3658 { 3659 struct bpf_raw_tp_link *raw_tp_link = 3660 container_of(link, struct bpf_raw_tp_link, link); 3661 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 3662 const char *tp_name = raw_tp_link->btp->tp->name; 3663 u32 ulen = info->raw_tracepoint.tp_name_len; 3664 size_t tp_len = strlen(tp_name); 3665 3666 if (!ulen ^ !ubuf) 3667 return -EINVAL; 3668 3669 info->raw_tracepoint.tp_name_len = tp_len + 1; 3670 3671 if (!ubuf) 3672 return 0; 3673 3674 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len); 3675 } 3676 3677 static const struct bpf_link_ops bpf_raw_tp_link_lops = { 3678 .release = bpf_raw_tp_link_release, 3679 .dealloc_deferred = bpf_raw_tp_link_dealloc, 3680 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 3681 .fill_link_info = bpf_raw_tp_link_fill_link_info, 3682 }; 3683 3684 #ifdef CONFIG_PERF_EVENTS 3685 struct bpf_perf_link { 3686 struct bpf_link link; 3687 struct file *perf_file; 3688 }; 3689 3690 static void bpf_perf_link_release(struct bpf_link *link) 3691 { 3692 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3693 struct perf_event *event = perf_link->perf_file->private_data; 3694 3695 perf_event_free_bpf_prog(event); 3696 fput(perf_link->perf_file); 3697 } 3698 3699 static void bpf_perf_link_dealloc(struct bpf_link *link) 3700 { 3701 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3702 3703 
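        /* The link's refcount has hit zero and ->release() has already
         * dropped the perf_event file reference, so freeing the container
         * is all that is left to do.
         */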
kfree(perf_link); 3704 } 3705 3706 static int bpf_perf_link_fill_common(const struct perf_event *event, 3707 char __user *uname, u32 *ulenp, 3708 u64 *probe_offset, u64 *probe_addr, 3709 u32 *fd_type, unsigned long *missed) 3710 { 3711 const char *buf; 3712 u32 prog_id, ulen; 3713 size_t len; 3714 int err; 3715 3716 ulen = *ulenp; 3717 if (!ulen ^ !uname) 3718 return -EINVAL; 3719 3720 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf, 3721 probe_offset, probe_addr, missed); 3722 if (err) 3723 return err; 3724 3725 if (buf) { 3726 len = strlen(buf); 3727 *ulenp = len + 1; 3728 } else { 3729 *ulenp = 1; 3730 } 3731 if (!uname) 3732 return 0; 3733 3734 if (buf) { 3735 err = bpf_copy_to_user(uname, buf, ulen, len); 3736 if (err) 3737 return err; 3738 } else { 3739 char zero = '\0'; 3740 3741 if (put_user(zero, uname)) 3742 return -EFAULT; 3743 } 3744 return 0; 3745 } 3746 3747 #ifdef CONFIG_KPROBE_EVENTS 3748 static int bpf_perf_link_fill_kprobe(const struct perf_event *event, 3749 struct bpf_link_info *info) 3750 { 3751 unsigned long missed; 3752 char __user *uname; 3753 u64 addr, offset; 3754 u32 ulen, type; 3755 int err; 3756 3757 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); 3758 ulen = info->perf_event.kprobe.name_len; 3759 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, 3760 &type, &missed); 3761 if (err) 3762 return err; 3763 if (type == BPF_FD_TYPE_KRETPROBE) 3764 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; 3765 else 3766 info->perf_event.type = BPF_PERF_EVENT_KPROBE; 3767 info->perf_event.kprobe.name_len = ulen; 3768 info->perf_event.kprobe.offset = offset; 3769 info->perf_event.kprobe.missed = missed; 3770 if (!kallsyms_show_value(current_cred())) 3771 addr = 0; 3772 info->perf_event.kprobe.addr = addr; 3773 info->perf_event.kprobe.cookie = event->bpf_cookie; 3774 return 0; 3775 } 3776 #endif 3777 3778 #ifdef CONFIG_UPROBE_EVENTS 3779 static int bpf_perf_link_fill_uprobe(const struct perf_event *event, 3780 struct bpf_link_info *info) 3781 { 3782 char __user *uname; 3783 u64 addr, offset; 3784 u32 ulen, type; 3785 int err; 3786 3787 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); 3788 ulen = info->perf_event.uprobe.name_len; 3789 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, 3790 &type, NULL); 3791 if (err) 3792 return err; 3793 3794 if (type == BPF_FD_TYPE_URETPROBE) 3795 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; 3796 else 3797 info->perf_event.type = BPF_PERF_EVENT_UPROBE; 3798 info->perf_event.uprobe.name_len = ulen; 3799 info->perf_event.uprobe.offset = offset; 3800 info->perf_event.uprobe.cookie = event->bpf_cookie; 3801 return 0; 3802 } 3803 #endif 3804 3805 static int bpf_perf_link_fill_probe(const struct perf_event *event, 3806 struct bpf_link_info *info) 3807 { 3808 #ifdef CONFIG_KPROBE_EVENTS 3809 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) 3810 return bpf_perf_link_fill_kprobe(event, info); 3811 #endif 3812 #ifdef CONFIG_UPROBE_EVENTS 3813 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) 3814 return bpf_perf_link_fill_uprobe(event, info); 3815 #endif 3816 return -EOPNOTSUPP; 3817 } 3818 3819 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event, 3820 struct bpf_link_info *info) 3821 { 3822 char __user *uname; 3823 u32 ulen; 3824 int err; 3825 3826 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); 3827 ulen = info->perf_event.tracepoint.name_len; 3828 err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL); 3829 if 
(err) 3830 return err; 3831 3832 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; 3833 info->perf_event.tracepoint.name_len = ulen; 3834 info->perf_event.tracepoint.cookie = event->bpf_cookie; 3835 return 0; 3836 } 3837 3838 static int bpf_perf_link_fill_perf_event(const struct perf_event *event, 3839 struct bpf_link_info *info) 3840 { 3841 info->perf_event.event.type = event->attr.type; 3842 info->perf_event.event.config = event->attr.config; 3843 info->perf_event.event.cookie = event->bpf_cookie; 3844 info->perf_event.type = BPF_PERF_EVENT_EVENT; 3845 return 0; 3846 } 3847 3848 static int bpf_perf_link_fill_link_info(const struct bpf_link *link, 3849 struct bpf_link_info *info) 3850 { 3851 struct bpf_perf_link *perf_link; 3852 const struct perf_event *event; 3853 3854 perf_link = container_of(link, struct bpf_perf_link, link); 3855 event = perf_get_event(perf_link->perf_file); 3856 if (IS_ERR(event)) 3857 return PTR_ERR(event); 3858 3859 switch (event->prog->type) { 3860 case BPF_PROG_TYPE_PERF_EVENT: 3861 return bpf_perf_link_fill_perf_event(event, info); 3862 case BPF_PROG_TYPE_TRACEPOINT: 3863 return bpf_perf_link_fill_tracepoint(event, info); 3864 case BPF_PROG_TYPE_KPROBE: 3865 return bpf_perf_link_fill_probe(event, info); 3866 default: 3867 return -EOPNOTSUPP; 3868 } 3869 } 3870 3871 static const struct bpf_link_ops bpf_perf_link_lops = { 3872 .release = bpf_perf_link_release, 3873 .dealloc = bpf_perf_link_dealloc, 3874 .fill_link_info = bpf_perf_link_fill_link_info, 3875 }; 3876 3877 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3878 { 3879 struct bpf_link_primer link_primer; 3880 struct bpf_perf_link *link; 3881 struct perf_event *event; 3882 struct file *perf_file; 3883 int err; 3884 3885 if (attr->link_create.flags) 3886 return -EINVAL; 3887 3888 perf_file = perf_event_get(attr->link_create.target_fd); 3889 if (IS_ERR(perf_file)) 3890 return PTR_ERR(perf_file); 3891 3892 link = kzalloc(sizeof(*link), GFP_USER); 3893 if (!link) { 3894 err = -ENOMEM; 3895 goto out_put_file; 3896 } 3897 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 3898 link->perf_file = perf_file; 3899 3900 err = bpf_link_prime(&link->link, &link_primer); 3901 if (err) { 3902 kfree(link); 3903 goto out_put_file; 3904 } 3905 3906 event = perf_file->private_data; 3907 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 3908 if (err) { 3909 bpf_link_cleanup(&link_primer); 3910 goto out_put_file; 3911 } 3912 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 3913 bpf_prog_inc(prog); 3914 3915 return bpf_link_settle(&link_primer); 3916 3917 out_put_file: 3918 fput(perf_file); 3919 return err; 3920 } 3921 #else 3922 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3923 { 3924 return -EOPNOTSUPP; 3925 } 3926 #endif /* CONFIG_PERF_EVENTS */ 3927 3928 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 3929 const char __user *user_tp_name, u64 cookie) 3930 { 3931 struct bpf_link_primer link_primer; 3932 struct bpf_raw_tp_link *link; 3933 struct bpf_raw_event_map *btp; 3934 const char *tp_name; 3935 char buf[128]; 3936 int err; 3937 3938 switch (prog->type) { 3939 case BPF_PROG_TYPE_TRACING: 3940 case BPF_PROG_TYPE_EXT: 3941 case BPF_PROG_TYPE_LSM: 3942 if (user_tp_name) 3943 /* The attach point for this category of programs 3944 * should be specified via btf_id during program load. 
3945 */ 3946 return -EINVAL; 3947 if (prog->type == BPF_PROG_TYPE_TRACING && 3948 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 3949 tp_name = prog->aux->attach_func_name; 3950 break; 3951 } 3952 return bpf_tracing_prog_attach(prog, 0, 0, 0); 3953 case BPF_PROG_TYPE_RAW_TRACEPOINT: 3954 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 3955 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) 3956 return -EFAULT; 3957 buf[sizeof(buf) - 1] = 0; 3958 tp_name = buf; 3959 break; 3960 default: 3961 return -EINVAL; 3962 } 3963 3964 btp = bpf_get_raw_tracepoint(tp_name); 3965 if (!btp) 3966 return -ENOENT; 3967 3968 link = kzalloc(sizeof(*link), GFP_USER); 3969 if (!link) { 3970 err = -ENOMEM; 3971 goto out_put_btp; 3972 } 3973 bpf_link_init_sleepable(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 3974 &bpf_raw_tp_link_lops, prog, 3975 tracepoint_is_faultable(btp->tp)); 3976 link->btp = btp; 3977 link->cookie = cookie; 3978 3979 err = bpf_link_prime(&link->link, &link_primer); 3980 if (err) { 3981 kfree(link); 3982 goto out_put_btp; 3983 } 3984 3985 err = bpf_probe_register(link->btp, link); 3986 if (err) { 3987 bpf_link_cleanup(&link_primer); 3988 goto out_put_btp; 3989 } 3990 3991 return bpf_link_settle(&link_primer); 3992 3993 out_put_btp: 3994 bpf_put_raw_tracepoint(btp); 3995 return err; 3996 } 3997 3998 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie 3999 4000 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 4001 { 4002 struct bpf_prog *prog; 4003 void __user *tp_name; 4004 __u64 cookie; 4005 int fd; 4006 4007 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 4008 return -EINVAL; 4009 4010 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 4011 if (IS_ERR(prog)) 4012 return PTR_ERR(prog); 4013 4014 tp_name = u64_to_user_ptr(attr->raw_tracepoint.name); 4015 cookie = attr->raw_tracepoint.cookie; 4016 fd = bpf_raw_tp_link_attach(prog, tp_name, cookie); 4017 if (fd < 0) 4018 bpf_prog_put(prog); 4019 return fd; 4020 } 4021 4022 static enum bpf_prog_type 4023 attach_type_to_prog_type(enum bpf_attach_type attach_type) 4024 { 4025 switch (attach_type) { 4026 case BPF_CGROUP_INET_INGRESS: 4027 case BPF_CGROUP_INET_EGRESS: 4028 return BPF_PROG_TYPE_CGROUP_SKB; 4029 case BPF_CGROUP_INET_SOCK_CREATE: 4030 case BPF_CGROUP_INET_SOCK_RELEASE: 4031 case BPF_CGROUP_INET4_POST_BIND: 4032 case BPF_CGROUP_INET6_POST_BIND: 4033 return BPF_PROG_TYPE_CGROUP_SOCK; 4034 case BPF_CGROUP_INET4_BIND: 4035 case BPF_CGROUP_INET6_BIND: 4036 case BPF_CGROUP_INET4_CONNECT: 4037 case BPF_CGROUP_INET6_CONNECT: 4038 case BPF_CGROUP_UNIX_CONNECT: 4039 case BPF_CGROUP_INET4_GETPEERNAME: 4040 case BPF_CGROUP_INET6_GETPEERNAME: 4041 case BPF_CGROUP_UNIX_GETPEERNAME: 4042 case BPF_CGROUP_INET4_GETSOCKNAME: 4043 case BPF_CGROUP_INET6_GETSOCKNAME: 4044 case BPF_CGROUP_UNIX_GETSOCKNAME: 4045 case BPF_CGROUP_UDP4_SENDMSG: 4046 case BPF_CGROUP_UDP6_SENDMSG: 4047 case BPF_CGROUP_UNIX_SENDMSG: 4048 case BPF_CGROUP_UDP4_RECVMSG: 4049 case BPF_CGROUP_UDP6_RECVMSG: 4050 case BPF_CGROUP_UNIX_RECVMSG: 4051 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 4052 case BPF_CGROUP_SOCK_OPS: 4053 return BPF_PROG_TYPE_SOCK_OPS; 4054 case BPF_CGROUP_DEVICE: 4055 return BPF_PROG_TYPE_CGROUP_DEVICE; 4056 case BPF_SK_MSG_VERDICT: 4057 return BPF_PROG_TYPE_SK_MSG; 4058 case BPF_SK_SKB_STREAM_PARSER: 4059 case BPF_SK_SKB_STREAM_VERDICT: 4060 case BPF_SK_SKB_VERDICT: 4061 return BPF_PROG_TYPE_SK_SKB; 4062 case BPF_LIRC_MODE2: 4063 return BPF_PROG_TYPE_LIRC_MODE2; 4064 case BPF_FLOW_DISSECTOR: 4065 return BPF_PROG_TYPE_FLOW_DISSECTOR; 4066 
case BPF_CGROUP_SYSCTL: 4067 return BPF_PROG_TYPE_CGROUP_SYSCTL; 4068 case BPF_CGROUP_GETSOCKOPT: 4069 case BPF_CGROUP_SETSOCKOPT: 4070 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 4071 case BPF_TRACE_ITER: 4072 case BPF_TRACE_RAW_TP: 4073 case BPF_TRACE_FENTRY: 4074 case BPF_TRACE_FEXIT: 4075 case BPF_MODIFY_RETURN: 4076 return BPF_PROG_TYPE_TRACING; 4077 case BPF_LSM_MAC: 4078 return BPF_PROG_TYPE_LSM; 4079 case BPF_SK_LOOKUP: 4080 return BPF_PROG_TYPE_SK_LOOKUP; 4081 case BPF_XDP: 4082 return BPF_PROG_TYPE_XDP; 4083 case BPF_LSM_CGROUP: 4084 return BPF_PROG_TYPE_LSM; 4085 case BPF_TCX_INGRESS: 4086 case BPF_TCX_EGRESS: 4087 case BPF_NETKIT_PRIMARY: 4088 case BPF_NETKIT_PEER: 4089 return BPF_PROG_TYPE_SCHED_CLS; 4090 default: 4091 return BPF_PROG_TYPE_UNSPEC; 4092 } 4093 } 4094 4095 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 4096 enum bpf_attach_type attach_type) 4097 { 4098 enum bpf_prog_type ptype; 4099 4100 switch (prog->type) { 4101 case BPF_PROG_TYPE_CGROUP_SOCK: 4102 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4103 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4104 case BPF_PROG_TYPE_SK_LOOKUP: 4105 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 4106 case BPF_PROG_TYPE_CGROUP_SKB: 4107 if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN)) 4108 /* cg-skb progs can be loaded by unpriv user. 4109 * check permissions at attach time. 4110 */ 4111 return -EPERM; 4112 4113 ptype = attach_type_to_prog_type(attach_type); 4114 if (prog->type != ptype) 4115 return -EINVAL; 4116 4117 return prog->enforce_expected_attach_type && 4118 prog->expected_attach_type != attach_type ? 4119 -EINVAL : 0; 4120 case BPF_PROG_TYPE_EXT: 4121 return 0; 4122 case BPF_PROG_TYPE_NETFILTER: 4123 if (attach_type != BPF_NETFILTER) 4124 return -EINVAL; 4125 return 0; 4126 case BPF_PROG_TYPE_PERF_EVENT: 4127 case BPF_PROG_TYPE_TRACEPOINT: 4128 if (attach_type != BPF_PERF_EVENT) 4129 return -EINVAL; 4130 return 0; 4131 case BPF_PROG_TYPE_KPROBE: 4132 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && 4133 attach_type != BPF_TRACE_KPROBE_MULTI) 4134 return -EINVAL; 4135 if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION && 4136 attach_type != BPF_TRACE_KPROBE_SESSION) 4137 return -EINVAL; 4138 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && 4139 attach_type != BPF_TRACE_UPROBE_MULTI) 4140 return -EINVAL; 4141 if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION && 4142 attach_type != BPF_TRACE_UPROBE_SESSION) 4143 return -EINVAL; 4144 if (attach_type != BPF_PERF_EVENT && 4145 attach_type != BPF_TRACE_KPROBE_MULTI && 4146 attach_type != BPF_TRACE_KPROBE_SESSION && 4147 attach_type != BPF_TRACE_UPROBE_MULTI && 4148 attach_type != BPF_TRACE_UPROBE_SESSION) 4149 return -EINVAL; 4150 return 0; 4151 case BPF_PROG_TYPE_SCHED_CLS: 4152 if (attach_type != BPF_TCX_INGRESS && 4153 attach_type != BPF_TCX_EGRESS && 4154 attach_type != BPF_NETKIT_PRIMARY && 4155 attach_type != BPF_NETKIT_PEER) 4156 return -EINVAL; 4157 return 0; 4158 default: 4159 ptype = attach_type_to_prog_type(attach_type); 4160 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) 4161 return -EINVAL; 4162 return 0; 4163 } 4164 } 4165 4166 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision 4167 4168 #define BPF_F_ATTACH_MASK_BASE \ 4169 (BPF_F_ALLOW_OVERRIDE | \ 4170 BPF_F_ALLOW_MULTI | \ 4171 BPF_F_REPLACE) 4172 4173 #define BPF_F_ATTACH_MASK_MPROG \ 4174 (BPF_F_REPLACE | \ 4175 BPF_F_BEFORE | \ 4176 BPF_F_AFTER | \ 4177 BPF_F_ID | \ 4178 BPF_F_LINK) 4179 4180 static int bpf_prog_attach(const 
union bpf_attr *attr) 4181 { 4182 enum bpf_prog_type ptype; 4183 struct bpf_prog *prog; 4184 int ret; 4185 4186 if (CHECK_ATTR(BPF_PROG_ATTACH)) 4187 return -EINVAL; 4188 4189 ptype = attach_type_to_prog_type(attr->attach_type); 4190 if (ptype == BPF_PROG_TYPE_UNSPEC) 4191 return -EINVAL; 4192 if (bpf_mprog_supported(ptype)) { 4193 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4194 return -EINVAL; 4195 } else { 4196 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE) 4197 return -EINVAL; 4198 if (attr->relative_fd || 4199 attr->expected_revision) 4200 return -EINVAL; 4201 } 4202 4203 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4204 if (IS_ERR(prog)) 4205 return PTR_ERR(prog); 4206 4207 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 4208 bpf_prog_put(prog); 4209 return -EINVAL; 4210 } 4211 4212 switch (ptype) { 4213 case BPF_PROG_TYPE_SK_SKB: 4214 case BPF_PROG_TYPE_SK_MSG: 4215 ret = sock_map_get_from_fd(attr, prog); 4216 break; 4217 case BPF_PROG_TYPE_LIRC_MODE2: 4218 ret = lirc_prog_attach(attr, prog); 4219 break; 4220 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4221 ret = netns_bpf_prog_attach(attr, prog); 4222 break; 4223 case BPF_PROG_TYPE_CGROUP_DEVICE: 4224 case BPF_PROG_TYPE_CGROUP_SKB: 4225 case BPF_PROG_TYPE_CGROUP_SOCK: 4226 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4227 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4228 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4229 case BPF_PROG_TYPE_SOCK_OPS: 4230 case BPF_PROG_TYPE_LSM: 4231 if (ptype == BPF_PROG_TYPE_LSM && 4232 prog->expected_attach_type != BPF_LSM_CGROUP) 4233 ret = -EINVAL; 4234 else 4235 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 4236 break; 4237 case BPF_PROG_TYPE_SCHED_CLS: 4238 if (attr->attach_type == BPF_TCX_INGRESS || 4239 attr->attach_type == BPF_TCX_EGRESS) 4240 ret = tcx_prog_attach(attr, prog); 4241 else 4242 ret = netkit_prog_attach(attr, prog); 4243 break; 4244 default: 4245 ret = -EINVAL; 4246 } 4247 4248 if (ret) 4249 bpf_prog_put(prog); 4250 return ret; 4251 } 4252 4253 #define BPF_PROG_DETACH_LAST_FIELD expected_revision 4254 4255 static int bpf_prog_detach(const union bpf_attr *attr) 4256 { 4257 struct bpf_prog *prog = NULL; 4258 enum bpf_prog_type ptype; 4259 int ret; 4260 4261 if (CHECK_ATTR(BPF_PROG_DETACH)) 4262 return -EINVAL; 4263 4264 ptype = attach_type_to_prog_type(attr->attach_type); 4265 if (bpf_mprog_supported(ptype)) { 4266 if (ptype == BPF_PROG_TYPE_UNSPEC) 4267 return -EINVAL; 4268 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4269 return -EINVAL; 4270 if (attr->attach_bpf_fd) { 4271 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4272 if (IS_ERR(prog)) 4273 return PTR_ERR(prog); 4274 } 4275 } else if (attr->attach_flags || 4276 attr->relative_fd || 4277 attr->expected_revision) { 4278 return -EINVAL; 4279 } 4280 4281 switch (ptype) { 4282 case BPF_PROG_TYPE_SK_MSG: 4283 case BPF_PROG_TYPE_SK_SKB: 4284 ret = sock_map_prog_detach(attr, ptype); 4285 break; 4286 case BPF_PROG_TYPE_LIRC_MODE2: 4287 ret = lirc_prog_detach(attr); 4288 break; 4289 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4290 ret = netns_bpf_prog_detach(attr, ptype); 4291 break; 4292 case BPF_PROG_TYPE_CGROUP_DEVICE: 4293 case BPF_PROG_TYPE_CGROUP_SKB: 4294 case BPF_PROG_TYPE_CGROUP_SOCK: 4295 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4296 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4297 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4298 case BPF_PROG_TYPE_SOCK_OPS: 4299 case BPF_PROG_TYPE_LSM: 4300 ret = cgroup_bpf_prog_detach(attr, ptype); 4301 break; 4302 case BPF_PROG_TYPE_SCHED_CLS: 4303 if (attr->attach_type == BPF_TCX_INGRESS || 4304 
attr->attach_type == BPF_TCX_EGRESS) 4305 ret = tcx_prog_detach(attr, prog); 4306 else 4307 ret = netkit_prog_detach(attr, prog); 4308 break; 4309 default: 4310 ret = -EINVAL; 4311 } 4312 4313 if (prog) 4314 bpf_prog_put(prog); 4315 return ret; 4316 } 4317 4318 #define BPF_PROG_QUERY_LAST_FIELD query.revision 4319 4320 static int bpf_prog_query(const union bpf_attr *attr, 4321 union bpf_attr __user *uattr) 4322 { 4323 if (!bpf_net_capable()) 4324 return -EPERM; 4325 if (CHECK_ATTR(BPF_PROG_QUERY)) 4326 return -EINVAL; 4327 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 4328 return -EINVAL; 4329 4330 switch (attr->query.attach_type) { 4331 case BPF_CGROUP_INET_INGRESS: 4332 case BPF_CGROUP_INET_EGRESS: 4333 case BPF_CGROUP_INET_SOCK_CREATE: 4334 case BPF_CGROUP_INET_SOCK_RELEASE: 4335 case BPF_CGROUP_INET4_BIND: 4336 case BPF_CGROUP_INET6_BIND: 4337 case BPF_CGROUP_INET4_POST_BIND: 4338 case BPF_CGROUP_INET6_POST_BIND: 4339 case BPF_CGROUP_INET4_CONNECT: 4340 case BPF_CGROUP_INET6_CONNECT: 4341 case BPF_CGROUP_UNIX_CONNECT: 4342 case BPF_CGROUP_INET4_GETPEERNAME: 4343 case BPF_CGROUP_INET6_GETPEERNAME: 4344 case BPF_CGROUP_UNIX_GETPEERNAME: 4345 case BPF_CGROUP_INET4_GETSOCKNAME: 4346 case BPF_CGROUP_INET6_GETSOCKNAME: 4347 case BPF_CGROUP_UNIX_GETSOCKNAME: 4348 case BPF_CGROUP_UDP4_SENDMSG: 4349 case BPF_CGROUP_UDP6_SENDMSG: 4350 case BPF_CGROUP_UNIX_SENDMSG: 4351 case BPF_CGROUP_UDP4_RECVMSG: 4352 case BPF_CGROUP_UDP6_RECVMSG: 4353 case BPF_CGROUP_UNIX_RECVMSG: 4354 case BPF_CGROUP_SOCK_OPS: 4355 case BPF_CGROUP_DEVICE: 4356 case BPF_CGROUP_SYSCTL: 4357 case BPF_CGROUP_GETSOCKOPT: 4358 case BPF_CGROUP_SETSOCKOPT: 4359 case BPF_LSM_CGROUP: 4360 return cgroup_bpf_prog_query(attr, uattr); 4361 case BPF_LIRC_MODE2: 4362 return lirc_prog_query(attr, uattr); 4363 case BPF_FLOW_DISSECTOR: 4364 case BPF_SK_LOOKUP: 4365 return netns_bpf_prog_query(attr, uattr); 4366 case BPF_SK_SKB_STREAM_PARSER: 4367 case BPF_SK_SKB_STREAM_VERDICT: 4368 case BPF_SK_MSG_VERDICT: 4369 case BPF_SK_SKB_VERDICT: 4370 return sock_map_bpf_prog_query(attr, uattr); 4371 case BPF_TCX_INGRESS: 4372 case BPF_TCX_EGRESS: 4373 return tcx_prog_query(attr, uattr); 4374 case BPF_NETKIT_PRIMARY: 4375 case BPF_NETKIT_PEER: 4376 return netkit_prog_query(attr, uattr); 4377 default: 4378 return -EINVAL; 4379 } 4380 } 4381 4382 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size 4383 4384 static int bpf_prog_test_run(const union bpf_attr *attr, 4385 union bpf_attr __user *uattr) 4386 { 4387 struct bpf_prog *prog; 4388 int ret = -ENOTSUPP; 4389 4390 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 4391 return -EINVAL; 4392 4393 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 4394 (!attr->test.ctx_size_in && attr->test.ctx_in)) 4395 return -EINVAL; 4396 4397 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 4398 (!attr->test.ctx_size_out && attr->test.ctx_out)) 4399 return -EINVAL; 4400 4401 prog = bpf_prog_get(attr->test.prog_fd); 4402 if (IS_ERR(prog)) 4403 return PTR_ERR(prog); 4404 4405 if (prog->aux->ops->test_run) 4406 ret = prog->aux->ops->test_run(prog, attr, uattr); 4407 4408 bpf_prog_put(prog); 4409 return ret; 4410 } 4411 4412 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 4413 4414 static int bpf_obj_get_next_id(const union bpf_attr *attr, 4415 union bpf_attr __user *uattr, 4416 struct idr *idr, 4417 spinlock_t *lock) 4418 { 4419 u32 next_id = attr->start_id; 4420 int err = 0; 4421 4422 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 4423 return -EINVAL; 4424 4425 if (!capable(CAP_SYS_ADMIN)) 4426 return 
-EPERM; 4427 4428 next_id++; 4429 spin_lock_bh(lock); 4430 if (!idr_get_next(idr, &next_id)) 4431 err = -ENOENT; 4432 spin_unlock_bh(lock); 4433 4434 if (!err) 4435 err = put_user(next_id, &uattr->next_id); 4436 4437 return err; 4438 } 4439 4440 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 4441 { 4442 struct bpf_map *map; 4443 4444 spin_lock_bh(&map_idr_lock); 4445 again: 4446 map = idr_get_next(&map_idr, id); 4447 if (map) { 4448 map = __bpf_map_inc_not_zero(map, false); 4449 if (IS_ERR(map)) { 4450 (*id)++; 4451 goto again; 4452 } 4453 } 4454 spin_unlock_bh(&map_idr_lock); 4455 4456 return map; 4457 } 4458 4459 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 4460 { 4461 struct bpf_prog *prog; 4462 4463 spin_lock_bh(&prog_idr_lock); 4464 again: 4465 prog = idr_get_next(&prog_idr, id); 4466 if (prog) { 4467 prog = bpf_prog_inc_not_zero(prog); 4468 if (IS_ERR(prog)) { 4469 (*id)++; 4470 goto again; 4471 } 4472 } 4473 spin_unlock_bh(&prog_idr_lock); 4474 4475 return prog; 4476 } 4477 4478 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 4479 4480 struct bpf_prog *bpf_prog_by_id(u32 id) 4481 { 4482 struct bpf_prog *prog; 4483 4484 if (!id) 4485 return ERR_PTR(-ENOENT); 4486 4487 spin_lock_bh(&prog_idr_lock); 4488 prog = idr_find(&prog_idr, id); 4489 if (prog) 4490 prog = bpf_prog_inc_not_zero(prog); 4491 else 4492 prog = ERR_PTR(-ENOENT); 4493 spin_unlock_bh(&prog_idr_lock); 4494 return prog; 4495 } 4496 4497 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 4498 { 4499 struct bpf_prog *prog; 4500 u32 id = attr->prog_id; 4501 int fd; 4502 4503 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 4504 return -EINVAL; 4505 4506 if (!capable(CAP_SYS_ADMIN)) 4507 return -EPERM; 4508 4509 prog = bpf_prog_by_id(id); 4510 if (IS_ERR(prog)) 4511 return PTR_ERR(prog); 4512 4513 fd = bpf_prog_new_fd(prog); 4514 if (fd < 0) 4515 bpf_prog_put(prog); 4516 4517 return fd; 4518 } 4519 4520 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 4521 4522 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 4523 { 4524 struct bpf_map *map; 4525 u32 id = attr->map_id; 4526 int f_flags; 4527 int fd; 4528 4529 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 4530 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 4531 return -EINVAL; 4532 4533 if (!capable(CAP_SYS_ADMIN)) 4534 return -EPERM; 4535 4536 f_flags = bpf_get_file_flag(attr->open_flags); 4537 if (f_flags < 0) 4538 return f_flags; 4539 4540 spin_lock_bh(&map_idr_lock); 4541 map = idr_find(&map_idr, id); 4542 if (map) 4543 map = __bpf_map_inc_not_zero(map, true); 4544 else 4545 map = ERR_PTR(-ENOENT); 4546 spin_unlock_bh(&map_idr_lock); 4547 4548 if (IS_ERR(map)) 4549 return PTR_ERR(map); 4550 4551 fd = bpf_map_new_fd(map, f_flags); 4552 if (fd < 0) 4553 bpf_map_put_with_uref(map); 4554 4555 return fd; 4556 } 4557 4558 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 4559 unsigned long addr, u32 *off, 4560 u32 *type) 4561 { 4562 const struct bpf_map *map; 4563 int i; 4564 4565 mutex_lock(&prog->aux->used_maps_mutex); 4566 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 4567 map = prog->aux->used_maps[i]; 4568 if (map == (void *)addr) { 4569 *type = BPF_PSEUDO_MAP_FD; 4570 goto out; 4571 } 4572 if (!map->ops->map_direct_value_meta) 4573 continue; 4574 if (!map->ops->map_direct_value_meta(map, addr, off)) { 4575 *type = BPF_PSEUDO_MAP_VALUE; 4576 goto out; 4577 } 4578 } 4579 map = NULL; 4580 4581 out: 4582 mutex_unlock(&prog->aux->used_maps_mutex); 4583 return map; 4584 } 4585 4586 static struct bpf_insn *bpf_insn_prepare_dump(const 
struct bpf_prog *prog,
					       const struct cred *f_cred)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u32 off, type;
	u64 imm;
	u8 code;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		code = insns[i].code;

		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (code == (BPF_JMP | BPF_CALL) ||
		    code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok(f_cred))
				insns[i].imm = 0;
			continue;
		}
		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
			continue;
		}

		if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX ||
		     BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) {
			insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM;
			continue;
		}

		if (code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm, &off, &type);
		if (map) {
			insns[i].src_reg = type;
			insns[i].imm = map->id;
			insns[i + 1].imm = off;
			continue;
		}
	}

	return insns;
}

static int set_info_rec_size(struct bpf_prog_info *info)
{
	/*
	 * Ensure each info.*_rec_size matches the record size the kernel
	 * expects.  As the only exception, allow a zero *_rec_size when the
	 * matching *_cnt is also zero; in that case the kernel writes the
	 * expected _rec_size back into the info.
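	 *
	 * For example: a caller passing nr_func_info == 0 and
	 * func_info_rec_size == 0 is accepted, and the kernel reports
	 * func_info_rec_size = sizeof(struct bpf_func_info) back, so user
	 * space can learn the record size to use for a second, full query.
	 * A non-zero nr_func_info with a stale record size fails with
	 * -EINVAL instead of risking a silently misparsed copy.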
	 */

	if ((info->nr_func_info || info->func_info_rec_size) &&
	    info->func_info_rec_size != sizeof(struct bpf_func_info))
		return -EINVAL;

	if ((info->nr_line_info || info->line_info_rec_size) &&
	    info->line_info_rec_size != sizeof(struct bpf_line_info))
		return -EINVAL;

	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
	    info->jited_line_info_rec_size != sizeof(__u64))
		return -EINVAL;

	info->func_info_rec_size = sizeof(struct bpf_func_info);
	info->line_info_rec_size = sizeof(struct bpf_line_info);
	info->jited_line_info_rec_size = sizeof(__u64);

	return 0;
}

static int bpf_prog_get_info_by_fd(struct file *file,
				   struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct btf *attach_btf = bpf_prog_get_target_btf(prog);
	struct bpf_prog_info info;
	u32 info_len = attr->info.info_len;
	struct bpf_prog_kstats stats;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);
	info.gpl_compatible = prog->gpl_compatible;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	mutex_lock(&prog->aux->used_maps_mutex);
	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i])) {
				mutex_unlock(&prog->aux->used_maps_mutex);
				return -EFAULT;
			}
	}
	mutex_unlock(&prog->aux->used_maps_mutex);

	err = set_info_rec_size(&info);
	if (err)
		return err;

	bpf_prog_get_stats(prog, &stats);
	info.run_time_ns = stats.nsecs;
	info.run_cnt = stats.cnt;
	info.recursion_misses = stats.misses;

	info.verified_insns = prog->aux->verified_insns;

	if (!bpf_capable()) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		info.nr_jited_ksyms = 0;
		info.nr_jited_func_lens = 0;
		info.nr_func_info = 0;
		info.nr_line_info = 0;
		info.nr_jited_line_info = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}

	if
(bpf_prog_is_offloaded(prog->aux)) { 4769 err = bpf_prog_offload_info_fill(&info, prog); 4770 if (err) 4771 return err; 4772 goto done; 4773 } 4774 4775 /* NOTE: the following code is supposed to be skipped for offload. 4776 * bpf_prog_offload_info_fill() is the place to fill similar fields 4777 * for offload. 4778 */ 4779 ulen = info.jited_prog_len; 4780 if (prog->aux->func_cnt) { 4781 u32 i; 4782 4783 info.jited_prog_len = 0; 4784 for (i = 0; i < prog->aux->func_cnt; i++) 4785 info.jited_prog_len += prog->aux->func[i]->jited_len; 4786 } else { 4787 info.jited_prog_len = prog->jited_len; 4788 } 4789 4790 if (info.jited_prog_len && ulen) { 4791 if (bpf_dump_raw_ok(file->f_cred)) { 4792 uinsns = u64_to_user_ptr(info.jited_prog_insns); 4793 ulen = min_t(u32, info.jited_prog_len, ulen); 4794 4795 /* for multi-function programs, copy the JITed 4796 * instructions for all the functions 4797 */ 4798 if (prog->aux->func_cnt) { 4799 u32 len, free, i; 4800 u8 *img; 4801 4802 free = ulen; 4803 for (i = 0; i < prog->aux->func_cnt; i++) { 4804 len = prog->aux->func[i]->jited_len; 4805 len = min_t(u32, len, free); 4806 img = (u8 *) prog->aux->func[i]->bpf_func; 4807 if (copy_to_user(uinsns, img, len)) 4808 return -EFAULT; 4809 uinsns += len; 4810 free -= len; 4811 if (!free) 4812 break; 4813 } 4814 } else { 4815 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 4816 return -EFAULT; 4817 } 4818 } else { 4819 info.jited_prog_insns = 0; 4820 } 4821 } 4822 4823 ulen = info.nr_jited_ksyms; 4824 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 4825 if (ulen) { 4826 if (bpf_dump_raw_ok(file->f_cred)) { 4827 unsigned long ksym_addr; 4828 u64 __user *user_ksyms; 4829 u32 i; 4830 4831 /* copy the address of the kernel symbol 4832 * corresponding to each function 4833 */ 4834 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 4835 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 4836 if (prog->aux->func_cnt) { 4837 for (i = 0; i < ulen; i++) { 4838 ksym_addr = (unsigned long) 4839 prog->aux->func[i]->bpf_func; 4840 if (put_user((u64) ksym_addr, 4841 &user_ksyms[i])) 4842 return -EFAULT; 4843 } 4844 } else { 4845 ksym_addr = (unsigned long) prog->bpf_func; 4846 if (put_user((u64) ksym_addr, &user_ksyms[0])) 4847 return -EFAULT; 4848 } 4849 } else { 4850 info.jited_ksyms = 0; 4851 } 4852 } 4853 4854 ulen = info.nr_jited_func_lens; 4855 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 4856 if (ulen) { 4857 if (bpf_dump_raw_ok(file->f_cred)) { 4858 u32 __user *user_lens; 4859 u32 func_len, i; 4860 4861 /* copy the JITed image lengths for each function */ 4862 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 4863 user_lens = u64_to_user_ptr(info.jited_func_lens); 4864 if (prog->aux->func_cnt) { 4865 for (i = 0; i < ulen; i++) { 4866 func_len = 4867 prog->aux->func[i]->jited_len; 4868 if (put_user(func_len, &user_lens[i])) 4869 return -EFAULT; 4870 } 4871 } else { 4872 func_len = prog->jited_len; 4873 if (put_user(func_len, &user_lens[0])) 4874 return -EFAULT; 4875 } 4876 } else { 4877 info.jited_func_lens = 0; 4878 } 4879 } 4880 4881 if (prog->aux->btf) 4882 info.btf_id = btf_obj_id(prog->aux->btf); 4883 info.attach_btf_id = prog->aux->attach_btf_id; 4884 if (attach_btf) 4885 info.attach_btf_obj_id = btf_obj_id(attach_btf); 4886 4887 ulen = info.nr_func_info; 4888 info.nr_func_info = prog->aux->func_info_cnt; 4889 if (info.nr_func_info && ulen) { 4890 char __user *user_finfo; 4891 4892 user_finfo = u64_to_user_ptr(info.func_info); 4893 ulen = min_t(u32, info.nr_func_info, ulen); 4894 if (copy_to_user(user_finfo, prog->aux->func_info, 4895 info.func_info_rec_size * ulen)) 4896 return -EFAULT; 4897 } 4898 4899 ulen = info.nr_line_info; 4900 info.nr_line_info = prog->aux->nr_linfo; 4901 if (info.nr_line_info && ulen) { 4902 __u8 __user *user_linfo; 4903 4904 user_linfo = u64_to_user_ptr(info.line_info); 4905 ulen = min_t(u32, info.nr_line_info, ulen); 4906 if (copy_to_user(user_linfo, prog->aux->linfo, 4907 info.line_info_rec_size * ulen)) 4908 return -EFAULT; 4909 } 4910 4911 ulen = info.nr_jited_line_info; 4912 if (prog->aux->jited_linfo) 4913 info.nr_jited_line_info = prog->aux->nr_linfo; 4914 else 4915 info.nr_jited_line_info = 0; 4916 if (info.nr_jited_line_info && ulen) { 4917 if (bpf_dump_raw_ok(file->f_cred)) { 4918 unsigned long line_addr; 4919 __u64 __user *user_linfo; 4920 u32 i; 4921 4922 user_linfo = u64_to_user_ptr(info.jited_line_info); 4923 ulen = min_t(u32, info.nr_jited_line_info, ulen); 4924 for (i = 0; i < ulen; i++) { 4925 line_addr = (unsigned long)prog->aux->jited_linfo[i]; 4926 if (put_user((__u64)line_addr, &user_linfo[i])) 4927 return -EFAULT; 4928 } 4929 } else { 4930 info.jited_line_info = 0; 4931 } 4932 } 4933 4934 ulen = info.nr_prog_tags; 4935 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 4936 if (ulen) { 4937 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 4938 u32 i; 4939 4940 user_prog_tags = u64_to_user_ptr(info.prog_tags); 4941 ulen = min_t(u32, info.nr_prog_tags, ulen); 4942 if (prog->aux->func_cnt) { 4943 for (i = 0; i < ulen; i++) { 4944 if (copy_to_user(user_prog_tags[i], 4945 prog->aux->func[i]->tag, 4946 BPF_TAG_SIZE)) 4947 return -EFAULT; 4948 } 4949 } else { 4950 if (copy_to_user(user_prog_tags[0], 4951 prog->tag, BPF_TAG_SIZE)) 4952 return -EFAULT; 4953 } 4954 } 4955 4956 done: 4957 if (copy_to_user(uinfo, &info, info_len) || 4958 put_user(info_len, &uattr->info.info_len)) 4959 return -EFAULT; 4960 4961 return 0; 4962 } 4963 4964 static int bpf_map_get_info_by_fd(struct file *file, 4965 struct bpf_map *map, 4966 const union bpf_attr *attr, 4967 union bpf_attr __user *uattr) 4968 { 4969 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4970 struct bpf_map_info info; 4971 u32 info_len = attr->info.info_len; 4972 int err; 4973 4974 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4975 if (err) 4976 return err; 4977 info_len = min_t(u32, sizeof(info), info_len); 4978 4979 memset(&info, 0, sizeof(info)); 4980 info.type = map->map_type; 4981 info.id = map->id; 4982 info.key_size = map->key_size; 4983 info.value_size = map->value_size; 4984 info.max_entries = map->max_entries; 4985 info.map_flags = map->map_flags; 4986 info.map_extra = map->map_extra; 4987 memcpy(info.name, map->name, sizeof(map->name)); 4988 4989 if (map->btf) { 4990 info.btf_id = btf_obj_id(map->btf); 4991 info.btf_key_type_id = map->btf_key_type_id; 4992 info.btf_value_type_id = map->btf_value_type_id; 4993 } 4994 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 4995 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) 4996 bpf_map_struct_ops_info_fill(&info, map); 4997 4998 if (bpf_map_is_offloaded(map)) { 4999 err = bpf_map_offload_info_fill(&info, map); 5000 if (err) 5001 return err; 5002 } 5003 5004 if (copy_to_user(uinfo, &info, info_len) || 5005 put_user(info_len, &uattr->info.info_len)) 5006 return -EFAULT; 5007 5008 return 0; 5009 } 5010 5011 static int bpf_btf_get_info_by_fd(struct file *file, 5012 struct btf *btf, 5013 const union bpf_attr *attr, 5014 union bpf_attr __user *uattr) 5015 { 5016 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 5017 u32 info_len = attr->info.info_len; 5018 int err; 5019 5020 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); 5021 if (err) 5022 return err; 5023 5024 return btf_get_info_by_fd(btf, attr, uattr); 5025 } 5026 5027 static int bpf_link_get_info_by_fd(struct file *file, 5028 struct bpf_link *link, 5029 const union bpf_attr *attr, 5030 union bpf_attr __user *uattr) 5031 { 5032 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 5033 struct bpf_link_info info; 5034 u32 info_len = attr->info.info_len; 5035 int err; 5036 5037 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 5038 if (err) 5039 return err; 5040 info_len = min_t(u32, sizeof(info), info_len); 5041 5042 memset(&info, 0, sizeof(info)); 5043 if (copy_from_user(&info, uinfo, info_len)) 5044 return -EFAULT; 5045 5046 info.type = link->type; 5047 info.id = link->id; 5048 if (link->prog) 5049 info.prog_id = link->prog->aux->id; 5050 5051 if (link->ops->fill_link_info) { 5052 err = link->ops->fill_link_info(link, &info); 5053 if (err) 5054 return err; 5055 } 5056 5057 if (copy_to_user(uinfo, &info, info_len) || 5058 put_user(info_len, 
&uattr->info.info_len))
		return -EFAULT;

	return 0;
}


#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	CLASS(fd, f)(attr->info.bpf_fd);
	if (fd_empty(f))
		return -EBADFD;

	if (fd_file(f)->f_op == &bpf_prog_fops)
		return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
					       uattr);
	else if (fd_file(f)->f_op == &bpf_map_fops)
		return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
					      uattr);
	else if (fd_file(f)->f_op == &btf_fops)
		return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
	else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll)
		return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
					       attr, uattr);
	return -EINVAL;
}

#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd

static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{
	struct bpf_token *token = NULL;

	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (attr->btf_flags & ~BPF_F_TOKEN_FD)
		return -EINVAL;

	if (attr->btf_flags & BPF_F_TOKEN_FD) {
		token = bpf_token_get_from_fd(attr->btf_token_fd);
		if (IS_ERR(token))
			return PTR_ERR(token);
		if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
			bpf_token_put(token);
			token = NULL;
		}
	}

	if (!bpf_token_capable(token, CAP_BPF)) {
		bpf_token_put(token);
		return -EPERM;
	}

	bpf_token_put(token);

	return btf_new_fd(attr, uattr, uattr_size);
}

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}

static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just NUL-terminate ubuf */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string including its NUL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the full string; copy what fits
			 * and NUL-terminate it.
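			 * For example: for buf = "sys_enter" (len == 9) and
			 * input_len == 4, the bytes "sys" land in ubuf,
			 * ubuf[3] becomes '\0', and the call returns -ENOSPC
			 * while buf_len still reports 9, so the caller can
			 * retry with a larger buffer.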
5163 */ 5164 char zero = '\0'; 5165 5166 err = -ENOSPC; 5167 if (copy_to_user(ubuf, buf, input_len - 1)) 5168 return -EFAULT; 5169 if (put_user(zero, ubuf + input_len - 1)) 5170 return -EFAULT; 5171 } 5172 } 5173 5174 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 5175 put_user(fd_type, &uattr->task_fd_query.fd_type) || 5176 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 5177 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 5178 return -EFAULT; 5179 5180 return err; 5181 } 5182 5183 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 5184 5185 static int bpf_task_fd_query(const union bpf_attr *attr, 5186 union bpf_attr __user *uattr) 5187 { 5188 pid_t pid = attr->task_fd_query.pid; 5189 u32 fd = attr->task_fd_query.fd; 5190 const struct perf_event *event; 5191 struct task_struct *task; 5192 struct file *file; 5193 int err; 5194 5195 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 5196 return -EINVAL; 5197 5198 if (!capable(CAP_SYS_ADMIN)) 5199 return -EPERM; 5200 5201 if (attr->task_fd_query.flags != 0) 5202 return -EINVAL; 5203 5204 rcu_read_lock(); 5205 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 5206 rcu_read_unlock(); 5207 if (!task) 5208 return -ENOENT; 5209 5210 err = 0; 5211 file = fget_task(task, fd); 5212 put_task_struct(task); 5213 if (!file) 5214 return -EBADF; 5215 5216 if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) { 5217 struct bpf_link *link = file->private_data; 5218 5219 if (link->ops == &bpf_raw_tp_link_lops) { 5220 struct bpf_raw_tp_link *raw_tp = 5221 container_of(link, struct bpf_raw_tp_link, link); 5222 struct bpf_raw_event_map *btp = raw_tp->btp; 5223 5224 err = bpf_task_fd_query_copy(attr, uattr, 5225 raw_tp->link.prog->aux->id, 5226 BPF_FD_TYPE_RAW_TRACEPOINT, 5227 btp->tp->name, 0, 0); 5228 goto put_file; 5229 } 5230 goto out_not_supp; 5231 } 5232 5233 event = perf_get_event(file); 5234 if (!IS_ERR(event)) { 5235 u64 probe_offset, probe_addr; 5236 u32 prog_id, fd_type; 5237 const char *buf; 5238 5239 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 5240 &buf, &probe_offset, 5241 &probe_addr, NULL); 5242 if (!err) 5243 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 5244 fd_type, buf, 5245 probe_offset, 5246 probe_addr); 5247 goto put_file; 5248 } 5249 5250 out_not_supp: 5251 err = -ENOTSUPP; 5252 put_file: 5253 fput(file); 5254 return err; 5255 } 5256 5257 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 5258 5259 #define BPF_DO_BATCH(fn, ...) 
\ 5260 do { \ 5261 if (!fn) { \ 5262 err = -ENOTSUPP; \ 5263 goto err_put; \ 5264 } \ 5265 err = fn(__VA_ARGS__); \ 5266 } while (0) 5267 5268 static int bpf_map_do_batch(const union bpf_attr *attr, 5269 union bpf_attr __user *uattr, 5270 int cmd) 5271 { 5272 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH || 5273 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; 5274 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; 5275 struct bpf_map *map; 5276 int err; 5277 5278 if (CHECK_ATTR(BPF_MAP_BATCH)) 5279 return -EINVAL; 5280 5281 CLASS(fd, f)(attr->batch.map_fd); 5282 5283 map = __bpf_map_get(f); 5284 if (IS_ERR(map)) 5285 return PTR_ERR(map); 5286 if (has_write) 5287 bpf_map_write_active_inc(map); 5288 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 5289 err = -EPERM; 5290 goto err_put; 5291 } 5292 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 5293 err = -EPERM; 5294 goto err_put; 5295 } 5296 5297 if (cmd == BPF_MAP_LOOKUP_BATCH) 5298 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); 5299 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 5300 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); 5301 else if (cmd == BPF_MAP_UPDATE_BATCH) 5302 BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr); 5303 else 5304 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); 5305 err_put: 5306 if (has_write) { 5307 maybe_wait_bpf_programs(map); 5308 bpf_map_write_active_dec(map); 5309 } 5310 return err; 5311 } 5312 5313 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid 5314 static int link_create(union bpf_attr *attr, bpfptr_t uattr) 5315 { 5316 struct bpf_prog *prog; 5317 int ret; 5318 5319 if (CHECK_ATTR(BPF_LINK_CREATE)) 5320 return -EINVAL; 5321 5322 if (attr->link_create.attach_type == BPF_STRUCT_OPS) 5323 return bpf_struct_ops_link_create(attr); 5324 5325 prog = bpf_prog_get(attr->link_create.prog_fd); 5326 if (IS_ERR(prog)) 5327 return PTR_ERR(prog); 5328 5329 ret = bpf_prog_attach_check_attach_type(prog, 5330 attr->link_create.attach_type); 5331 if (ret) 5332 goto out; 5333 5334 switch (prog->type) { 5335 case BPF_PROG_TYPE_CGROUP_SKB: 5336 case BPF_PROG_TYPE_CGROUP_SOCK: 5337 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 5338 case BPF_PROG_TYPE_SOCK_OPS: 5339 case BPF_PROG_TYPE_CGROUP_DEVICE: 5340 case BPF_PROG_TYPE_CGROUP_SYSCTL: 5341 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 5342 ret = cgroup_bpf_link_attach(attr, prog); 5343 break; 5344 case BPF_PROG_TYPE_EXT: 5345 ret = bpf_tracing_prog_attach(prog, 5346 attr->link_create.target_fd, 5347 attr->link_create.target_btf_id, 5348 attr->link_create.tracing.cookie); 5349 break; 5350 case BPF_PROG_TYPE_LSM: 5351 case BPF_PROG_TYPE_TRACING: 5352 if (attr->link_create.attach_type != prog->expected_attach_type) { 5353 ret = -EINVAL; 5354 goto out; 5355 } 5356 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 5357 ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie); 5358 else if (prog->expected_attach_type == BPF_TRACE_ITER) 5359 ret = bpf_iter_link_attach(attr, uattr, prog); 5360 else if (prog->expected_attach_type == BPF_LSM_CGROUP) 5361 ret = cgroup_bpf_link_attach(attr, prog); 5362 else 5363 ret = bpf_tracing_prog_attach(prog, 5364 attr->link_create.target_fd, 5365 attr->link_create.target_btf_id, 5366 attr->link_create.tracing.cookie); 5367 break; 5368 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5369 case BPF_PROG_TYPE_SK_LOOKUP: 5370 ret = netns_bpf_link_create(attr, prog); 5371 break; 5372 case BPF_PROG_TYPE_SK_MSG: 5373 case BPF_PROG_TYPE_SK_SKB: 5374 ret = 
sock_map_link_create(attr, prog); 5375 break; 5376 #ifdef CONFIG_NET 5377 case BPF_PROG_TYPE_XDP: 5378 ret = bpf_xdp_link_attach(attr, prog); 5379 break; 5380 case BPF_PROG_TYPE_SCHED_CLS: 5381 if (attr->link_create.attach_type == BPF_TCX_INGRESS || 5382 attr->link_create.attach_type == BPF_TCX_EGRESS) 5383 ret = tcx_link_attach(attr, prog); 5384 else 5385 ret = netkit_link_attach(attr, prog); 5386 break; 5387 case BPF_PROG_TYPE_NETFILTER: 5388 ret = bpf_nf_link_attach(attr, prog); 5389 break; 5390 #endif 5391 case BPF_PROG_TYPE_PERF_EVENT: 5392 case BPF_PROG_TYPE_TRACEPOINT: 5393 ret = bpf_perf_link_attach(attr, prog); 5394 break; 5395 case BPF_PROG_TYPE_KPROBE: 5396 if (attr->link_create.attach_type == BPF_PERF_EVENT) 5397 ret = bpf_perf_link_attach(attr, prog); 5398 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI || 5399 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION) 5400 ret = bpf_kprobe_multi_link_attach(attr, prog); 5401 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI || 5402 attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION) 5403 ret = bpf_uprobe_multi_link_attach(attr, prog); 5404 break; 5405 default: 5406 ret = -EINVAL; 5407 } 5408 5409 out: 5410 if (ret < 0) 5411 bpf_prog_put(prog); 5412 return ret; 5413 } 5414 5415 static int link_update_map(struct bpf_link *link, union bpf_attr *attr) 5416 { 5417 struct bpf_map *new_map, *old_map = NULL; 5418 int ret; 5419 5420 new_map = bpf_map_get(attr->link_update.new_map_fd); 5421 if (IS_ERR(new_map)) 5422 return PTR_ERR(new_map); 5423 5424 if (attr->link_update.flags & BPF_F_REPLACE) { 5425 old_map = bpf_map_get(attr->link_update.old_map_fd); 5426 if (IS_ERR(old_map)) { 5427 ret = PTR_ERR(old_map); 5428 goto out_put; 5429 } 5430 } else if (attr->link_update.old_map_fd) { 5431 ret = -EINVAL; 5432 goto out_put; 5433 } 5434 5435 ret = link->ops->update_map(link, new_map, old_map); 5436 5437 if (old_map) 5438 bpf_map_put(old_map); 5439 out_put: 5440 bpf_map_put(new_map); 5441 return ret; 5442 } 5443 5444 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd 5445 5446 static int link_update(union bpf_attr *attr) 5447 { 5448 struct bpf_prog *old_prog = NULL, *new_prog; 5449 struct bpf_link *link; 5450 u32 flags; 5451 int ret; 5452 5453 if (CHECK_ATTR(BPF_LINK_UPDATE)) 5454 return -EINVAL; 5455 5456 flags = attr->link_update.flags; 5457 if (flags & ~BPF_F_REPLACE) 5458 return -EINVAL; 5459 5460 link = bpf_link_get_from_fd(attr->link_update.link_fd); 5461 if (IS_ERR(link)) 5462 return PTR_ERR(link); 5463 5464 if (link->ops->update_map) { 5465 ret = link_update_map(link, attr); 5466 goto out_put_link; 5467 } 5468 5469 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); 5470 if (IS_ERR(new_prog)) { 5471 ret = PTR_ERR(new_prog); 5472 goto out_put_link; 5473 } 5474 5475 if (flags & BPF_F_REPLACE) { 5476 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); 5477 if (IS_ERR(old_prog)) { 5478 ret = PTR_ERR(old_prog); 5479 old_prog = NULL; 5480 goto out_put_progs; 5481 } 5482 } else if (attr->link_update.old_prog_fd) { 5483 ret = -EINVAL; 5484 goto out_put_progs; 5485 } 5486 5487 if (link->ops->update_prog) 5488 ret = link->ops->update_prog(link, new_prog, old_prog); 5489 else 5490 ret = -EINVAL; 5491 5492 out_put_progs: 5493 if (old_prog) 5494 bpf_prog_put(old_prog); 5495 if (ret) 5496 bpf_prog_put(new_prog); 5497 out_put_link: 5498 bpf_link_put_direct(link); 5499 return ret; 5500 } 5501 5502 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd 5503 5504 static int link_detach(union 
bpf_attr *attr) 5505 { 5506 struct bpf_link *link; 5507 int ret; 5508 5509 if (CHECK_ATTR(BPF_LINK_DETACH)) 5510 return -EINVAL; 5511 5512 link = bpf_link_get_from_fd(attr->link_detach.link_fd); 5513 if (IS_ERR(link)) 5514 return PTR_ERR(link); 5515 5516 if (link->ops->detach) 5517 ret = link->ops->detach(link); 5518 else 5519 ret = -EOPNOTSUPP; 5520 5521 bpf_link_put_direct(link); 5522 return ret; 5523 } 5524 5525 struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) 5526 { 5527 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); 5528 } 5529 EXPORT_SYMBOL(bpf_link_inc_not_zero); 5530 5531 struct bpf_link *bpf_link_by_id(u32 id) 5532 { 5533 struct bpf_link *link; 5534 5535 if (!id) 5536 return ERR_PTR(-ENOENT); 5537 5538 spin_lock_bh(&link_idr_lock); 5539 /* before link is "settled", ID is 0, pretend it doesn't exist yet */ 5540 link = idr_find(&link_idr, id); 5541 if (link) { 5542 if (link->id) 5543 link = bpf_link_inc_not_zero(link); 5544 else 5545 link = ERR_PTR(-EAGAIN); 5546 } else { 5547 link = ERR_PTR(-ENOENT); 5548 } 5549 spin_unlock_bh(&link_idr_lock); 5550 return link; 5551 } 5552 5553 struct bpf_link *bpf_link_get_curr_or_next(u32 *id) 5554 { 5555 struct bpf_link *link; 5556 5557 spin_lock_bh(&link_idr_lock); 5558 again: 5559 link = idr_get_next(&link_idr, id); 5560 if (link) { 5561 link = bpf_link_inc_not_zero(link); 5562 if (IS_ERR(link)) { 5563 (*id)++; 5564 goto again; 5565 } 5566 } 5567 spin_unlock_bh(&link_idr_lock); 5568 5569 return link; 5570 } 5571 5572 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id 5573 5574 static int bpf_link_get_fd_by_id(const union bpf_attr *attr) 5575 { 5576 struct bpf_link *link; 5577 u32 id = attr->link_id; 5578 int fd; 5579 5580 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) 5581 return -EINVAL; 5582 5583 if (!capable(CAP_SYS_ADMIN)) 5584 return -EPERM; 5585 5586 link = bpf_link_by_id(id); 5587 if (IS_ERR(link)) 5588 return PTR_ERR(link); 5589 5590 fd = bpf_link_new_fd(link); 5591 if (fd < 0) 5592 bpf_link_put_direct(link); 5593 5594 return fd; 5595 } 5596 5597 DEFINE_MUTEX(bpf_stats_enabled_mutex); 5598 5599 static int bpf_stats_release(struct inode *inode, struct file *file) 5600 { 5601 mutex_lock(&bpf_stats_enabled_mutex); 5602 static_key_slow_dec(&bpf_stats_enabled_key.key); 5603 mutex_unlock(&bpf_stats_enabled_mutex); 5604 return 0; 5605 } 5606 5607 static const struct file_operations bpf_stats_fops = { 5608 .release = bpf_stats_release, 5609 }; 5610 5611 static int bpf_enable_runtime_stats(void) 5612 { 5613 int fd; 5614 5615 mutex_lock(&bpf_stats_enabled_mutex); 5616 5617 /* Set a very high limit to avoid overflow */ 5618 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { 5619 mutex_unlock(&bpf_stats_enabled_mutex); 5620 return -EBUSY; 5621 } 5622 5623 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); 5624 if (fd >= 0) 5625 static_key_slow_inc(&bpf_stats_enabled_key.key); 5626 5627 mutex_unlock(&bpf_stats_enabled_mutex); 5628 return fd; 5629 } 5630 5631 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type 5632 5633 static int bpf_enable_stats(union bpf_attr *attr) 5634 { 5635 5636 if (CHECK_ATTR(BPF_ENABLE_STATS)) 5637 return -EINVAL; 5638 5639 if (!capable(CAP_SYS_ADMIN)) 5640 return -EPERM; 5641 5642 switch (attr->enable_stats.type) { 5643 case BPF_STATS_RUN_TIME: 5644 return bpf_enable_runtime_stats(); 5645 default: 5646 break; 5647 } 5648 return -EINVAL; 5649 } 5650 5651 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags 5652 5653 static int bpf_iter_create(union 
bpf_attr *attr) 5654 { 5655 struct bpf_link *link; 5656 int err; 5657 5658 if (CHECK_ATTR(BPF_ITER_CREATE)) 5659 return -EINVAL; 5660 5661 if (attr->iter_create.flags) 5662 return -EINVAL; 5663 5664 link = bpf_link_get_from_fd(attr->iter_create.link_fd); 5665 if (IS_ERR(link)) 5666 return PTR_ERR(link); 5667 5668 err = bpf_iter_new_fd(link); 5669 bpf_link_put_direct(link); 5670 5671 return err; 5672 } 5673 5674 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags 5675 5676 static int bpf_prog_bind_map(union bpf_attr *attr) 5677 { 5678 struct bpf_prog *prog; 5679 struct bpf_map *map; 5680 struct bpf_map **used_maps_old, **used_maps_new; 5681 int i, ret = 0; 5682 5683 if (CHECK_ATTR(BPF_PROG_BIND_MAP)) 5684 return -EINVAL; 5685 5686 if (attr->prog_bind_map.flags) 5687 return -EINVAL; 5688 5689 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); 5690 if (IS_ERR(prog)) 5691 return PTR_ERR(prog); 5692 5693 map = bpf_map_get(attr->prog_bind_map.map_fd); 5694 if (IS_ERR(map)) { 5695 ret = PTR_ERR(map); 5696 goto out_prog_put; 5697 } 5698 5699 mutex_lock(&prog->aux->used_maps_mutex); 5700 5701 used_maps_old = prog->aux->used_maps; 5702 5703 for (i = 0; i < prog->aux->used_map_cnt; i++) 5704 if (used_maps_old[i] == map) { 5705 bpf_map_put(map); 5706 goto out_unlock; 5707 } 5708 5709 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, 5710 sizeof(used_maps_new[0]), 5711 GFP_KERNEL); 5712 if (!used_maps_new) { 5713 ret = -ENOMEM; 5714 goto out_unlock; 5715 } 5716 5717 /* The bpf program will not access the bpf map, but for the sake of 5718 * simplicity, increase sleepable_refcnt for sleepable program as well. 5719 */ 5720 if (prog->sleepable) 5721 atomic64_inc(&map->sleepable_refcnt); 5722 memcpy(used_maps_new, used_maps_old, 5723 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); 5724 used_maps_new[prog->aux->used_map_cnt] = map; 5725 5726 prog->aux->used_map_cnt++; 5727 prog->aux->used_maps = used_maps_new; 5728 5729 kfree(used_maps_old); 5730 5731 out_unlock: 5732 mutex_unlock(&prog->aux->used_maps_mutex); 5733 5734 if (ret) 5735 bpf_map_put(map); 5736 out_prog_put: 5737 bpf_prog_put(prog); 5738 return ret; 5739 } 5740 5741 #define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd 5742 5743 static int token_create(union bpf_attr *attr) 5744 { 5745 if (CHECK_ATTR(BPF_TOKEN_CREATE)) 5746 return -EINVAL; 5747 5748 /* no flags are supported yet */ 5749 if (attr->token_create.flags) 5750 return -EINVAL; 5751 5752 return bpf_token_create(attr); 5753 } 5754 5755 static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size) 5756 { 5757 union bpf_attr attr; 5758 int err; 5759 5760 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 5761 if (err) 5762 return err; 5763 size = min_t(u32, size, sizeof(attr)); 5764 5765 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 5766 memset(&attr, 0, sizeof(attr)); 5767 if (copy_from_bpfptr(&attr, uattr, size) != 0) 5768 return -EFAULT; 5769 5770 err = security_bpf(cmd, &attr, size); 5771 if (err < 0) 5772 return err; 5773 5774 switch (cmd) { 5775 case BPF_MAP_CREATE: 5776 err = map_create(&attr); 5777 break; 5778 case BPF_MAP_LOOKUP_ELEM: 5779 err = map_lookup_elem(&attr); 5780 break; 5781 case BPF_MAP_UPDATE_ELEM: 5782 err = map_update_elem(&attr, uattr); 5783 break; 5784 case BPF_MAP_DELETE_ELEM: 5785 err = map_delete_elem(&attr, uattr); 5786 break; 5787 case BPF_MAP_GET_NEXT_KEY: 5788 err = map_get_next_key(&attr); 5789 break; 5790 case BPF_MAP_FREEZE: 5791 err = map_freeze(&attr); 5792 break; 5793 case 
BPF_PROG_LOAD: 5794 err = bpf_prog_load(&attr, uattr, size); 5795 break; 5796 case BPF_OBJ_PIN: 5797 err = bpf_obj_pin(&attr); 5798 break; 5799 case BPF_OBJ_GET: 5800 err = bpf_obj_get(&attr); 5801 break; 5802 case BPF_PROG_ATTACH: 5803 err = bpf_prog_attach(&attr); 5804 break; 5805 case BPF_PROG_DETACH: 5806 err = bpf_prog_detach(&attr); 5807 break; 5808 case BPF_PROG_QUERY: 5809 err = bpf_prog_query(&attr, uattr.user); 5810 break; 5811 case BPF_PROG_TEST_RUN: 5812 err = bpf_prog_test_run(&attr, uattr.user); 5813 break; 5814 case BPF_PROG_GET_NEXT_ID: 5815 err = bpf_obj_get_next_id(&attr, uattr.user, 5816 &prog_idr, &prog_idr_lock); 5817 break; 5818 case BPF_MAP_GET_NEXT_ID: 5819 err = bpf_obj_get_next_id(&attr, uattr.user, 5820 &map_idr, &map_idr_lock); 5821 break; 5822 case BPF_BTF_GET_NEXT_ID: 5823 err = bpf_obj_get_next_id(&attr, uattr.user, 5824 &btf_idr, &btf_idr_lock); 5825 break; 5826 case BPF_PROG_GET_FD_BY_ID: 5827 err = bpf_prog_get_fd_by_id(&attr); 5828 break; 5829 case BPF_MAP_GET_FD_BY_ID: 5830 err = bpf_map_get_fd_by_id(&attr); 5831 break; 5832 case BPF_OBJ_GET_INFO_BY_FD: 5833 err = bpf_obj_get_info_by_fd(&attr, uattr.user); 5834 break; 5835 case BPF_RAW_TRACEPOINT_OPEN: 5836 err = bpf_raw_tracepoint_open(&attr); 5837 break; 5838 case BPF_BTF_LOAD: 5839 err = bpf_btf_load(&attr, uattr, size); 5840 break; 5841 case BPF_BTF_GET_FD_BY_ID: 5842 err = bpf_btf_get_fd_by_id(&attr); 5843 break; 5844 case BPF_TASK_FD_QUERY: 5845 err = bpf_task_fd_query(&attr, uattr.user); 5846 break; 5847 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 5848 err = map_lookup_and_delete_elem(&attr); 5849 break; 5850 case BPF_MAP_LOOKUP_BATCH: 5851 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH); 5852 break; 5853 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 5854 err = bpf_map_do_batch(&attr, uattr.user, 5855 BPF_MAP_LOOKUP_AND_DELETE_BATCH); 5856 break; 5857 case BPF_MAP_UPDATE_BATCH: 5858 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH); 5859 break; 5860 case BPF_MAP_DELETE_BATCH: 5861 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH); 5862 break; 5863 case BPF_LINK_CREATE: 5864 err = link_create(&attr, uattr); 5865 break; 5866 case BPF_LINK_UPDATE: 5867 err = link_update(&attr); 5868 break; 5869 case BPF_LINK_GET_FD_BY_ID: 5870 err = bpf_link_get_fd_by_id(&attr); 5871 break; 5872 case BPF_LINK_GET_NEXT_ID: 5873 err = bpf_obj_get_next_id(&attr, uattr.user, 5874 &link_idr, &link_idr_lock); 5875 break; 5876 case BPF_ENABLE_STATS: 5877 err = bpf_enable_stats(&attr); 5878 break; 5879 case BPF_ITER_CREATE: 5880 err = bpf_iter_create(&attr); 5881 break; 5882 case BPF_LINK_DETACH: 5883 err = link_detach(&attr); 5884 break; 5885 case BPF_PROG_BIND_MAP: 5886 err = bpf_prog_bind_map(&attr); 5887 break; 5888 case BPF_TOKEN_CREATE: 5889 err = token_create(&attr); 5890 break; 5891 default: 5892 err = -EINVAL; 5893 break; 5894 } 5895 5896 return err; 5897 } 5898 5899 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 5900 { 5901 return __sys_bpf(cmd, USER_BPFPTR(uattr), size); 5902 } 5903 5904 static bool syscall_prog_is_valid_access(int off, int size, 5905 enum bpf_access_type type, 5906 const struct bpf_prog *prog, 5907 struct bpf_insn_access_aux *info) 5908 { 5909 if (off < 0 || off >= U16_MAX) 5910 return false; 5911 if (off % size != 0) 5912 return false; 5913 return true; 5914 } 5915 5916 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size) 5917 { 5918 switch (cmd) { 5919 case BPF_MAP_CREATE: 5920 case BPF_MAP_DELETE_ELEM: 5921 
case BPF_MAP_UPDATE_ELEM:
	case BPF_MAP_FREEZE:
	case BPF_MAP_GET_FD_BY_ID:
	case BPF_PROG_LOAD:
	case BPF_BTF_LOAD:
	case BPF_LINK_CREATE:
	case BPF_RAW_TRACEPOINT_OPEN:
		break;
	default:
		return -EINVAL;
	}
	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}


/* To shut up -Wmissing-prototypes.
 * This function is used by the kernel light skeleton
 * to load bpf programs when modules are loaded or during kernel boot.
 * See tools/lib/bpf/skel_internal.h
 */
int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);

int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_prog * __maybe_unused prog;
	struct bpf_tramp_run_ctx __maybe_unused run_ctx;

	switch (cmd) {
#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
	case BPF_PROG_TEST_RUN:
		if (attr->test.data_in || attr->test.data_out ||
		    attr->test.ctx_out || attr->test.duration ||
		    attr->test.repeat || attr->test.flags)
			return -EINVAL;

		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
		    attr->test.ctx_size_in > U16_MAX) {
			bpf_prog_put(prog);
			return -EINVAL;
		}

		run_ctx.bpf_cookie = 0;
		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
			/* recursion detected */
			__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
			bpf_prog_put(prog);
			return -EBUSY;
		}
		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
						&run_ctx);
		bpf_prog_put(prog);
		return 0;
#endif
	default:
		return ____bpf_sys_bpf(cmd, attr, size);
	}
}
EXPORT_SYMBOL(kern_sys_bpf);

static const struct bpf_func_proto bpf_sys_bpf_proto = {
	.func		= bpf_sys_bpf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id, prog);
}

BPF_CALL_1(bpf_sys_close, u32, fd)
{
	/* When a bpf program calls this helper, there must not be an
	 * fdget() without a matching, completed fdput().
	 * This helper is allowed in the following callchain only:
	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
	 */
	return close_fd(fd);
}

static const struct bpf_func_proto bpf_sys_close_proto = {
	.func		= bpf_sys_close,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
	*res = 0;
	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	if (!bpf_dump_raw_ok(current_cred()))
		return -EPERM;

	*res = kallsyms_lookup_name(name);
	return *res ?
0 : -ENOENT; 6031 } 6032 6033 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { 6034 .func = bpf_kallsyms_lookup_name, 6035 .gpl_only = false, 6036 .ret_type = RET_INTEGER, 6037 .arg1_type = ARG_PTR_TO_MEM, 6038 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 6039 .arg3_type = ARG_ANYTHING, 6040 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, 6041 .arg4_size = sizeof(u64), 6042 }; 6043 6044 static const struct bpf_func_proto * 6045 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 6046 { 6047 switch (func_id) { 6048 case BPF_FUNC_sys_bpf: 6049 return !bpf_token_capable(prog->aux->token, CAP_PERFMON) 6050 ? NULL : &bpf_sys_bpf_proto; 6051 case BPF_FUNC_btf_find_by_name_kind: 6052 return &bpf_btf_find_by_name_kind_proto; 6053 case BPF_FUNC_sys_close: 6054 return &bpf_sys_close_proto; 6055 case BPF_FUNC_kallsyms_lookup_name: 6056 return &bpf_kallsyms_lookup_name_proto; 6057 default: 6058 return tracing_prog_func_proto(func_id, prog); 6059 } 6060 } 6061 6062 const struct bpf_verifier_ops bpf_syscall_verifier_ops = { 6063 .get_func_proto = syscall_prog_func_proto, 6064 .is_valid_access = syscall_prog_is_valid_access, 6065 }; 6066 6067 const struct bpf_prog_ops bpf_syscall_prog_ops = { 6068 .test_run = bpf_prog_test_run_syscall, 6069 }; 6070 6071 #ifdef CONFIG_SYSCTL 6072 static int bpf_stats_handler(const struct ctl_table *table, int write, 6073 void *buffer, size_t *lenp, loff_t *ppos) 6074 { 6075 struct static_key *key = (struct static_key *)table->data; 6076 static int saved_val; 6077 int val, ret; 6078 struct ctl_table tmp = { 6079 .data = &val, 6080 .maxlen = sizeof(val), 6081 .mode = table->mode, 6082 .extra1 = SYSCTL_ZERO, 6083 .extra2 = SYSCTL_ONE, 6084 }; 6085 6086 if (write && !capable(CAP_SYS_ADMIN)) 6087 return -EPERM; 6088 6089 mutex_lock(&bpf_stats_enabled_mutex); 6090 val = saved_val; 6091 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 6092 if (write && !ret && val != saved_val) { 6093 if (val) 6094 static_key_slow_inc(key); 6095 else 6096 static_key_slow_dec(key); 6097 saved_val = val; 6098 } 6099 mutex_unlock(&bpf_stats_enabled_mutex); 6100 return ret; 6101 } 6102 6103 void __weak unpriv_ebpf_notify(int new_state) 6104 { 6105 } 6106 6107 static int bpf_unpriv_handler(const struct ctl_table *table, int write, 6108 void *buffer, size_t *lenp, loff_t *ppos) 6109 { 6110 int ret, unpriv_enable = *(int *)table->data; 6111 bool locked_state = unpriv_enable == 1; 6112 struct ctl_table tmp = *table; 6113 6114 if (write && !capable(CAP_SYS_ADMIN)) 6115 return -EPERM; 6116 6117 tmp.data = &unpriv_enable; 6118 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 6119 if (write && !ret) { 6120 if (locked_state && unpriv_enable != 1) 6121 return -EPERM; 6122 *(int *)table->data = unpriv_enable; 6123 } 6124 6125 if (write) 6126 unpriv_ebpf_notify(unpriv_enable); 6127 6128 return ret; 6129 } 6130 6131 static struct ctl_table bpf_syscall_table[] = { 6132 { 6133 .procname = "unprivileged_bpf_disabled", 6134 .data = &sysctl_unprivileged_bpf_disabled, 6135 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), 6136 .mode = 0644, 6137 .proc_handler = bpf_unpriv_handler, 6138 .extra1 = SYSCTL_ZERO, 6139 .extra2 = SYSCTL_TWO, 6140 }, 6141 { 6142 .procname = "bpf_stats_enabled", 6143 .data = &bpf_stats_enabled_key.key, 6144 .mode = 0644, 6145 .proc_handler = bpf_stats_handler, 6146 }, 6147 }; 6148 6149 static int __init bpf_syscall_sysctl_init(void) 6150 { 6151 register_sysctl_init("kernel", bpf_syscall_table); 
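	/* The entries land under /proc/sys/kernel/, i.e. the
	 * kernel.unprivileged_bpf_disabled and kernel.bpf_stats_enabled
	 * knobs handled by bpf_unpriv_handler() and bpf_stats_handler()
	 * above.
	 */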
6152 return 0; 6153 } 6154 late_initcall(bpf_syscall_sysctl_init); 6155 #endif /* CONFIG_SYSCTL */ 6156
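
/*
 * Illustrative userspace sketch (not part of this file): driving two of the
 * commands implemented above, BPF_ENABLE_STATS and BPF_OBJ_GET_INFO_BY_FD,
 * through the raw bpf(2) syscall.  The sys_bpf() wrapper and
 * query_run_stats() are hypothetical names for this sketch only; it assumes
 * a prog_fd obtained earlier from BPF_PROG_LOAD and CAP_SYS_ADMIN for the
 * stats toggle, and trims error handling to the essentials.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
 *	}
 *
 *	int query_run_stats(int prog_fd, struct bpf_prog_info *info)
 *	{
 *		union bpf_attr attr;
 *		int stats_fd;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *		stats_fd = sys_bpf(BPF_ENABLE_STATS, &attr);
 *		if (stats_fd < 0)
 *			return -1;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		memset(info, 0, sizeof(*info));
 *		attr.info.bpf_fd = prog_fd;
 *		attr.info.info_len = sizeof(*info);
 *		attr.info.info = (__u64)(unsigned long)info;
 *		if (sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr)) {
 *			close(stats_fd);
 *			return -1;
 *		}
 *
 *		close(stats_fd);	// stats stop accumulating on release
 *		return 0;
 *	}
 *
 * info->run_cnt and info->run_time_ns then reflect executions that happened
 * while stats_fd was held open (see bpf_enable_runtime_stats() and
 * bpf_stats_release() above).
 */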