// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>
#include <linux/tracepoint.h>
#include <linux/overflow.h>

#include <net/netfilter/nf_bpf_link.h>
#include <net/netkit.h>
#include <net/tcx.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * only meant to future-proof against newly added fields.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
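/*
 * Usage sketch (illustrative, not part of this file): a syscall handler
 * that accepts a user struct which may grow over time first rejects any
 * non-zero trailing bytes, then copies only the size it knows about:
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	if (copy_from_bpfptr(&attr, uattr, size))
 *		return -EFAULT;
 *
 * This is the pattern the bpf(2) syscall itself follows for 'union bpf_attr'.
 */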
const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = bpf_map_offload_map_mem_usage,
};

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}
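/*
 * Worked example (illustrative): for a BPF_MAP_TYPE_PERCPU_ARRAY with
 * value_size == 12 on a machine with 4 possible CPUs, the syscall-side
 * buffer is round_up(12, 8) * 4 == 16 * 4 == 64 bytes, i.e. one 8-byte
 * aligned slot per possible CPU. For fd-based maps the "value" exchanged
 * with user space is just a 32-bit fd/id, hence sizeof(u32).
 */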
static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running non-sleepable BPF programs to complete so that
	 * userspace, when we return to it, knows that all non-sleepable
	 * programs that could be running use the new map value. Sleepable BPF
	 * programs would need synchronize_rcu_tasks_trace() instead, but that
	 * can take a very long time and userspace may think the syscall hangs
	 * forever, so sleepable programs are not handled here.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static void unpin_uptr_kaddr(void *kaddr)
{
	if (kaddr)
		unpin_user_page(virt_to_page(kaddr));
}

static void __bpf_obj_unpin_uptrs(struct btf_record *rec, u32 cnt, void *obj)
{
	const struct btf_field *field;
	void **uptr_addr;
	int i;

	for (i = 0, field = rec->fields; i < cnt; i++, field++) {
		if (field->type != BPF_UPTR)
			continue;

		uptr_addr = obj + field->offset;
		unpin_uptr_kaddr(*uptr_addr);
	}
}

static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *obj)
{
	if (!btf_record_has_field(rec, BPF_UPTR))
		return;

	__bpf_obj_unpin_uptrs(rec, rec->cnt, obj);
}

static int bpf_obj_pin_uptrs(struct btf_record *rec, void *obj)
{
	const struct btf_field *field;
	const struct btf_type *t;
	unsigned long start, end;
	struct page *page;
	void **uptr_addr;
	int i, err;

	if (!btf_record_has_field(rec, BPF_UPTR))
		return 0;

	for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
		if (field->type != BPF_UPTR)
			continue;

		uptr_addr = obj + field->offset;
		start = *(unsigned long *)uptr_addr;
		if (!start)
			continue;

		t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id);
		/* t->size was checked for zero before */
		if (check_add_overflow(start, t->size - 1, &end)) {
			err = -EFAULT;
			goto unpin_all;
		}

		/* The uptr's struct cannot span across two pages */
		if ((start & PAGE_MASK) != (end & PAGE_MASK)) {
			err = -EOPNOTSUPP;
			goto unpin_all;
		}

		err = pin_user_pages_fast(start, 1, FOLL_LONGTERM | FOLL_WRITE, &page);
		if (err != 1)
			goto unpin_all;

		if (PageHighMem(page)) {
			err = -EOPNOTSUPP;
			unpin_user_page(page);
			goto unpin_all;
		}

		*uptr_addr = page_address(page) + offset_in_page(start);
	}

	return 0;

unpin_all:
	__bpf_obj_unpin_uptrs(rec, i, obj);
	return err;
}
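/*
 * Worked example (illustrative, assuming 4 KiB pages): a BPF_UPTR struct of
 * size 64 starting at user address 0x7f0000000fe0 ends at 0x7f000000101f,
 * so (start & PAGE_MASK) != (end & PAGE_MASK) and pinning fails with
 * -EOPNOTSUPP; the same struct at 0x7f0000000f80 fits within one page and
 * is pinned with pin_user_pages_fast(), after which the map value holds
 * the kernel-mapped address instead of the raw user pointer.
 */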
static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_offloaded(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_ARENA ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, map_file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
						   flags);
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
						  flags);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		err = bpf_obj_pin_uptrs(map->record, value);
		if (!err) {
			rcu_read_lock();
			err = map->ops->map_update_elem(map, key, value, flags);
			rcu_read_unlock();
			if (err)
				bpf_obj_unpin_uptrs(map->record, value);
		}
	}
	bpf_enable_instrumentation();

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_offloaded(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();

	return err;
}

/* Please, do not use this function outside of the map creation path
 * (e.g. in map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}
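/*
 * Sizing note (illustrative): with 4 KiB pages and PAGE_ALLOC_COSTLY_ORDER
 * == 3, requests up to 32 KiB are first tried with kmalloc_node() using
 * __GFP_NORETRY (fail fast rather than invoke the OOM killer); anything
 * larger, any mmapable request, or a failed kmalloc falls through to
 * __vmalloc_node_range() with __GFP_RETRY_MAYFAIL so page-cache reclaim
 * still gets a chance before the allocation gives up.
 */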
static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	spin_lock_irqsave(&map_idr_lock, flags);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	spin_unlock_irqrestore(&map_idr_lock, flags);
}

#ifdef CONFIG_MEMCG
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	if (memcg_bpf_enabled())
		map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}
#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

static bool can_alloc_pages(void)
{
	return preempt_count() == 0 && !irqs_disabled() &&
		!IS_ENABLED(CONFIG_PREEMPT_RT);
}

static struct page *__bpf_alloc_page(int nid)
{
	if (!can_alloc_pages())
		return alloc_pages_nolock(nid, 0);

	return alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT
				| __GFP_NOWARN,
				0);
}

int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
			unsigned long nr_pages, struct page **pages)
{
	unsigned long i, j;
	struct page *pg;
	int ret = 0;
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg, *old_memcg;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
#endif
	for (i = 0; i < nr_pages; i++) {
		pg = __bpf_alloc_page(nid);

		if (pg) {
			pages[i] = pg;
			continue;
		}
		for (j = 0; j < i; j++)
			free_pages_nolock(pages[j], 0);
		ret = -ENOMEM;
		break;
	}

#ifdef CONFIG_MEMCG
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
#endif
	return ret;
}

static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
		return NULL;
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & field_mask))
		return NULL;
	return field;
}
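/*
 * Usage sketch (illustrative): rec->fields[] is kept sorted by offset, so a
 * caller that knows where a special field might live can binary-search it:
 *
 *	const struct btf_field *field;
 *
 *	field = btf_record_find(map->record, off, BPF_TIMER | BPF_WORKQUEUE);
 *	if (!field)
 *		return -EINVAL;
 *
 * bsearch() compares only the offset; field_mask then filters the match
 * down to the field types the caller actually cares about.
 */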
void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
		case BPF_UPTR:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			if (btf_is_kernel(rec->fields[i].kptr.btf))
				btf_put(rec->fields[i].kptr.btf);
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_RES_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
		case BPF_WORKQUEUE:
			/* Nothing to release */
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}

struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = struct_size(rec, fields, rec->cnt);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
		case BPF_UPTR:
			if (btf_is_kernel(fields[i].kptr.btf))
				btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_RES_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
		case BPF_WORKQUEUE:
			/* Nothing to acquire */
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = struct_size(rec_a, fields, rec_a->cnt);
	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
	 * members are zeroed out. So memcmp is safe to do without worrying
	 * about padding/unused fields.
	 *
	 * While spin_lock, timer, and kptr have no relation to map BTF,
	 * list_head metadata is specific to map BTF, the btf and value_rec
	 * members in particular. btf is the map BTF, while value_rec points to
	 * btf_record in that map BTF.
	 *
	 * So while by default, we don't rely on the map BTF (which the records
	 * were parsed from) matching for both records, which is not backwards
	 * compatible, in case list_head is part of it, we implicitly rely on
	 * that by way of depending on memcmp succeeding for it.
	 */
	return !memcmp(rec_a, rec_b, size);
}
void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE)))
		return;
	bpf_wq_cancel_and_free(obj + rec->wq_off);
}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_struct_meta *pointee_struct_meta;
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;
		void *xchgd_field;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
		case BPF_RES_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_WORKQUEUE:
			bpf_wq_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
			if (!xchgd_field)
				break;

			if (!btf_is_kernel(field->kptr.btf)) {
				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
									   field->kptr.btf_id);
				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
								 pointee_struct_meta->record : NULL,
						    fields[i].type == BPF_KPTR_PERCPU);
			} else {
				field->kptr.dtor(xchgd_field);
			}
			break;
		case BPF_UPTR:
			/* The caller ensured that no one is using the uptr */
			unpin_uptr_kaddr(*(void **)field_ptr);
			break;
		case BPF_LIST_HEAD:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_RB_ROOT:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_LIST_NODE:
		case BPF_RB_NODE:
		case BPF_REFCOUNT:
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}

static void bpf_map_free(struct bpf_map *map)
{
	struct btf_record *rec = map->record;
	struct btf *btf = map->btf;

	/* Implementation-dependent freeing. Disable migration to simplify
	 * freeing of values or special fields allocated from the bpf memory
	 * allocator.
	 */
	migrate_disable();
	map->ops->map_free(map);
	migrate_enable();

	/* Delay freeing of btf_record for maps, as map_free
	 * callback usually needs access to them. It is better to do it here
	 * than require each callback to do the free itself manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record was
	 * already freed using the map_free callback for map in map case which
	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
	 * template bpf_map struct used during verification.
	 */
	btf_record_free(rec);
	/* Delay freeing of btf for maps, as map_free callback may need
	 * struct_meta info which will be freed with btf_put().
	 */
	btf_put(btf);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	bpf_map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}
static void bpf_map_free_in_work(struct bpf_map *map)
{
	INIT_WORK(&map->work, bpf_map_free_deferred);
	/* Avoid spawning kworkers, since they all might contend
	 * for the same mutex like slab_mutex.
	 */
	queue_work(system_unbound_wq, &map->work);
}

static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{
	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}

static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_map_free_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_map_free_rcu_gp);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);

		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
		if (READ_ONCE(map->free_after_mult_rcu_gp))
			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
		else if (READ_ONCE(map->free_after_rcu_gp))
			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
		else
			bpf_map_free_in_work(map);
	}
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = fd_file(f)->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type  = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_usage(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}
/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open		= bpf_map_mmap_open,
	.close		= bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err = 0;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference, which would allow user-space to still modify it
		 * after freezing, while the verifier assumes the contents do
		 * not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
		bpf_map_write_active_inc(map);
	}
out:
	mutex_unlock(&map->freeze_mutex);
	if (err)
		return err;

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vm_flags_clear(vma, VM_MAYEXEC);
	/* If mapping is read-only, then disallow potentially re-mapping with
	 * PROT_WRITE by dropping VM_MAYWRITE flag. This VM_MAYWRITE clearing
	 * means that as far as BPF map's memory-mapped VMAs are concerned,
	 * VM_WRITE and VM_MAYWRITE are equivalent: if one of them is set,
	 * both should be set, so we can forget about VM_MAYWRITE and always
	 * check just VM_WRITE
	 */
	if (!(vma->vm_flags & VM_WRITE))
		vm_flags_clear(vma, VM_MAYWRITE);

	err = map->ops->map_mmap(map, vma);
	if (err) {
		if (vma->vm_flags & VM_WRITE)
			bpf_map_write_active_dec(map);
	}

	return err;
}
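/*
 * Semantics sketch (illustrative): for an array map created with
 * BPF_F_MMAPABLE,
 *
 *	p = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
 *
 * succeeds only while the map is neither frozen nor BPF_F_RDONLY_PROG.
 * A read-only mapping (PROT_READ alone) has VM_MAYWRITE cleared here, so a
 * later mprotect(p, sz, PROT_WRITE) fails with -EACCES and can never be
 * used to sneak writes past map_freeze().
 */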
static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
					   unsigned long len, unsigned long pgoff,
					   unsigned long flags)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_get_unmapped_area)
		return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
#ifdef CONFIG_MMU
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
#else
	return addr;
#endif
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
	.get_unmapped_area = bpf_get_unmapped_area,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
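/*
 * Expansion sketch (illustrative): with BPF_MAP_CREATE_LAST_FIELD defined as
 * map_token_fd below, CHECK_ATTR(BPF_MAP_CREATE) verifies that every byte of
 * 'union bpf_attr' past attr->map_token_fd is zero, i.e. the caller did not
 * set fields this kernel does not know about for this command. It is the
 * in-kernel complement of bpf_check_uarg_tail_zero() above.
 */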
/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
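/*
 * Examples (illustrative): "my_map.v1" and "ringbuf_2" are accepted and
 * their length returned; "my map" (space) and "map-1" (dash) fail with
 * -EINVAL; a 16-character name in a 16-byte field also fails, since no
 * terminating '\0' is found within "size" bytes.
 */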
int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type,
				       BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
				       BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_token_capable(token, CAP_BPF)) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
			case BPF_RES_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
			case BPF_WORKQUEUE:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_KPTR_PERCPU:
			case BPF_REFCOUNT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_UPTR:
				if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_LIST_HEAD:
			case BPF_RB_ROOT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	ret = btf_check_and_fixup_fields(btf, map->record);
	if (ret < 0)
		goto free_map_tab;

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}

static bool bpf_net_capable(void)
{
	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
}
#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
static int map_create(union bpf_attr *attr, bool kernel)
{
	const struct bpf_map_ops *ops;
	struct bpf_token *token = NULL;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 map_type = attr->map_type;
	struct bpf_map *map;
	bool token_flag;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
	 * to avoid per-map type checks tripping on unknown flag
	 */
	token_flag = attr->map_flags & BPF_F_TOKEN_FD;
	attr->map_flags &= ~BPF_F_TOKEN_FD;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_type != BPF_MAP_TYPE_ARENA &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map_type = attr->map_type;
	if (map_type >= ARRAY_SIZE(bpf_map_types))
		return -EINVAL;
	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[map_type];
	if (!ops)
		return -EINVAL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return err;
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	if (!ops->map_mem_usage)
		return -EINVAL;

	if (token_flag) {
		token = bpf_token_get_from_fd(attr->map_token_fd);
		if (IS_ERR(token))
			return PTR_ERR(token);

		/* if current token doesn't grant map creation permissions,
		 * then we can't use this token, so ignore it and rely on
		 * system-wide capabilities checks
		 */
		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
		    !bpf_token_allow_map_type(token, attr->map_type)) {
			bpf_token_put(token);
			token = NULL;
		}
	}

	err = -EPERM;

	/* Intent here is for unprivileged_bpf_disabled to block BPF map
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
		goto put_token;

	/* check privileged map type permissions */
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		/* unprivileged */
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
	case BPF_MAP_TYPE_BLOOM_FILTER:
	case BPF_MAP_TYPE_LPM_TRIE:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_STRUCT_OPS:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_ARENA:
		if (!bpf_token_capable(token, CAP_BPF))
			goto put_token;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_XSKMAP:
		if (!bpf_token_capable(token, CAP_NET_ADMIN))
			goto put_token;
		break;
	default:
		WARN(1, "unsupported map type %d", map_type);
		goto put_token;
	}

	map = ops->map_alloc(attr);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto put_token;
	}
	map->ops = ops;
	map->map_type = map_type;

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel
	     * counterpart. Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_create(map, attr, token, kernel);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);
	bpf_token_put(token);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	bpf_map_free(map);
put_token:
	bpf_token_put(token);
	return err;
}
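/*
 * Usage sketch from user space (illustrative, error handling omitted):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 1024,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * With unprivileged BPF disabled, the same call can still succeed for a
 * delegated caller that sets BPF_F_TOKEN_FD in map_flags and passes a BPF
 * token in map_token_fd.
 */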
void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	CLASS(fd, f)(ufd);
	struct bpf_map *map = __bpf_map_get(f);

	if (!IS_ERR(map))
		bpf_map_inc(map);

	return map;
}
EXPORT_SYMBOL_NS(bpf_map_get, "BPF_INTERNAL");

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	CLASS(fd, f)(ufd);
	struct bpf_map *map = __bpf_map_get(f);

	if (!IS_ERR(map))
		bpf_map_inc_with_uref(map);

	return map;
}

/* map_idr_lock should have been held or the map should have been
 * protected by rcu read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	lockdep_assert(rcu_read_lock_held());
	return __bpf_map_inc_not_zero(map, false);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
		return -EPERM;

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key))
		return PTR_ERR(key);

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
	return err;
}
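/*
 * Usage sketch from user space (illustrative): value_buf must be
 * bpf_map_value_size() bytes, so per-CPU maps receive one 8-byte-aligned
 * copy of the value for every possible CPU:
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)value_buf,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *
 * Setting attr.flags = BPF_F_LOCK is only valid when the value contains a
 * struct bpf_spin_lock; the lock is then taken during the copy and its word
 * excluded from the data returned.
 */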
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);
	value = kvmemdup_bpfptr(uvalue, value_size);
	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}

	err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags);
	if (!err)
		maybe_wait_bpf_programs(map);

	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	struct bpf_map *map;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	if (!err)
		maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	struct bpf_map *map;
	void *key, *next_key;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	CLASS(fd, f)(attr->map_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
		return -EPERM;

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key))
			return PTR_ERR(key);
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_offloaded(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	return err;
}

int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	void *key, *value;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, map_file, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);

	return err;
}
int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	u32 value_size, cp, max_count;
	int err;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT)
			goto next_key;

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		cp++;
next_key:
		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}
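/*
 * Iteration sketch from user space (illustrative): resumption is keyed on
 * batch.in_batch/batch.out_batch, each pointing at a key-sized buffer. Pass
 * in_batch = 0 (NULL) on the first call, then feed out_batch back in as the
 * next in_batch until the call fails with ENOENT:
 *
 *	attr.batch.map_fd = map_fd;
 *	attr.batch.keys = ptr_to_u64(keys);
 *	attr.batch.values = ptr_to_u64(values);
 *	attr.batch.out_batch = ptr_to_u64(&next_key);
 *	attr.batch.in_batch = 0;
 *	for (;;) {
 *		attr.batch.count = 128;
 *		err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *		... process attr.batch.count copied elements, even on ENOENT ...
 *		if (err)
 *			break;
 *		attr.batch.in_batch = ptr_to_u64(&next_key);
 *	}
 *
 * ptr_to_u64() is a hypothetical cast helper; batch.count is updated in
 * place with the number of elements actually copied out.
 */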
(!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || 2116 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 2117 err = -EPERM; 2118 goto err_put; 2119 } 2120 2121 if (attr->flags && 2122 (map->map_type == BPF_MAP_TYPE_QUEUE || 2123 map->map_type == BPF_MAP_TYPE_STACK)) { 2124 err = -EINVAL; 2125 goto err_put; 2126 } 2127 2128 if ((attr->flags & BPF_F_LOCK) && 2129 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 2130 err = -EINVAL; 2131 goto err_put; 2132 } 2133 2134 key = __bpf_copy_key(ukey, map->key_size); 2135 if (IS_ERR(key)) { 2136 err = PTR_ERR(key); 2137 goto err_put; 2138 } 2139 2140 value_size = bpf_map_value_size(map); 2141 2142 err = -ENOMEM; 2143 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN); 2144 if (!value) 2145 goto free_key; 2146 2147 err = -ENOTSUPP; 2148 if (map->map_type == BPF_MAP_TYPE_QUEUE || 2149 map->map_type == BPF_MAP_TYPE_STACK) { 2150 err = map->ops->map_pop_elem(map, value); 2151 } else if (map->map_type == BPF_MAP_TYPE_HASH || 2152 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 2153 map->map_type == BPF_MAP_TYPE_LRU_HASH || 2154 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 2155 if (!bpf_map_is_offloaded(map)) { 2156 bpf_disable_instrumentation(); 2157 rcu_read_lock(); 2158 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); 2159 rcu_read_unlock(); 2160 bpf_enable_instrumentation(); 2161 } 2162 } 2163 2164 if (err) 2165 goto free_value; 2166 2167 if (copy_to_user(uvalue, value, value_size) != 0) { 2168 err = -EFAULT; 2169 goto free_value; 2170 } 2171 2172 err = 0; 2173 2174 free_value: 2175 kvfree(value); 2176 free_key: 2177 kvfree(key); 2178 err_put: 2179 bpf_map_write_active_dec(map); 2180 return err; 2181 } 2182 2183 #define BPF_MAP_FREEZE_LAST_FIELD map_fd 2184 2185 static int map_freeze(const union bpf_attr *attr) 2186 { 2187 int err = 0; 2188 struct bpf_map *map; 2189 2190 if (CHECK_ATTR(BPF_MAP_FREEZE)) 2191 return -EINVAL; 2192 2193 CLASS(fd, f)(attr->map_fd); 2194 map = __bpf_map_get(f); 2195 if (IS_ERR(map)) 2196 return PTR_ERR(map); 2197 2198 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) 2199 return -ENOTSUPP; 2200 2201 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) 2202 return -EPERM; 2203 2204 mutex_lock(&map->freeze_mutex); 2205 if (bpf_map_write_active(map)) { 2206 err = -EBUSY; 2207 goto err_put; 2208 } 2209 if (READ_ONCE(map->frozen)) { 2210 err = -EBUSY; 2211 goto err_put; 2212 } 2213 2214 WRITE_ONCE(map->frozen, true); 2215 err_put: 2216 mutex_unlock(&map->freeze_mutex); 2217 return err; 2218 } 2219 2220 static const struct bpf_prog_ops * const bpf_prog_types[] = { 2221 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 2222 [_id] = & _name ## _prog_ops, 2223 #define BPF_MAP_TYPE(_id, _ops) 2224 #define BPF_LINK_TYPE(_id, _name) 2225 #include <linux/bpf_types.h> 2226 #undef BPF_PROG_TYPE 2227 #undef BPF_MAP_TYPE 2228 #undef BPF_LINK_TYPE 2229 }; 2230 2231 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) 2232 { 2233 const struct bpf_prog_ops *ops; 2234 2235 if (type >= ARRAY_SIZE(bpf_prog_types)) 2236 return -EINVAL; 2237 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); 2238 ops = bpf_prog_types[type]; 2239 if (!ops) 2240 return -EINVAL; 2241 2242 if (!bpf_prog_is_offloaded(prog->aux)) 2243 prog->aux->ops = ops; 2244 else 2245 prog->aux->ops = &bpf_offload_prog_ops; 2246 prog->type = type; 2247 return 0; 2248 } 2249 2250 enum bpf_audit { 2251 BPF_AUDIT_LOAD, 2252 BPF_AUDIT_UNLOAD, 2253 BPF_AUDIT_MAX, 2254 }; 2255 2256 
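/* String names for the audit ops above; bpf_audit_prog() below emits them in
 * AUDIT_BPF records. A resulting audit log line looks roughly like this
 * (illustrative values only):
 *
 *	type=BPF msg=audit(1700000000.123:45): prog-id=42 op=LOAD
 */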
static const char * const bpf_audit_str[BPF_AUDIT_MAX] = { 2257 [BPF_AUDIT_LOAD] = "LOAD", 2258 [BPF_AUDIT_UNLOAD] = "UNLOAD", 2259 }; 2260 2261 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op) 2262 { 2263 struct audit_context *ctx = NULL; 2264 struct audit_buffer *ab; 2265 2266 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX)) 2267 return; 2268 if (audit_enabled == AUDIT_OFF) 2269 return; 2270 if (!in_irq() && !irqs_disabled()) 2271 ctx = audit_context(); 2272 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); 2273 if (unlikely(!ab)) 2274 return; 2275 audit_log_format(ab, "prog-id=%u op=%s", 2276 prog->aux->id, bpf_audit_str[op]); 2277 audit_log_end(ab); 2278 } 2279 2280 static int bpf_prog_alloc_id(struct bpf_prog *prog) 2281 { 2282 int id; 2283 2284 idr_preload(GFP_KERNEL); 2285 spin_lock_bh(&prog_idr_lock); 2286 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); 2287 if (id > 0) 2288 prog->aux->id = id; 2289 spin_unlock_bh(&prog_idr_lock); 2290 idr_preload_end(); 2291 2292 /* id is in [1, INT_MAX) */ 2293 if (WARN_ON_ONCE(!id)) 2294 return -ENOSPC; 2295 2296 return id > 0 ? 0 : id; 2297 } 2298 2299 void bpf_prog_free_id(struct bpf_prog *prog) 2300 { 2301 unsigned long flags; 2302 2303 /* cBPF to eBPF migrations are currently not in the idr store. 2304 * Offloaded programs are removed from the store when their device 2305 * disappears - even if someone grabs an fd to them they are unusable, 2306 * simply waiting for refcnt to drop to be freed. 2307 */ 2308 if (!prog->aux->id) 2309 return; 2310 2311 spin_lock_irqsave(&prog_idr_lock, flags); 2312 idr_remove(&prog_idr, prog->aux->id); 2313 prog->aux->id = 0; 2314 spin_unlock_irqrestore(&prog_idr_lock, flags); 2315 } 2316 2317 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 2318 { 2319 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 2320 2321 kvfree(aux->func_info); 2322 kfree(aux->func_info_aux); 2323 free_uid(aux->user); 2324 security_bpf_prog_free(aux->prog); 2325 bpf_prog_free(aux->prog); 2326 } 2327 2328 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 2329 { 2330 bpf_prog_kallsyms_del_all(prog); 2331 btf_put(prog->aux->btf); 2332 module_put(prog->aux->mod); 2333 kvfree(prog->aux->jited_linfo); 2334 kvfree(prog->aux->linfo); 2335 kfree(prog->aux->kfunc_tab); 2336 kfree(prog->aux->ctx_arg_info); 2337 if (prog->aux->attach_btf) 2338 btf_put(prog->aux->attach_btf); 2339 2340 if (deferred) { 2341 if (prog->sleepable) 2342 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); 2343 else 2344 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 2345 } else { 2346 __bpf_prog_put_rcu(&prog->aux->rcu); 2347 } 2348 } 2349 2350 static void bpf_prog_put_deferred(struct work_struct *work) 2351 { 2352 struct bpf_prog_aux *aux; 2353 struct bpf_prog *prog; 2354 2355 aux = container_of(work, struct bpf_prog_aux, work); 2356 prog = aux->prog; 2357 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2358 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2359 bpf_prog_free_id(prog); 2360 __bpf_prog_put_noref(prog, true); 2361 } 2362 2363 static void __bpf_prog_put(struct bpf_prog *prog) 2364 { 2365 struct bpf_prog_aux *aux = prog->aux; 2366 2367 if (atomic64_dec_and_test(&aux->refcnt)) { 2368 if (in_irq() || irqs_disabled()) { 2369 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2370 schedule_work(&aux->work); 2371 } else { 2372 bpf_prog_put_deferred(&aux->work); 2373 } 2374 } 2375 } 2376 2377 void bpf_prog_put(struct bpf_prog *prog) 2378 { 2379 __bpf_prog_put(prog); 2380 } 2381 
EXPORT_SYMBOL_GPL(bpf_prog_put); 2382 2383 static int bpf_prog_release(struct inode *inode, struct file *filp) 2384 { 2385 struct bpf_prog *prog = filp->private_data; 2386 2387 bpf_prog_put(prog); 2388 return 0; 2389 } 2390 2391 struct bpf_prog_kstats { 2392 u64 nsecs; 2393 u64 cnt; 2394 u64 misses; 2395 }; 2396 2397 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2398 { 2399 struct bpf_prog_stats *stats; 2400 unsigned int flags; 2401 2402 stats = this_cpu_ptr(prog->stats); 2403 flags = u64_stats_update_begin_irqsave(&stats->syncp); 2404 u64_stats_inc(&stats->misses); 2405 u64_stats_update_end_irqrestore(&stats->syncp, flags); 2406 } 2407 2408 static void bpf_prog_get_stats(const struct bpf_prog *prog, 2409 struct bpf_prog_kstats *stats) 2410 { 2411 u64 nsecs = 0, cnt = 0, misses = 0; 2412 int cpu; 2413 2414 for_each_possible_cpu(cpu) { 2415 const struct bpf_prog_stats *st; 2416 unsigned int start; 2417 u64 tnsecs, tcnt, tmisses; 2418 2419 st = per_cpu_ptr(prog->stats, cpu); 2420 do { 2421 start = u64_stats_fetch_begin(&st->syncp); 2422 tnsecs = u64_stats_read(&st->nsecs); 2423 tcnt = u64_stats_read(&st->cnt); 2424 tmisses = u64_stats_read(&st->misses); 2425 } while (u64_stats_fetch_retry(&st->syncp, start)); 2426 nsecs += tnsecs; 2427 cnt += tcnt; 2428 misses += tmisses; 2429 } 2430 stats->nsecs = nsecs; 2431 stats->cnt = cnt; 2432 stats->misses = misses; 2433 } 2434 2435 #ifdef CONFIG_PROC_FS 2436 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 2437 { 2438 const struct bpf_prog *prog = filp->private_data; 2439 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2440 struct bpf_prog_kstats stats; 2441 2442 bpf_prog_get_stats(prog, &stats); 2443 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2444 seq_printf(m, 2445 "prog_type:\t%u\n" 2446 "prog_jited:\t%u\n" 2447 "prog_tag:\t%s\n" 2448 "memlock:\t%llu\n" 2449 "prog_id:\t%u\n" 2450 "run_time_ns:\t%llu\n" 2451 "run_cnt:\t%llu\n" 2452 "recursion_misses:\t%llu\n" 2453 "verified_insns:\t%u\n", 2454 prog->type, 2455 prog->jited, 2456 prog_tag, 2457 prog->pages * 1ULL << PAGE_SHIFT, 2458 prog->aux->id, 2459 stats.nsecs, 2460 stats.cnt, 2461 stats.misses, 2462 prog->aux->verified_insns); 2463 } 2464 #endif 2465 2466 const struct file_operations bpf_prog_fops = { 2467 #ifdef CONFIG_PROC_FS 2468 .show_fdinfo = bpf_prog_show_fdinfo, 2469 #endif 2470 .release = bpf_prog_release, 2471 .read = bpf_dummy_read, 2472 .write = bpf_dummy_write, 2473 }; 2474 2475 int bpf_prog_new_fd(struct bpf_prog *prog) 2476 { 2477 int ret; 2478 2479 ret = security_bpf_prog(prog); 2480 if (ret < 0) 2481 return ret; 2482 2483 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 2484 O_RDWR | O_CLOEXEC); 2485 } 2486 2487 void bpf_prog_add(struct bpf_prog *prog, int i) 2488 { 2489 atomic64_add(i, &prog->aux->refcnt); 2490 } 2491 EXPORT_SYMBOL_GPL(bpf_prog_add); 2492 2493 void bpf_prog_sub(struct bpf_prog *prog, int i) 2494 { 2495 /* Only to be used for undoing previous bpf_prog_add() in some 2496 * error path. We still know that another entity in our call 2497 * path holds a reference to the program, thus atomic_sub() can 2498 * be safely used in such cases! 
2499 */ 2500 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 2501 } 2502 EXPORT_SYMBOL_GPL(bpf_prog_sub); 2503 2504 void bpf_prog_inc(struct bpf_prog *prog) 2505 { 2506 atomic64_inc(&prog->aux->refcnt); 2507 } 2508 EXPORT_SYMBOL_GPL(bpf_prog_inc); 2509 2510 /* prog_idr_lock should have been held */ 2511 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 2512 { 2513 int refold; 2514 2515 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 2516 2517 if (!refold) 2518 return ERR_PTR(-ENOENT); 2519 2520 return prog; 2521 } 2522 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 2523 2524 bool bpf_prog_get_ok(struct bpf_prog *prog, 2525 enum bpf_prog_type *attach_type, bool attach_drv) 2526 { 2527 /* not an attachment, just a refcount inc, always allow */ 2528 if (!attach_type) 2529 return true; 2530 2531 if (prog->type != *attach_type) 2532 return false; 2533 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) 2534 return false; 2535 2536 return true; 2537 } 2538 2539 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 2540 bool attach_drv) 2541 { 2542 CLASS(fd, f)(ufd); 2543 struct bpf_prog *prog; 2544 2545 if (fd_empty(f)) 2546 return ERR_PTR(-EBADF); 2547 if (fd_file(f)->f_op != &bpf_prog_fops) 2548 return ERR_PTR(-EINVAL); 2549 2550 prog = fd_file(f)->private_data; 2551 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) 2552 return ERR_PTR(-EINVAL); 2553 2554 bpf_prog_inc(prog); 2555 return prog; 2556 } 2557 2558 struct bpf_prog *bpf_prog_get(u32 ufd) 2559 { 2560 return __bpf_prog_get(ufd, NULL, false); 2561 } 2562 2563 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2564 bool attach_drv) 2565 { 2566 return __bpf_prog_get(ufd, &type, attach_drv); 2567 } 2568 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 2569 2570 /* Initially all BPF programs could be loaded w/o specifying 2571 * expected_attach_type. Later for some of them specifying expected_attach_type 2572 * at load time became required so that program could be validated properly. 2573 * Programs of types that are allowed to be loaded both w/ and w/o (for 2574 * backward compatibility) expected_attach_type, should have the default attach 2575 * type assigned to expected_attach_type for the latter case, so that it can be 2576 * validated later at attach time. 2577 * 2578 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 2579 * prog type requires it but has some attach types that have to be backward 2580 * compatible. 2581 */ 2582 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 2583 { 2584 switch (attr->prog_type) { 2585 case BPF_PROG_TYPE_CGROUP_SOCK: 2586 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 2587 * exist so checking for non-zero is the way to go here. 
2588 */ 2589 if (!attr->expected_attach_type) 2590 attr->expected_attach_type = 2591 BPF_CGROUP_INET_SOCK_CREATE; 2592 break; 2593 case BPF_PROG_TYPE_SK_REUSEPORT: 2594 if (!attr->expected_attach_type) 2595 attr->expected_attach_type = 2596 BPF_SK_REUSEPORT_SELECT; 2597 break; 2598 } 2599 } 2600 2601 static int 2602 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 2603 enum bpf_attach_type expected_attach_type, 2604 struct btf *attach_btf, u32 btf_id, 2605 struct bpf_prog *dst_prog) 2606 { 2607 if (btf_id) { 2608 if (btf_id > BTF_MAX_TYPE) 2609 return -EINVAL; 2610 2611 if (!attach_btf && !dst_prog) 2612 return -EINVAL; 2613 2614 switch (prog_type) { 2615 case BPF_PROG_TYPE_TRACING: 2616 case BPF_PROG_TYPE_LSM: 2617 case BPF_PROG_TYPE_STRUCT_OPS: 2618 case BPF_PROG_TYPE_EXT: 2619 break; 2620 default: 2621 return -EINVAL; 2622 } 2623 } 2624 2625 if (attach_btf && (!btf_id || dst_prog)) 2626 return -EINVAL; 2627 2628 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING && 2629 prog_type != BPF_PROG_TYPE_EXT) 2630 return -EINVAL; 2631 2632 switch (prog_type) { 2633 case BPF_PROG_TYPE_CGROUP_SOCK: 2634 switch (expected_attach_type) { 2635 case BPF_CGROUP_INET_SOCK_CREATE: 2636 case BPF_CGROUP_INET_SOCK_RELEASE: 2637 case BPF_CGROUP_INET4_POST_BIND: 2638 case BPF_CGROUP_INET6_POST_BIND: 2639 return 0; 2640 default: 2641 return -EINVAL; 2642 } 2643 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2644 switch (expected_attach_type) { 2645 case BPF_CGROUP_INET4_BIND: 2646 case BPF_CGROUP_INET6_BIND: 2647 case BPF_CGROUP_INET4_CONNECT: 2648 case BPF_CGROUP_INET6_CONNECT: 2649 case BPF_CGROUP_UNIX_CONNECT: 2650 case BPF_CGROUP_INET4_GETPEERNAME: 2651 case BPF_CGROUP_INET6_GETPEERNAME: 2652 case BPF_CGROUP_UNIX_GETPEERNAME: 2653 case BPF_CGROUP_INET4_GETSOCKNAME: 2654 case BPF_CGROUP_INET6_GETSOCKNAME: 2655 case BPF_CGROUP_UNIX_GETSOCKNAME: 2656 case BPF_CGROUP_UDP4_SENDMSG: 2657 case BPF_CGROUP_UDP6_SENDMSG: 2658 case BPF_CGROUP_UNIX_SENDMSG: 2659 case BPF_CGROUP_UDP4_RECVMSG: 2660 case BPF_CGROUP_UDP6_RECVMSG: 2661 case BPF_CGROUP_UNIX_RECVMSG: 2662 return 0; 2663 default: 2664 return -EINVAL; 2665 } 2666 case BPF_PROG_TYPE_CGROUP_SKB: 2667 switch (expected_attach_type) { 2668 case BPF_CGROUP_INET_INGRESS: 2669 case BPF_CGROUP_INET_EGRESS: 2670 return 0; 2671 default: 2672 return -EINVAL; 2673 } 2674 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2675 switch (expected_attach_type) { 2676 case BPF_CGROUP_SETSOCKOPT: 2677 case BPF_CGROUP_GETSOCKOPT: 2678 return 0; 2679 default: 2680 return -EINVAL; 2681 } 2682 case BPF_PROG_TYPE_SK_LOOKUP: 2683 if (expected_attach_type == BPF_SK_LOOKUP) 2684 return 0; 2685 return -EINVAL; 2686 case BPF_PROG_TYPE_SK_REUSEPORT: 2687 switch (expected_attach_type) { 2688 case BPF_SK_REUSEPORT_SELECT: 2689 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2690 return 0; 2691 default: 2692 return -EINVAL; 2693 } 2694 case BPF_PROG_TYPE_NETFILTER: 2695 if (expected_attach_type == BPF_NETFILTER) 2696 return 0; 2697 return -EINVAL; 2698 case BPF_PROG_TYPE_SYSCALL: 2699 case BPF_PROG_TYPE_EXT: 2700 if (expected_attach_type) 2701 return -EINVAL; 2702 fallthrough; 2703 default: 2704 return 0; 2705 } 2706 } 2707 2708 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2709 { 2710 switch (prog_type) { 2711 case BPF_PROG_TYPE_SCHED_CLS: 2712 case BPF_PROG_TYPE_SCHED_ACT: 2713 case BPF_PROG_TYPE_XDP: 2714 case BPF_PROG_TYPE_LWT_IN: 2715 case BPF_PROG_TYPE_LWT_OUT: 2716 case BPF_PROG_TYPE_LWT_XMIT: 2717 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2718 case BPF_PROG_TYPE_SK_SKB: 2719 case 
BPF_PROG_TYPE_SK_MSG: 2720 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2721 case BPF_PROG_TYPE_CGROUP_DEVICE: 2722 case BPF_PROG_TYPE_CGROUP_SOCK: 2723 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2724 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2725 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2726 case BPF_PROG_TYPE_SOCK_OPS: 2727 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2728 case BPF_PROG_TYPE_NETFILTER: 2729 return true; 2730 case BPF_PROG_TYPE_CGROUP_SKB: 2731 /* always unpriv */ 2732 case BPF_PROG_TYPE_SK_REUSEPORT: 2733 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2734 default: 2735 return false; 2736 } 2737 } 2738 2739 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2740 { 2741 switch (prog_type) { 2742 case BPF_PROG_TYPE_KPROBE: 2743 case BPF_PROG_TYPE_TRACEPOINT: 2744 case BPF_PROG_TYPE_PERF_EVENT: 2745 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2746 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2747 case BPF_PROG_TYPE_TRACING: 2748 case BPF_PROG_TYPE_LSM: 2749 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2750 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2751 return true; 2752 default: 2753 return false; 2754 } 2755 } 2756 2757 /* last field in 'union bpf_attr' used by this command */ 2758 #define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt 2759 2760 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) 2761 { 2762 enum bpf_prog_type type = attr->prog_type; 2763 struct bpf_prog *prog, *dst_prog = NULL; 2764 struct btf *attach_btf = NULL; 2765 struct bpf_token *token = NULL; 2766 bool bpf_cap; 2767 int err; 2768 char license[128]; 2769 2770 if (CHECK_ATTR(BPF_PROG_LOAD)) 2771 return -EINVAL; 2772 2773 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2774 BPF_F_ANY_ALIGNMENT | 2775 BPF_F_TEST_STATE_FREQ | 2776 BPF_F_SLEEPABLE | 2777 BPF_F_TEST_RND_HI32 | 2778 BPF_F_XDP_HAS_FRAGS | 2779 BPF_F_XDP_DEV_BOUND_ONLY | 2780 BPF_F_TEST_REG_INVARIANTS | 2781 BPF_F_TOKEN_FD)) 2782 return -EINVAL; 2783 2784 bpf_prog_load_fixup_attach_type(attr); 2785 2786 if (attr->prog_flags & BPF_F_TOKEN_FD) { 2787 token = bpf_token_get_from_fd(attr->prog_token_fd); 2788 if (IS_ERR(token)) 2789 return PTR_ERR(token); 2790 /* if current token doesn't grant prog loading permissions, 2791 * then we can't use this token, so ignore it and rely on 2792 * system-wide capabilities checks 2793 */ 2794 if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) || 2795 !bpf_token_allow_prog_type(token, attr->prog_type, 2796 attr->expected_attach_type)) { 2797 bpf_token_put(token); 2798 token = NULL; 2799 } 2800 } 2801 2802 bpf_cap = bpf_token_capable(token, CAP_BPF); 2803 err = -EPERM; 2804 2805 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2806 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2807 !bpf_cap) 2808 goto put_token; 2809 2810 /* Intent here is for unprivileged_bpf_disabled to block BPF program 2811 * creation for unprivileged users; other actions depend 2812 * on fd availability and access to bpffs, so are dependent on 2813 * object creation success. Even with unprivileged BPF disabled, 2814 * capability checks are still carried out for these 2815 * and other operations. 2816 */ 2817 if (sysctl_unprivileged_bpf_disabled && !bpf_cap) 2818 goto put_token; 2819 2820 if (attr->insn_cnt == 0 || 2821 attr->insn_cnt > (bpf_cap ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) { 2822 err = -E2BIG; 2823 goto put_token; 2824 } 2825 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2826 type != BPF_PROG_TYPE_CGROUP_SKB && 2827 !bpf_cap) 2828 goto put_token; 2829 2830 if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN)) 2831 goto put_token; 2832 if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON)) 2833 goto put_token; 2834 2835 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog 2836 * or btf, we need to check which one it is 2837 */ 2838 if (attr->attach_prog_fd) { 2839 dst_prog = bpf_prog_get(attr->attach_prog_fd); 2840 if (IS_ERR(dst_prog)) { 2841 dst_prog = NULL; 2842 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); 2843 if (IS_ERR(attach_btf)) { 2844 err = -EINVAL; 2845 goto put_token; 2846 } 2847 if (!btf_is_kernel(attach_btf)) { 2848 /* attaching through specifying bpf_prog's BTF 2849 * objects directly might be supported eventually 2850 */ 2851 btf_put(attach_btf); 2852 err = -ENOTSUPP; 2853 goto put_token; 2854 } 2855 } 2856 } else if (attr->attach_btf_id) { 2857 /* fall back to vmlinux BTF, if BTF type ID is specified */ 2858 attach_btf = bpf_get_btf_vmlinux(); 2859 if (IS_ERR(attach_btf)) { 2860 err = PTR_ERR(attach_btf); 2861 goto put_token; 2862 } 2863 if (!attach_btf) { 2864 err = -EINVAL; 2865 goto put_token; 2866 } 2867 btf_get(attach_btf); 2868 } 2869 2870 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2871 attach_btf, attr->attach_btf_id, 2872 dst_prog)) { 2873 if (dst_prog) 2874 bpf_prog_put(dst_prog); 2875 if (attach_btf) 2876 btf_put(attach_btf); 2877 err = -EINVAL; 2878 goto put_token; 2879 } 2880 2881 /* plain bpf_prog allocation */ 2882 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2883 if (!prog) { 2884 if (dst_prog) 2885 bpf_prog_put(dst_prog); 2886 if (attach_btf) 2887 btf_put(attach_btf); 2888 err = -ENOMEM; 2889 goto put_token; 2890 } 2891 2892 prog->expected_attach_type = attr->expected_attach_type; 2893 prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE); 2894 prog->aux->attach_btf = attach_btf; 2895 prog->aux->attach_btf_id = attr->attach_btf_id; 2896 prog->aux->dst_prog = dst_prog; 2897 prog->aux->dev_bound = !!attr->prog_ifindex; 2898 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; 2899 2900 /* move token into prog->aux, reuse taken refcnt */ 2901 prog->aux->token = token; 2902 token = NULL; 2903 2904 prog->aux->user = get_current_user(); 2905 prog->len = attr->insn_cnt; 2906 2907 err = -EFAULT; 2908 if (copy_from_bpfptr(prog->insns, 2909 make_bpfptr(attr->insns, uattr.is_kernel), 2910 bpf_prog_insn_size(prog)) != 0) 2911 goto free_prog; 2912 /* copy eBPF program license from user space */ 2913 if (strncpy_from_bpfptr(license, 2914 make_bpfptr(attr->license, uattr.is_kernel), 2915 sizeof(license) - 1) < 0) 2916 goto free_prog; 2917 license[sizeof(license) - 1] = 0; 2918 2919 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2920 prog->gpl_compatible = license_is_gpl_compatible(license) ?
1 : 0; 2921 2922 prog->orig_prog = NULL; 2923 prog->jited = 0; 2924 2925 atomic64_set(&prog->aux->refcnt, 1); 2926 2927 if (bpf_prog_is_dev_bound(prog->aux)) { 2928 err = bpf_prog_dev_bound_init(prog, attr); 2929 if (err) 2930 goto free_prog; 2931 } 2932 2933 if (type == BPF_PROG_TYPE_EXT && dst_prog && 2934 bpf_prog_is_dev_bound(dst_prog->aux)) { 2935 err = bpf_prog_dev_bound_inherit(prog, dst_prog); 2936 if (err) 2937 goto free_prog; 2938 } 2939 2940 /* 2941 * Bookkeeping for managing the program attachment chain. 2942 * 2943 * It might be tempting to set the attach_tracing_prog flag at attachment 2944 * time, but this would not prevent loading a bunch of tracing programs 2945 * first and then attaching them to one another. 2946 * 2947 * The flag attach_tracing_prog is set for the whole program lifecycle, and 2948 * doesn't have to be cleared in bpf_tracing_link_release, since tracing 2949 * programs cannot change their attachment target. 2950 */ 2951 if (type == BPF_PROG_TYPE_TRACING && dst_prog && 2952 dst_prog->type == BPF_PROG_TYPE_TRACING) { 2953 prog->aux->attach_tracing_prog = true; 2954 } 2955 2956 /* find program type: socket_filter vs tracing_filter */ 2957 err = find_prog_type(type, prog); 2958 if (err < 0) 2959 goto free_prog; 2960 2961 prog->aux->load_time = ktime_get_boottime_ns(); 2962 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2963 sizeof(attr->prog_name)); 2964 if (err < 0) 2965 goto free_prog; 2966 2967 err = security_bpf_prog_load(prog, attr, token, uattr.is_kernel); 2968 if (err) 2969 goto free_prog_sec; 2970 2971 /* run eBPF verifier */ 2972 err = bpf_check(&prog, attr, uattr, uattr_size); 2973 if (err < 0) 2974 goto free_used_maps; 2975 2976 prog = bpf_prog_select_runtime(prog, &err); 2977 if (err < 0) 2978 goto free_used_maps; 2979 2980 err = bpf_prog_alloc_id(prog); 2981 if (err) 2982 goto free_used_maps; 2983 2984 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2985 * effectively publicly exposed. However, retrieving via 2986 * bpf_prog_get_fd_by_id() will take another reference, 2987 * therefore it cannot be gone underneath us. 2988 * 2989 * Only for the time /after/ successful bpf_prog_new_fd() 2990 * and before returning to userspace, we might just hold 2991 * one reference and any parallel close on that fd could 2992 * rip everything out. Hence, below notifications must 2993 * happen before bpf_prog_new_fd(). 2994 * 2995 * Also, any failure handling from this point onwards must 2996 * be using bpf_prog_put() given the program is exposed. 2997 */ 2998 bpf_prog_kallsyms_add(prog); 2999 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 3000 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 3001 3002 err = bpf_prog_new_fd(prog); 3003 if (err < 0) 3004 bpf_prog_put(prog); 3005 return err; 3006 3007 free_used_maps: 3008 /* In case we have subprogs, we need to wait for a grace 3009 * period before we can tear down JIT memory since symbols 3010 * are already exposed under kallsyms.
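 * __bpf_prog_put_noref() below defers the actual free through RCU whenever
 * real_func_cnt is non-zero, which is why it is passed as the 'deferred'
 * argument.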
3011 */ 3012 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt); 3013 return err; 3014 3015 free_prog_sec: 3016 security_bpf_prog_free(prog); 3017 free_prog: 3018 free_uid(prog->aux->user); 3019 if (prog->aux->attach_btf) 3020 btf_put(prog->aux->attach_btf); 3021 bpf_prog_free(prog); 3022 put_token: 3023 bpf_token_put(token); 3024 return err; 3025 } 3026 3027 #define BPF_OBJ_LAST_FIELD path_fd 3028 3029 static int bpf_obj_pin(const union bpf_attr *attr) 3030 { 3031 int path_fd; 3032 3033 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) 3034 return -EINVAL; 3035 3036 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 3037 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 3038 return -EINVAL; 3039 3040 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 3041 return bpf_obj_pin_user(attr->bpf_fd, path_fd, 3042 u64_to_user_ptr(attr->pathname)); 3043 } 3044 3045 static int bpf_obj_get(const union bpf_attr *attr) 3046 { 3047 int path_fd; 3048 3049 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 3050 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) 3051 return -EINVAL; 3052 3053 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 3054 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 3055 return -EINVAL; 3056 3057 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 3058 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), 3059 attr->file_flags); 3060 } 3061 3062 /* bpf_link_init_sleepable() lets the caller specify whether the BPF link itself has 3063 * "sleepable" semantics, which normally would mean that BPF link's attach 3064 * hook can dereference link or link's underlying program for some time after 3065 * detachment due to the RCU Tasks Trace-based lifetime protection scheme. 3066 * The BPF program itself can be non-sleepable, yet, because it's transitively 3067 * reachable through BPF link, its freeing has to be delayed until after RCU 3068 * Tasks Trace GP. 3069 */ 3070 void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, 3071 const struct bpf_link_ops *ops, struct bpf_prog *prog, 3072 bool sleepable) 3073 { 3074 WARN_ON(ops->dealloc && ops->dealloc_deferred); 3075 atomic64_set(&link->refcnt, 1); 3076 link->type = type; 3077 link->sleepable = sleepable; 3078 link->id = 0; 3079 link->ops = ops; 3080 link->prog = prog; 3081 } 3082 3083 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 3084 const struct bpf_link_ops *ops, struct bpf_prog *prog) 3085 { 3086 bpf_link_init_sleepable(link, type, ops, prog, false); 3087 } 3088 3089 static void bpf_link_free_id(int id) 3090 { 3091 if (!id) 3092 return; 3093 3094 spin_lock_bh(&link_idr_lock); 3095 idr_remove(&link_idr, id); 3096 spin_unlock_bh(&link_idr_lock); 3097 } 3098 3099 /* Clean up bpf_link and corresponding anon_inode file and FD. After 3100 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 3101 * anon_inode's release() call. This helper marks bpf_link as 3102 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt 3103 * is not decremented; it's the responsibility of the calling code that failed 3104 * to complete bpf_link initialization. 3105 * This helper eventually calls link's dealloc callback, but does not call 3106 * link's release callback.
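 *
 * A typical usage sketch, mirroring the attach paths later in this file
 * (e.g. bpf_raw_tp_link_attach()); attach_to_hook() is a hypothetical
 * stand-in for the hook-specific registration step:
 *
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = attach_to_hook(link);
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);
 *		return err;
 *	}
 *	return bpf_link_settle(&link_primer);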
3107 */ 3108 void bpf_link_cleanup(struct bpf_link_primer *primer) 3109 { 3110 primer->link->prog = NULL; 3111 bpf_link_free_id(primer->id); 3112 fput(primer->file); 3113 put_unused_fd(primer->fd); 3114 } 3115 3116 void bpf_link_inc(struct bpf_link *link) 3117 { 3118 atomic64_inc(&link->refcnt); 3119 } 3120 3121 static void bpf_link_dealloc(struct bpf_link *link) 3122 { 3123 /* now that we know that bpf_link itself can't be reached, put underlying BPF program */ 3124 if (link->prog) 3125 bpf_prog_put(link->prog); 3126 3127 /* free bpf_link and its containing memory */ 3128 if (link->ops->dealloc_deferred) 3129 link->ops->dealloc_deferred(link); 3130 else 3131 link->ops->dealloc(link); 3132 } 3133 3134 static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu) 3135 { 3136 struct bpf_link *link = container_of(rcu, struct bpf_link, rcu); 3137 3138 bpf_link_dealloc(link); 3139 } 3140 3141 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu) 3142 { 3143 if (rcu_trace_implies_rcu_gp()) 3144 bpf_link_defer_dealloc_rcu_gp(rcu); 3145 else 3146 call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp); 3147 } 3148 3149 /* bpf_link_free is guaranteed to be called from process context */ 3150 static void bpf_link_free(struct bpf_link *link) 3151 { 3152 const struct bpf_link_ops *ops = link->ops; 3153 3154 bpf_link_free_id(link->id); 3155 /* detach BPF program, clean up used resources */ 3156 if (link->prog) 3157 ops->release(link); 3158 if (ops->dealloc_deferred) { 3159 /* Schedule BPF link deallocation, which will only then 3160 * trigger putting BPF program refcount. 3161 * If underlying BPF program is sleepable or BPF link's target 3162 * attach hookpoint is sleepable or otherwise requires RCU GPs 3163 * to ensure link and its underlying BPF program is not 3164 * reachable anymore, we need to first wait for RCU tasks 3165 * trace sync, and then go through "classic" RCU grace period 3166 */ 3167 if (link->sleepable || (link->prog && link->prog->sleepable)) 3168 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); 3169 else 3170 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); 3171 } else if (ops->dealloc) { 3172 bpf_link_dealloc(link); 3173 } 3174 } 3175 3176 static void bpf_link_put_deferred(struct work_struct *work) 3177 { 3178 struct bpf_link *link = container_of(work, struct bpf_link, work); 3179 3180 bpf_link_free(link); 3181 } 3182 3183 /* bpf_link_put might be called from atomic context. It needs to be called 3184 * from sleepable context in order to acquire sleeping locks during the process. 
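 * To satisfy both, the refcount drop itself happens here, while the actual
 * freeing of the link is punted to a workqueue (bpf_link_put_deferred()
 * above), which runs in sleepable process context.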
3185 */ 3186 void bpf_link_put(struct bpf_link *link) 3187 { 3188 if (!atomic64_dec_and_test(&link->refcnt)) 3189 return; 3190 3191 INIT_WORK(&link->work, bpf_link_put_deferred); 3192 schedule_work(&link->work); 3193 } 3194 EXPORT_SYMBOL(bpf_link_put); 3195 3196 static void bpf_link_put_direct(struct bpf_link *link) 3197 { 3198 if (!atomic64_dec_and_test(&link->refcnt)) 3199 return; 3200 bpf_link_free(link); 3201 } 3202 3203 static int bpf_link_release(struct inode *inode, struct file *filp) 3204 { 3205 struct bpf_link *link = filp->private_data; 3206 3207 bpf_link_put_direct(link); 3208 return 0; 3209 } 3210 3211 #ifdef CONFIG_PROC_FS 3212 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 3213 #define BPF_MAP_TYPE(_id, _ops) 3214 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 3215 static const char *bpf_link_type_strs[] = { 3216 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 3217 #include <linux/bpf_types.h> 3218 }; 3219 #undef BPF_PROG_TYPE 3220 #undef BPF_MAP_TYPE 3221 #undef BPF_LINK_TYPE 3222 3223 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 3224 { 3225 const struct bpf_link *link = filp->private_data; 3226 const struct bpf_prog *prog = link->prog; 3227 enum bpf_link_type type = link->type; 3228 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 3229 3230 if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) { 3231 if (link->type == BPF_LINK_TYPE_KPROBE_MULTI) 3232 seq_printf(m, "link_type:\t%s\n", link->flags == BPF_F_KPROBE_MULTI_RETURN ? 3233 "kretprobe_multi" : "kprobe_multi"); 3234 else if (link->type == BPF_LINK_TYPE_UPROBE_MULTI) 3235 seq_printf(m, "link_type:\t%s\n", link->flags == BPF_F_UPROBE_MULTI_RETURN ? 3236 "uretprobe_multi" : "uprobe_multi"); 3237 else 3238 seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]); 3239 } else { 3240 WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type); 3241 seq_printf(m, "link_type:\t<%u>\n", type); 3242 } 3243 seq_printf(m, "link_id:\t%u\n", link->id); 3244 3245 if (prog) { 3246 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 3247 seq_printf(m, 3248 "prog_tag:\t%s\n" 3249 "prog_id:\t%u\n", 3250 prog_tag, 3251 prog->aux->id); 3252 } 3253 if (link->ops->show_fdinfo) 3254 link->ops->show_fdinfo(link, m); 3255 } 3256 #endif 3257 3258 static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts) 3259 { 3260 struct bpf_link *link = file->private_data; 3261 3262 return link->ops->poll(file, pts); 3263 } 3264 3265 static const struct file_operations bpf_link_fops = { 3266 #ifdef CONFIG_PROC_FS 3267 .show_fdinfo = bpf_link_show_fdinfo, 3268 #endif 3269 .release = bpf_link_release, 3270 .read = bpf_dummy_read, 3271 .write = bpf_dummy_write, 3272 }; 3273 3274 static const struct file_operations bpf_link_fops_poll = { 3275 #ifdef CONFIG_PROC_FS 3276 .show_fdinfo = bpf_link_show_fdinfo, 3277 #endif 3278 .release = bpf_link_release, 3279 .read = bpf_dummy_read, 3280 .write = bpf_dummy_write, 3281 .poll = bpf_link_poll, 3282 }; 3283 3284 static int bpf_link_alloc_id(struct bpf_link *link) 3285 { 3286 int id; 3287 3288 idr_preload(GFP_KERNEL); 3289 spin_lock_bh(&link_idr_lock); 3290 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 3291 spin_unlock_bh(&link_idr_lock); 3292 idr_preload_end(); 3293 3294 return id; 3295 } 3296 3297 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, 3298 * reserving unused FD and allocating ID from link_idr. 
This is to be paired 3299 * with bpf_link_settle() to install FD and ID and expose bpf_link to 3300 * user-space, if bpf_link is successfully attached. If not, bpf_link and 3301 * pre-allocated resources are to be freed with bpf_link_cleanup() call. All the 3302 * transient state is passed around in struct bpf_link_primer. 3303 * This is the preferred way to create and initialize bpf_link, especially when 3304 * there are complicated and expensive operations in between creating bpf_link 3305 * itself and attaching it to a BPF hook. By using bpf_link_prime() and 3306 * bpf_link_settle(), kernel code using bpf_link doesn't have to perform 3307 * expensive (and potentially failing) rollback operations in the rare case 3308 * that the file, FD, or ID can't be allocated. 3309 */ 3310 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 3311 { 3312 struct file *file; 3313 int fd, id; 3314 3315 fd = get_unused_fd_flags(O_CLOEXEC); 3316 if (fd < 0) 3317 return fd; 3318 3319 3320 id = bpf_link_alloc_id(link); 3321 if (id < 0) { 3322 put_unused_fd(fd); 3323 return id; 3324 } 3325 3326 file = anon_inode_getfile("bpf_link", 3327 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, 3328 link, O_CLOEXEC); 3329 if (IS_ERR(file)) { 3330 bpf_link_free_id(id); 3331 put_unused_fd(fd); 3332 return PTR_ERR(file); 3333 } 3334 3335 primer->link = link; 3336 primer->file = file; 3337 primer->fd = fd; 3338 primer->id = id; 3339 return 0; 3340 } 3341 3342 int bpf_link_settle(struct bpf_link_primer *primer) 3343 { 3344 /* make bpf_link fetchable by ID */ 3345 spin_lock_bh(&link_idr_lock); 3346 primer->link->id = primer->id; 3347 spin_unlock_bh(&link_idr_lock); 3348 /* make bpf_link fetchable by FD */ 3349 fd_install(primer->fd, primer->file); 3350 /* pass through installed FD */ 3351 return primer->fd; 3352 } 3353 3354 int bpf_link_new_fd(struct bpf_link *link) 3355 { 3356 return anon_inode_getfd("bpf-link", 3357 link->ops->poll ?
&bpf_link_fops_poll : &bpf_link_fops, 3358 link, O_CLOEXEC); 3359 } 3360 3361 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 3362 { 3363 CLASS(fd, f)(ufd); 3364 struct bpf_link *link; 3365 3366 if (fd_empty(f)) 3367 return ERR_PTR(-EBADF); 3368 if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll) 3369 return ERR_PTR(-EINVAL); 3370 3371 link = fd_file(f)->private_data; 3372 bpf_link_inc(link); 3373 return link; 3374 } 3375 EXPORT_SYMBOL_NS(bpf_link_get_from_fd, "BPF_INTERNAL"); 3376 3377 static void bpf_tracing_link_release(struct bpf_link *link) 3378 { 3379 struct bpf_tracing_link *tr_link = 3380 container_of(link, struct bpf_tracing_link, link.link); 3381 3382 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, 3383 tr_link->trampoline, 3384 tr_link->tgt_prog)); 3385 3386 bpf_trampoline_put(tr_link->trampoline); 3387 3388 /* tgt_prog is NULL if target is a kernel function */ 3389 if (tr_link->tgt_prog) 3390 bpf_prog_put(tr_link->tgt_prog); 3391 } 3392 3393 static void bpf_tracing_link_dealloc(struct bpf_link *link) 3394 { 3395 struct bpf_tracing_link *tr_link = 3396 container_of(link, struct bpf_tracing_link, link.link); 3397 3398 kfree(tr_link); 3399 } 3400 3401 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 3402 struct seq_file *seq) 3403 { 3404 struct bpf_tracing_link *tr_link = 3405 container_of(link, struct bpf_tracing_link, link.link); 3406 u32 target_btf_id, target_obj_id; 3407 3408 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3409 &target_obj_id, &target_btf_id); 3410 seq_printf(seq, 3411 "attach_type:\t%d\n" 3412 "target_obj_id:\t%u\n" 3413 "target_btf_id:\t%u\n" 3414 "cookie:\t%llu\n", 3415 tr_link->attach_type, 3416 target_obj_id, 3417 target_btf_id, 3418 tr_link->link.cookie); 3419 } 3420 3421 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 3422 struct bpf_link_info *info) 3423 { 3424 struct bpf_tracing_link *tr_link = 3425 container_of(link, struct bpf_tracing_link, link.link); 3426 3427 info->tracing.attach_type = tr_link->attach_type; 3428 info->tracing.cookie = tr_link->link.cookie; 3429 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3430 &info->tracing.target_obj_id, 3431 &info->tracing.target_btf_id); 3432 3433 return 0; 3434 } 3435 3436 static const struct bpf_link_ops bpf_tracing_link_lops = { 3437 .release = bpf_tracing_link_release, 3438 .dealloc = bpf_tracing_link_dealloc, 3439 .show_fdinfo = bpf_tracing_link_show_fdinfo, 3440 .fill_link_info = bpf_tracing_link_fill_link_info, 3441 }; 3442 3443 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 3444 int tgt_prog_fd, 3445 u32 btf_id, 3446 u64 bpf_cookie) 3447 { 3448 struct bpf_link_primer link_primer; 3449 struct bpf_prog *tgt_prog = NULL; 3450 struct bpf_trampoline *tr = NULL; 3451 struct bpf_tracing_link *link; 3452 u64 key = 0; 3453 int err; 3454 3455 switch (prog->type) { 3456 case BPF_PROG_TYPE_TRACING: 3457 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 3458 prog->expected_attach_type != BPF_TRACE_FEXIT && 3459 prog->expected_attach_type != BPF_MODIFY_RETURN) { 3460 err = -EINVAL; 3461 goto out_put_prog; 3462 } 3463 break; 3464 case BPF_PROG_TYPE_EXT: 3465 if (prog->expected_attach_type != 0) { 3466 err = -EINVAL; 3467 goto out_put_prog; 3468 } 3469 break; 3470 case BPF_PROG_TYPE_LSM: 3471 if (prog->expected_attach_type != BPF_LSM_MAC) { 3472 err = -EINVAL; 3473 goto out_put_prog; 3474 } 3475 break; 3476 default: 3477 err = -EINVAL; 3478 goto out_put_prog; 3479 } 3480 3481 if (!!tgt_prog_fd != !!btf_id) { 
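		/* tgt_prog_fd and btf_id together identify the attach target;
		 * one without the other is ambiguous, so reject such requests.
		 */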
3482 err = -EINVAL; 3483 goto out_put_prog; 3484 } 3485 3486 if (tgt_prog_fd) { 3487 /* 3488 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this 3489 * part would be changed to implement the same for 3490 * BPF_PROG_TYPE_TRACING, do not forget to update the way how 3491 * attach_tracing_prog flag is set. 3492 */ 3493 if (prog->type != BPF_PROG_TYPE_EXT) { 3494 err = -EINVAL; 3495 goto out_put_prog; 3496 } 3497 3498 tgt_prog = bpf_prog_get(tgt_prog_fd); 3499 if (IS_ERR(tgt_prog)) { 3500 err = PTR_ERR(tgt_prog); 3501 tgt_prog = NULL; 3502 goto out_put_prog; 3503 } 3504 3505 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 3506 } 3507 3508 link = kzalloc(sizeof(*link), GFP_USER); 3509 if (!link) { 3510 err = -ENOMEM; 3511 goto out_put_prog; 3512 } 3513 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, 3514 &bpf_tracing_link_lops, prog); 3515 link->attach_type = prog->expected_attach_type; 3516 link->link.cookie = bpf_cookie; 3517 3518 mutex_lock(&prog->aux->dst_mutex); 3519 3520 /* There are a few possible cases here: 3521 * 3522 * - if prog->aux->dst_trampoline is set, the program was just loaded 3523 * and not yet attached to anything, so we can use the values stored 3524 * in prog->aux 3525 * 3526 * - if prog->aux->dst_trampoline is NULL, the program has already been 3527 * attached to a target and its initial target was cleared (below) 3528 * 3529 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 3530 * target_btf_id using the link_create API. 3531 * 3532 * - if tgt_prog == NULL when this function was called using the old 3533 * raw_tracepoint_open API, and we need a target from prog->aux 3534 * 3535 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 3536 * was detached and is going for re-attachment. 3537 * 3538 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf 3539 * are NULL, then program was already attached and user did not provide 3540 * tgt_prog_fd so we have no way to find out or create trampoline 3541 */ 3542 if (!prog->aux->dst_trampoline && !tgt_prog) { 3543 /* 3544 * Allow re-attach for TRACING and LSM programs. If it's 3545 * currently linked, bpf_trampoline_link_prog will fail. 3546 * EXT programs need to specify tgt_prog_fd, so they 3547 * re-attach in separate code path. 3548 */ 3549 if (prog->type != BPF_PROG_TYPE_TRACING && 3550 prog->type != BPF_PROG_TYPE_LSM) { 3551 err = -EINVAL; 3552 goto out_unlock; 3553 } 3554 /* We can allow re-attach only if we have valid attach_btf. 
*/ 3555 if (!prog->aux->attach_btf) { 3556 err = -EINVAL; 3557 goto out_unlock; 3558 } 3559 btf_id = prog->aux->attach_btf_id; 3560 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 3561 } 3562 3563 if (!prog->aux->dst_trampoline || 3564 (key && key != prog->aux->dst_trampoline->key)) { 3565 /* If there is no saved target, or the specified target is 3566 * different from the destination specified at load time, we 3567 * need a new trampoline and a check for compatibility 3568 */ 3569 struct bpf_attach_target_info tgt_info = {}; 3570 3571 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 3572 &tgt_info); 3573 if (err) 3574 goto out_unlock; 3575 3576 if (tgt_info.tgt_mod) { 3577 module_put(prog->aux->mod); 3578 prog->aux->mod = tgt_info.tgt_mod; 3579 } 3580 3581 tr = bpf_trampoline_get(key, &tgt_info); 3582 if (!tr) { 3583 err = -ENOMEM; 3584 goto out_unlock; 3585 } 3586 } else { 3587 /* The caller didn't specify a target, or the target was the 3588 * same as the destination supplied during program load. This 3589 * means we can reuse the trampoline and reference from program 3590 * load time, and there is no need to allocate a new one. This 3591 * can only happen once for any program, as the saved values in 3592 * prog->aux are cleared below. 3593 */ 3594 tr = prog->aux->dst_trampoline; 3595 tgt_prog = prog->aux->dst_prog; 3596 } 3597 3598 err = bpf_link_prime(&link->link.link, &link_primer); 3599 if (err) 3600 goto out_unlock; 3601 3602 err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog); 3603 if (err) { 3604 bpf_link_cleanup(&link_primer); 3605 link = NULL; 3606 goto out_unlock; 3607 } 3608 3609 link->tgt_prog = tgt_prog; 3610 link->trampoline = tr; 3611 3612 /* Always clear the trampoline and target prog from prog->aux to make 3613 * sure the original attach destination is not kept alive after a 3614 * program is (re-)attached to another target. 
3615 */ 3616 if (prog->aux->dst_prog && 3617 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 3618 /* got extra prog ref from syscall, or attaching to different prog */ 3619 bpf_prog_put(prog->aux->dst_prog); 3620 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 3621 /* we allocated a new trampoline, so free the old one */ 3622 bpf_trampoline_put(prog->aux->dst_trampoline); 3623 3624 prog->aux->dst_prog = NULL; 3625 prog->aux->dst_trampoline = NULL; 3626 mutex_unlock(&prog->aux->dst_mutex); 3627 3628 return bpf_link_settle(&link_primer); 3629 out_unlock: 3630 if (tr && tr != prog->aux->dst_trampoline) 3631 bpf_trampoline_put(tr); 3632 mutex_unlock(&prog->aux->dst_mutex); 3633 kfree(link); 3634 out_put_prog: 3635 if (tgt_prog_fd && tgt_prog) 3636 bpf_prog_put(tgt_prog); 3637 return err; 3638 } 3639 3640 static void bpf_raw_tp_link_release(struct bpf_link *link) 3641 { 3642 struct bpf_raw_tp_link *raw_tp = 3643 container_of(link, struct bpf_raw_tp_link, link); 3644 3645 bpf_probe_unregister(raw_tp->btp, raw_tp); 3646 bpf_put_raw_tracepoint(raw_tp->btp); 3647 } 3648 3649 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 3650 { 3651 struct bpf_raw_tp_link *raw_tp = 3652 container_of(link, struct bpf_raw_tp_link, link); 3653 3654 kfree(raw_tp); 3655 } 3656 3657 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 3658 struct seq_file *seq) 3659 { 3660 struct bpf_raw_tp_link *raw_tp_link = 3661 container_of(link, struct bpf_raw_tp_link, link); 3662 3663 seq_printf(seq, 3664 "tp_name:\t%s\n" 3665 "cookie:\t%llu\n", 3666 raw_tp_link->btp->tp->name, 3667 raw_tp_link->cookie); 3668 } 3669 3670 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen, 3671 u32 len) 3672 { 3673 if (ulen >= len + 1) { 3674 if (copy_to_user(ubuf, buf, len + 1)) 3675 return -EFAULT; 3676 } else { 3677 char zero = '\0'; 3678 3679 if (copy_to_user(ubuf, buf, ulen - 1)) 3680 return -EFAULT; 3681 if (put_user(zero, ubuf + ulen - 1)) 3682 return -EFAULT; 3683 return -ENOSPC; 3684 } 3685 3686 return 0; 3687 } 3688 3689 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 3690 struct bpf_link_info *info) 3691 { 3692 struct bpf_raw_tp_link *raw_tp_link = 3693 container_of(link, struct bpf_raw_tp_link, link); 3694 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 3695 const char *tp_name = raw_tp_link->btp->tp->name; 3696 u32 ulen = info->raw_tracepoint.tp_name_len; 3697 size_t tp_len = strlen(tp_name); 3698 3699 if (!ulen ^ !ubuf) 3700 return -EINVAL; 3701 3702 info->raw_tracepoint.tp_name_len = tp_len + 1; 3703 info->raw_tracepoint.cookie = raw_tp_link->cookie; 3704 3705 if (!ubuf) 3706 return 0; 3707 3708 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len); 3709 } 3710 3711 static const struct bpf_link_ops bpf_raw_tp_link_lops = { 3712 .release = bpf_raw_tp_link_release, 3713 .dealloc_deferred = bpf_raw_tp_link_dealloc, 3714 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 3715 .fill_link_info = bpf_raw_tp_link_fill_link_info, 3716 }; 3717 3718 #ifdef CONFIG_PERF_EVENTS 3719 struct bpf_perf_link { 3720 struct bpf_link link; 3721 struct file *perf_file; 3722 }; 3723 3724 static void bpf_perf_link_release(struct bpf_link *link) 3725 { 3726 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3727 struct perf_event *event = perf_link->perf_file->private_data; 3728 3729 perf_event_free_bpf_prog(event); 3730 fput(perf_link->perf_file); 3731 } 3732 3733 static void bpf_perf_link_dealloc(struct bpf_link *link) 3734 { 
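	/* Runs once the last link reference is gone; the program has already
	 * been detached from the perf event by bpf_perf_link_release().
	 */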
3735 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3736 3737 kfree(perf_link); 3738 } 3739 3740 static int bpf_perf_link_fill_common(const struct perf_event *event, 3741 char __user *uname, u32 *ulenp, 3742 u64 *probe_offset, u64 *probe_addr, 3743 u32 *fd_type, unsigned long *missed) 3744 { 3745 const char *buf; 3746 u32 prog_id, ulen; 3747 size_t len; 3748 int err; 3749 3750 ulen = *ulenp; 3751 if (!ulen ^ !uname) 3752 return -EINVAL; 3753 3754 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf, 3755 probe_offset, probe_addr, missed); 3756 if (err) 3757 return err; 3758 3759 if (buf) { 3760 len = strlen(buf); 3761 *ulenp = len + 1; 3762 } else { 3763 *ulenp = 1; 3764 } 3765 if (!uname) 3766 return 0; 3767 3768 if (buf) { 3769 err = bpf_copy_to_user(uname, buf, ulen, len); 3770 if (err) 3771 return err; 3772 } else { 3773 char zero = '\0'; 3774 3775 if (put_user(zero, uname)) 3776 return -EFAULT; 3777 } 3778 return 0; 3779 } 3780 3781 #ifdef CONFIG_KPROBE_EVENTS 3782 static int bpf_perf_link_fill_kprobe(const struct perf_event *event, 3783 struct bpf_link_info *info) 3784 { 3785 unsigned long missed; 3786 char __user *uname; 3787 u64 addr, offset; 3788 u32 ulen, type; 3789 int err; 3790 3791 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); 3792 ulen = info->perf_event.kprobe.name_len; 3793 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, 3794 &type, &missed); 3795 if (err) 3796 return err; 3797 if (type == BPF_FD_TYPE_KRETPROBE) 3798 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; 3799 else 3800 info->perf_event.type = BPF_PERF_EVENT_KPROBE; 3801 info->perf_event.kprobe.name_len = ulen; 3802 info->perf_event.kprobe.offset = offset; 3803 info->perf_event.kprobe.missed = missed; 3804 if (!kallsyms_show_value(current_cred())) 3805 addr = 0; 3806 info->perf_event.kprobe.addr = addr; 3807 info->perf_event.kprobe.cookie = event->bpf_cookie; 3808 return 0; 3809 } 3810 3811 static void bpf_perf_link_fdinfo_kprobe(const struct perf_event *event, 3812 struct seq_file *seq) 3813 { 3814 const char *name; 3815 int err; 3816 u32 prog_id, type; 3817 u64 offset, addr; 3818 unsigned long missed; 3819 3820 err = bpf_get_perf_event_info(event, &prog_id, &type, &name, 3821 &offset, &addr, &missed); 3822 if (err) 3823 return; 3824 3825 seq_printf(seq, 3826 "name:\t%s\n" 3827 "offset:\t%#llx\n" 3828 "missed:\t%lu\n" 3829 "addr:\t%#llx\n" 3830 "event_type:\t%s\n" 3831 "cookie:\t%llu\n", 3832 name, offset, missed, addr, 3833 type == BPF_FD_TYPE_KRETPROBE ? 
"kretprobe" : "kprobe", 3834 event->bpf_cookie); 3835 } 3836 #endif 3837 3838 #ifdef CONFIG_UPROBE_EVENTS 3839 static int bpf_perf_link_fill_uprobe(const struct perf_event *event, 3840 struct bpf_link_info *info) 3841 { 3842 u64 ref_ctr_offset, offset; 3843 char __user *uname; 3844 u32 ulen, type; 3845 int err; 3846 3847 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); 3848 ulen = info->perf_event.uprobe.name_len; 3849 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &ref_ctr_offset, 3850 &type, NULL); 3851 if (err) 3852 return err; 3853 3854 if (type == BPF_FD_TYPE_URETPROBE) 3855 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; 3856 else 3857 info->perf_event.type = BPF_PERF_EVENT_UPROBE; 3858 info->perf_event.uprobe.name_len = ulen; 3859 info->perf_event.uprobe.offset = offset; 3860 info->perf_event.uprobe.cookie = event->bpf_cookie; 3861 info->perf_event.uprobe.ref_ctr_offset = ref_ctr_offset; 3862 return 0; 3863 } 3864 3865 static void bpf_perf_link_fdinfo_uprobe(const struct perf_event *event, 3866 struct seq_file *seq) 3867 { 3868 const char *name; 3869 int err; 3870 u32 prog_id, type; 3871 u64 offset, ref_ctr_offset; 3872 unsigned long missed; 3873 3874 err = bpf_get_perf_event_info(event, &prog_id, &type, &name, 3875 &offset, &ref_ctr_offset, &missed); 3876 if (err) 3877 return; 3878 3879 seq_printf(seq, 3880 "name:\t%s\n" 3881 "offset:\t%#llx\n" 3882 "ref_ctr_offset:\t%#llx\n" 3883 "event_type:\t%s\n" 3884 "cookie:\t%llu\n", 3885 name, offset, ref_ctr_offset, 3886 type == BPF_FD_TYPE_URETPROBE ? "uretprobe" : "uprobe", 3887 event->bpf_cookie); 3888 } 3889 #endif 3890 3891 static int bpf_perf_link_fill_probe(const struct perf_event *event, 3892 struct bpf_link_info *info) 3893 { 3894 #ifdef CONFIG_KPROBE_EVENTS 3895 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) 3896 return bpf_perf_link_fill_kprobe(event, info); 3897 #endif 3898 #ifdef CONFIG_UPROBE_EVENTS 3899 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) 3900 return bpf_perf_link_fill_uprobe(event, info); 3901 #endif 3902 return -EOPNOTSUPP; 3903 } 3904 3905 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event, 3906 struct bpf_link_info *info) 3907 { 3908 char __user *uname; 3909 u32 ulen; 3910 int err; 3911 3912 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); 3913 ulen = info->perf_event.tracepoint.name_len; 3914 err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL); 3915 if (err) 3916 return err; 3917 3918 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; 3919 info->perf_event.tracepoint.name_len = ulen; 3920 info->perf_event.tracepoint.cookie = event->bpf_cookie; 3921 return 0; 3922 } 3923 3924 static int bpf_perf_link_fill_perf_event(const struct perf_event *event, 3925 struct bpf_link_info *info) 3926 { 3927 info->perf_event.event.type = event->attr.type; 3928 info->perf_event.event.config = event->attr.config; 3929 info->perf_event.event.cookie = event->bpf_cookie; 3930 info->perf_event.type = BPF_PERF_EVENT_EVENT; 3931 return 0; 3932 } 3933 3934 static int bpf_perf_link_fill_link_info(const struct bpf_link *link, 3935 struct bpf_link_info *info) 3936 { 3937 struct bpf_perf_link *perf_link; 3938 const struct perf_event *event; 3939 3940 perf_link = container_of(link, struct bpf_perf_link, link); 3941 event = perf_get_event(perf_link->perf_file); 3942 if (IS_ERR(event)) 3943 return PTR_ERR(event); 3944 3945 switch (event->prog->type) { 3946 case BPF_PROG_TYPE_PERF_EVENT: 3947 return bpf_perf_link_fill_perf_event(event, info); 3948 
case BPF_PROG_TYPE_TRACEPOINT: 3949 return bpf_perf_link_fill_tracepoint(event, info); 3950 case BPF_PROG_TYPE_KPROBE: 3951 return bpf_perf_link_fill_probe(event, info); 3952 default: 3953 return -EOPNOTSUPP; 3954 } 3955 } 3956 3957 static void bpf_perf_event_link_show_fdinfo(const struct perf_event *event, 3958 struct seq_file *seq) 3959 { 3960 seq_printf(seq, 3961 "type:\t%u\n" 3962 "config:\t%llu\n" 3963 "event_type:\t%s\n" 3964 "cookie:\t%llu\n", 3965 event->attr.type, event->attr.config, 3966 "event", event->bpf_cookie); 3967 } 3968 3969 static void bpf_tracepoint_link_show_fdinfo(const struct perf_event *event, 3970 struct seq_file *seq) 3971 { 3972 int err; 3973 const char *name; 3974 u32 prog_id; 3975 3976 err = bpf_get_perf_event_info(event, &prog_id, NULL, &name, NULL, 3977 NULL, NULL); 3978 if (err) 3979 return; 3980 3981 seq_printf(seq, 3982 "tp_name:\t%s\n" 3983 "event_type:\t%s\n" 3984 "cookie:\t%llu\n", 3985 name, "tracepoint", event->bpf_cookie); 3986 } 3987 3988 static void bpf_probe_link_show_fdinfo(const struct perf_event *event, 3989 struct seq_file *seq) 3990 { 3991 #ifdef CONFIG_KPROBE_EVENTS 3992 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) 3993 return bpf_perf_link_fdinfo_kprobe(event, seq); 3994 #endif 3995 3996 #ifdef CONFIG_UPROBE_EVENTS 3997 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) 3998 return bpf_perf_link_fdinfo_uprobe(event, seq); 3999 #endif 4000 } 4001 4002 static void bpf_perf_link_show_fdinfo(const struct bpf_link *link, 4003 struct seq_file *seq) 4004 { 4005 struct bpf_perf_link *perf_link; 4006 const struct perf_event *event; 4007 4008 perf_link = container_of(link, struct bpf_perf_link, link); 4009 event = perf_get_event(perf_link->perf_file); 4010 if (IS_ERR(event)) 4011 return; 4012 4013 switch (event->prog->type) { 4014 case BPF_PROG_TYPE_PERF_EVENT: 4015 return bpf_perf_event_link_show_fdinfo(event, seq); 4016 case BPF_PROG_TYPE_TRACEPOINT: 4017 return bpf_tracepoint_link_show_fdinfo(event, seq); 4018 case BPF_PROG_TYPE_KPROBE: 4019 return bpf_probe_link_show_fdinfo(event, seq); 4020 default: 4021 return; 4022 } 4023 } 4024 4025 static const struct bpf_link_ops bpf_perf_link_lops = { 4026 .release = bpf_perf_link_release, 4027 .dealloc = bpf_perf_link_dealloc, 4028 .fill_link_info = bpf_perf_link_fill_link_info, 4029 .show_fdinfo = bpf_perf_link_show_fdinfo, 4030 }; 4031 4032 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 4033 { 4034 struct bpf_link_primer link_primer; 4035 struct bpf_perf_link *link; 4036 struct perf_event *event; 4037 struct file *perf_file; 4038 int err; 4039 4040 if (attr->link_create.flags) 4041 return -EINVAL; 4042 4043 perf_file = perf_event_get(attr->link_create.target_fd); 4044 if (IS_ERR(perf_file)) 4045 return PTR_ERR(perf_file); 4046 4047 link = kzalloc(sizeof(*link), GFP_USER); 4048 if (!link) { 4049 err = -ENOMEM; 4050 goto out_put_file; 4051 } 4052 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 4053 link->perf_file = perf_file; 4054 4055 err = bpf_link_prime(&link->link, &link_primer); 4056 if (err) { 4057 kfree(link); 4058 goto out_put_file; 4059 } 4060 4061 event = perf_file->private_data; 4062 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 4063 if (err) { 4064 bpf_link_cleanup(&link_primer); 4065 goto out_put_file; 4066 } 4067 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 4068 bpf_prog_inc(prog); 4069 4070 return bpf_link_settle(&link_primer); 4071 4072 out_put_file: 
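	/* Undo the reference taken by perf_event_get() above. */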
4073 fput(perf_file); 4074 return err; 4075 } 4076 #else 4077 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 4078 { 4079 return -EOPNOTSUPP; 4080 } 4081 #endif /* CONFIG_PERF_EVENTS */ 4082 4083 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 4084 const char __user *user_tp_name, u64 cookie) 4085 { 4086 struct bpf_link_primer link_primer; 4087 struct bpf_raw_tp_link *link; 4088 struct bpf_raw_event_map *btp; 4089 const char *tp_name; 4090 char buf[128]; 4091 int err; 4092 4093 switch (prog->type) { 4094 case BPF_PROG_TYPE_TRACING: 4095 case BPF_PROG_TYPE_EXT: 4096 case BPF_PROG_TYPE_LSM: 4097 if (user_tp_name) 4098 /* The attach point for this category of programs 4099 * should be specified via btf_id during program load. 4100 */ 4101 return -EINVAL; 4102 if (prog->type == BPF_PROG_TYPE_TRACING && 4103 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 4104 tp_name = prog->aux->attach_func_name; 4105 break; 4106 } 4107 return bpf_tracing_prog_attach(prog, 0, 0, 0); 4108 case BPF_PROG_TYPE_RAW_TRACEPOINT: 4109 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 4110 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) 4111 return -EFAULT; 4112 buf[sizeof(buf) - 1] = 0; 4113 tp_name = buf; 4114 break; 4115 default: 4116 return -EINVAL; 4117 } 4118 4119 btp = bpf_get_raw_tracepoint(tp_name); 4120 if (!btp) 4121 return -ENOENT; 4122 4123 link = kzalloc(sizeof(*link), GFP_USER); 4124 if (!link) { 4125 err = -ENOMEM; 4126 goto out_put_btp; 4127 } 4128 bpf_link_init_sleepable(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 4129 &bpf_raw_tp_link_lops, prog, 4130 tracepoint_is_faultable(btp->tp)); 4131 link->btp = btp; 4132 link->cookie = cookie; 4133 4134 err = bpf_link_prime(&link->link, &link_primer); 4135 if (err) { 4136 kfree(link); 4137 goto out_put_btp; 4138 } 4139 4140 err = bpf_probe_register(link->btp, link); 4141 if (err) { 4142 bpf_link_cleanup(&link_primer); 4143 goto out_put_btp; 4144 } 4145 4146 return bpf_link_settle(&link_primer); 4147 4148 out_put_btp: 4149 bpf_put_raw_tracepoint(btp); 4150 return err; 4151 } 4152 4153 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie 4154 4155 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 4156 { 4157 struct bpf_prog *prog; 4158 void __user *tp_name; 4159 __u64 cookie; 4160 int fd; 4161 4162 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 4163 return -EINVAL; 4164 4165 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 4166 if (IS_ERR(prog)) 4167 return PTR_ERR(prog); 4168 4169 tp_name = u64_to_user_ptr(attr->raw_tracepoint.name); 4170 cookie = attr->raw_tracepoint.cookie; 4171 fd = bpf_raw_tp_link_attach(prog, tp_name, cookie); 4172 if (fd < 0) 4173 bpf_prog_put(prog); 4174 return fd; 4175 } 4176 4177 static enum bpf_prog_type 4178 attach_type_to_prog_type(enum bpf_attach_type attach_type) 4179 { 4180 switch (attach_type) { 4181 case BPF_CGROUP_INET_INGRESS: 4182 case BPF_CGROUP_INET_EGRESS: 4183 return BPF_PROG_TYPE_CGROUP_SKB; 4184 case BPF_CGROUP_INET_SOCK_CREATE: 4185 case BPF_CGROUP_INET_SOCK_RELEASE: 4186 case BPF_CGROUP_INET4_POST_BIND: 4187 case BPF_CGROUP_INET6_POST_BIND: 4188 return BPF_PROG_TYPE_CGROUP_SOCK; 4189 case BPF_CGROUP_INET4_BIND: 4190 case BPF_CGROUP_INET6_BIND: 4191 case BPF_CGROUP_INET4_CONNECT: 4192 case BPF_CGROUP_INET6_CONNECT: 4193 case BPF_CGROUP_UNIX_CONNECT: 4194 case BPF_CGROUP_INET4_GETPEERNAME: 4195 case BPF_CGROUP_INET6_GETPEERNAME: 4196 case BPF_CGROUP_UNIX_GETPEERNAME: 4197 case BPF_CGROUP_INET4_GETSOCKNAME: 4198 case 
BPF_CGROUP_INET6_GETSOCKNAME: 4199 case BPF_CGROUP_UNIX_GETSOCKNAME: 4200 case BPF_CGROUP_UDP4_SENDMSG: 4201 case BPF_CGROUP_UDP6_SENDMSG: 4202 case BPF_CGROUP_UNIX_SENDMSG: 4203 case BPF_CGROUP_UDP4_RECVMSG: 4204 case BPF_CGROUP_UDP6_RECVMSG: 4205 case BPF_CGROUP_UNIX_RECVMSG: 4206 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 4207 case BPF_CGROUP_SOCK_OPS: 4208 return BPF_PROG_TYPE_SOCK_OPS; 4209 case BPF_CGROUP_DEVICE: 4210 return BPF_PROG_TYPE_CGROUP_DEVICE; 4211 case BPF_SK_MSG_VERDICT: 4212 return BPF_PROG_TYPE_SK_MSG; 4213 case BPF_SK_SKB_STREAM_PARSER: 4214 case BPF_SK_SKB_STREAM_VERDICT: 4215 case BPF_SK_SKB_VERDICT: 4216 return BPF_PROG_TYPE_SK_SKB; 4217 case BPF_LIRC_MODE2: 4218 return BPF_PROG_TYPE_LIRC_MODE2; 4219 case BPF_FLOW_DISSECTOR: 4220 return BPF_PROG_TYPE_FLOW_DISSECTOR; 4221 case BPF_CGROUP_SYSCTL: 4222 return BPF_PROG_TYPE_CGROUP_SYSCTL; 4223 case BPF_CGROUP_GETSOCKOPT: 4224 case BPF_CGROUP_SETSOCKOPT: 4225 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 4226 case BPF_TRACE_ITER: 4227 case BPF_TRACE_RAW_TP: 4228 case BPF_TRACE_FENTRY: 4229 case BPF_TRACE_FEXIT: 4230 case BPF_MODIFY_RETURN: 4231 return BPF_PROG_TYPE_TRACING; 4232 case BPF_LSM_MAC: 4233 return BPF_PROG_TYPE_LSM; 4234 case BPF_SK_LOOKUP: 4235 return BPF_PROG_TYPE_SK_LOOKUP; 4236 case BPF_XDP: 4237 return BPF_PROG_TYPE_XDP; 4238 case BPF_LSM_CGROUP: 4239 return BPF_PROG_TYPE_LSM; 4240 case BPF_TCX_INGRESS: 4241 case BPF_TCX_EGRESS: 4242 case BPF_NETKIT_PRIMARY: 4243 case BPF_NETKIT_PEER: 4244 return BPF_PROG_TYPE_SCHED_CLS; 4245 default: 4246 return BPF_PROG_TYPE_UNSPEC; 4247 } 4248 } 4249 4250 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 4251 enum bpf_attach_type attach_type) 4252 { 4253 enum bpf_prog_type ptype; 4254 4255 switch (prog->type) { 4256 case BPF_PROG_TYPE_CGROUP_SOCK: 4257 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4258 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4259 case BPF_PROG_TYPE_SK_LOOKUP: 4260 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 4261 case BPF_PROG_TYPE_CGROUP_SKB: 4262 if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN)) 4263 /* cg-skb progs can be loaded by unpriv user. 4264 * check permissions at attach time. 4265 */ 4266 return -EPERM; 4267 4268 ptype = attach_type_to_prog_type(attach_type); 4269 if (prog->type != ptype) 4270 return -EINVAL; 4271 4272 return prog->enforce_expected_attach_type && 4273 prog->expected_attach_type != attach_type ? 
4274 -EINVAL : 0; 4275 case BPF_PROG_TYPE_EXT: 4276 return 0; 4277 case BPF_PROG_TYPE_NETFILTER: 4278 if (attach_type != BPF_NETFILTER) 4279 return -EINVAL; 4280 return 0; 4281 case BPF_PROG_TYPE_PERF_EVENT: 4282 case BPF_PROG_TYPE_TRACEPOINT: 4283 if (attach_type != BPF_PERF_EVENT) 4284 return -EINVAL; 4285 return 0; 4286 case BPF_PROG_TYPE_KPROBE: 4287 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && 4288 attach_type != BPF_TRACE_KPROBE_MULTI) 4289 return -EINVAL; 4290 if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION && 4291 attach_type != BPF_TRACE_KPROBE_SESSION) 4292 return -EINVAL; 4293 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && 4294 attach_type != BPF_TRACE_UPROBE_MULTI) 4295 return -EINVAL; 4296 if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION && 4297 attach_type != BPF_TRACE_UPROBE_SESSION) 4298 return -EINVAL; 4299 if (attach_type != BPF_PERF_EVENT && 4300 attach_type != BPF_TRACE_KPROBE_MULTI && 4301 attach_type != BPF_TRACE_KPROBE_SESSION && 4302 attach_type != BPF_TRACE_UPROBE_MULTI && 4303 attach_type != BPF_TRACE_UPROBE_SESSION) 4304 return -EINVAL; 4305 return 0; 4306 case BPF_PROG_TYPE_SCHED_CLS: 4307 if (attach_type != BPF_TCX_INGRESS && 4308 attach_type != BPF_TCX_EGRESS && 4309 attach_type != BPF_NETKIT_PRIMARY && 4310 attach_type != BPF_NETKIT_PEER) 4311 return -EINVAL; 4312 return 0; 4313 default: 4314 ptype = attach_type_to_prog_type(attach_type); 4315 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) 4316 return -EINVAL; 4317 return 0; 4318 } 4319 } 4320 4321 static bool is_cgroup_prog_type(enum bpf_prog_type ptype, enum bpf_attach_type atype, 4322 bool check_atype) 4323 { 4324 switch (ptype) { 4325 case BPF_PROG_TYPE_CGROUP_DEVICE: 4326 case BPF_PROG_TYPE_CGROUP_SKB: 4327 case BPF_PROG_TYPE_CGROUP_SOCK: 4328 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4329 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4330 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4331 case BPF_PROG_TYPE_SOCK_OPS: 4332 return true; 4333 case BPF_PROG_TYPE_LSM: 4334 return check_atype ? 
atype == BPF_LSM_CGROUP : true; 4335 default: 4336 return false; 4337 } 4338 } 4339 4340 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision 4341 4342 #define BPF_F_ATTACH_MASK_BASE \ 4343 (BPF_F_ALLOW_OVERRIDE | \ 4344 BPF_F_ALLOW_MULTI | \ 4345 BPF_F_REPLACE | \ 4346 BPF_F_PREORDER) 4347 4348 #define BPF_F_ATTACH_MASK_MPROG \ 4349 (BPF_F_REPLACE | \ 4350 BPF_F_BEFORE | \ 4351 BPF_F_AFTER | \ 4352 BPF_F_ID | \ 4353 BPF_F_LINK) 4354 4355 static int bpf_prog_attach(const union bpf_attr *attr) 4356 { 4357 enum bpf_prog_type ptype; 4358 struct bpf_prog *prog; 4359 int ret; 4360 4361 if (CHECK_ATTR(BPF_PROG_ATTACH)) 4362 return -EINVAL; 4363 4364 ptype = attach_type_to_prog_type(attr->attach_type); 4365 if (ptype == BPF_PROG_TYPE_UNSPEC) 4366 return -EINVAL; 4367 if (bpf_mprog_supported(ptype)) { 4368 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4369 return -EINVAL; 4370 } else if (is_cgroup_prog_type(ptype, 0, false)) { 4371 if (attr->attach_flags & ~(BPF_F_ATTACH_MASK_BASE | BPF_F_ATTACH_MASK_MPROG)) 4372 return -EINVAL; 4373 } else { 4374 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE) 4375 return -EINVAL; 4376 if (attr->relative_fd || 4377 attr->expected_revision) 4378 return -EINVAL; 4379 } 4380 4381 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4382 if (IS_ERR(prog)) 4383 return PTR_ERR(prog); 4384 4385 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 4386 bpf_prog_put(prog); 4387 return -EINVAL; 4388 } 4389 4390 if (is_cgroup_prog_type(ptype, prog->expected_attach_type, true)) { 4391 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 4392 goto out; 4393 } 4394 4395 switch (ptype) { 4396 case BPF_PROG_TYPE_SK_SKB: 4397 case BPF_PROG_TYPE_SK_MSG: 4398 ret = sock_map_get_from_fd(attr, prog); 4399 break; 4400 case BPF_PROG_TYPE_LIRC_MODE2: 4401 ret = lirc_prog_attach(attr, prog); 4402 break; 4403 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4404 ret = netns_bpf_prog_attach(attr, prog); 4405 break; 4406 case BPF_PROG_TYPE_SCHED_CLS: 4407 if (attr->attach_type == BPF_TCX_INGRESS || 4408 attr->attach_type == BPF_TCX_EGRESS) 4409 ret = tcx_prog_attach(attr, prog); 4410 else 4411 ret = netkit_prog_attach(attr, prog); 4412 break; 4413 default: 4414 ret = -EINVAL; 4415 } 4416 out: 4417 if (ret) 4418 bpf_prog_put(prog); 4419 return ret; 4420 } 4421 4422 #define BPF_PROG_DETACH_LAST_FIELD expected_revision 4423 4424 static int bpf_prog_detach(const union bpf_attr *attr) 4425 { 4426 struct bpf_prog *prog = NULL; 4427 enum bpf_prog_type ptype; 4428 int ret; 4429 4430 if (CHECK_ATTR(BPF_PROG_DETACH)) 4431 return -EINVAL; 4432 4433 ptype = attach_type_to_prog_type(attr->attach_type); 4434 if (bpf_mprog_supported(ptype)) { 4435 if (ptype == BPF_PROG_TYPE_UNSPEC) 4436 return -EINVAL; 4437 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4438 return -EINVAL; 4439 if (attr->attach_bpf_fd) { 4440 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4441 if (IS_ERR(prog)) 4442 return PTR_ERR(prog); 4443 } 4444 } else if (is_cgroup_prog_type(ptype, 0, false)) { 4445 if (attr->attach_flags || attr->relative_fd) 4446 return -EINVAL; 4447 } else if (attr->attach_flags || 4448 attr->relative_fd || 4449 attr->expected_revision) { 4450 return -EINVAL; 4451 } 4452 4453 switch (ptype) { 4454 case BPF_PROG_TYPE_SK_MSG: 4455 case BPF_PROG_TYPE_SK_SKB: 4456 ret = sock_map_prog_detach(attr, ptype); 4457 break; 4458 case BPF_PROG_TYPE_LIRC_MODE2: 4459 ret = lirc_prog_detach(attr); 4460 break; 4461 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4462 ret = netns_bpf_prog_detach(attr, ptype); 4463 break; 4464 
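	/*
	 * The cgroup-managed program types below share the common cgroup
	 * detach path. Minimal userspace sketch (fields per the bpf(2)
	 * UAPI; cgroup_fd is a caller-provided cgroup directory fd,
	 * error handling omitted):
	 *
	 *	union bpf_attr attr = {};
	 *
	 *	attr.target_fd   = cgroup_fd;
	 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
	 *	syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
	 */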
case BPF_PROG_TYPE_CGROUP_DEVICE: 4465 case BPF_PROG_TYPE_CGROUP_SKB: 4466 case BPF_PROG_TYPE_CGROUP_SOCK: 4467 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4468 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4469 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4470 case BPF_PROG_TYPE_SOCK_OPS: 4471 case BPF_PROG_TYPE_LSM: 4472 ret = cgroup_bpf_prog_detach(attr, ptype); 4473 break; 4474 case BPF_PROG_TYPE_SCHED_CLS: 4475 if (attr->attach_type == BPF_TCX_INGRESS || 4476 attr->attach_type == BPF_TCX_EGRESS) 4477 ret = tcx_prog_detach(attr, prog); 4478 else 4479 ret = netkit_prog_detach(attr, prog); 4480 break; 4481 default: 4482 ret = -EINVAL; 4483 } 4484 4485 if (prog) 4486 bpf_prog_put(prog); 4487 return ret; 4488 } 4489 4490 #define BPF_PROG_QUERY_LAST_FIELD query.revision 4491 4492 static int bpf_prog_query(const union bpf_attr *attr, 4493 union bpf_attr __user *uattr) 4494 { 4495 if (!bpf_net_capable()) 4496 return -EPERM; 4497 if (CHECK_ATTR(BPF_PROG_QUERY)) 4498 return -EINVAL; 4499 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 4500 return -EINVAL; 4501 4502 switch (attr->query.attach_type) { 4503 case BPF_CGROUP_INET_INGRESS: 4504 case BPF_CGROUP_INET_EGRESS: 4505 case BPF_CGROUP_INET_SOCK_CREATE: 4506 case BPF_CGROUP_INET_SOCK_RELEASE: 4507 case BPF_CGROUP_INET4_BIND: 4508 case BPF_CGROUP_INET6_BIND: 4509 case BPF_CGROUP_INET4_POST_BIND: 4510 case BPF_CGROUP_INET6_POST_BIND: 4511 case BPF_CGROUP_INET4_CONNECT: 4512 case BPF_CGROUP_INET6_CONNECT: 4513 case BPF_CGROUP_UNIX_CONNECT: 4514 case BPF_CGROUP_INET4_GETPEERNAME: 4515 case BPF_CGROUP_INET6_GETPEERNAME: 4516 case BPF_CGROUP_UNIX_GETPEERNAME: 4517 case BPF_CGROUP_INET4_GETSOCKNAME: 4518 case BPF_CGROUP_INET6_GETSOCKNAME: 4519 case BPF_CGROUP_UNIX_GETSOCKNAME: 4520 case BPF_CGROUP_UDP4_SENDMSG: 4521 case BPF_CGROUP_UDP6_SENDMSG: 4522 case BPF_CGROUP_UNIX_SENDMSG: 4523 case BPF_CGROUP_UDP4_RECVMSG: 4524 case BPF_CGROUP_UDP6_RECVMSG: 4525 case BPF_CGROUP_UNIX_RECVMSG: 4526 case BPF_CGROUP_SOCK_OPS: 4527 case BPF_CGROUP_DEVICE: 4528 case BPF_CGROUP_SYSCTL: 4529 case BPF_CGROUP_GETSOCKOPT: 4530 case BPF_CGROUP_SETSOCKOPT: 4531 case BPF_LSM_CGROUP: 4532 return cgroup_bpf_prog_query(attr, uattr); 4533 case BPF_LIRC_MODE2: 4534 return lirc_prog_query(attr, uattr); 4535 case BPF_FLOW_DISSECTOR: 4536 case BPF_SK_LOOKUP: 4537 return netns_bpf_prog_query(attr, uattr); 4538 case BPF_SK_SKB_STREAM_PARSER: 4539 case BPF_SK_SKB_STREAM_VERDICT: 4540 case BPF_SK_MSG_VERDICT: 4541 case BPF_SK_SKB_VERDICT: 4542 return sock_map_bpf_prog_query(attr, uattr); 4543 case BPF_TCX_INGRESS: 4544 case BPF_TCX_EGRESS: 4545 return tcx_prog_query(attr, uattr); 4546 case BPF_NETKIT_PRIMARY: 4547 case BPF_NETKIT_PEER: 4548 return netkit_prog_query(attr, uattr); 4549 default: 4550 return -EINVAL; 4551 } 4552 } 4553 4554 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size 4555 4556 static int bpf_prog_test_run(const union bpf_attr *attr, 4557 union bpf_attr __user *uattr) 4558 { 4559 struct bpf_prog *prog; 4560 int ret = -ENOTSUPP; 4561 4562 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 4563 return -EINVAL; 4564 4565 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 4566 (!attr->test.ctx_size_in && attr->test.ctx_in)) 4567 return -EINVAL; 4568 4569 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 4570 (!attr->test.ctx_size_out && attr->test.ctx_out)) 4571 return -EINVAL; 4572 4573 prog = bpf_prog_get(attr->test.prog_fd); 4574 if (IS_ERR(prog)) 4575 return PTR_ERR(prog); 4576 4577 if (prog->aux->ops->test_run) 4578 ret = prog->aux->ops->test_run(prog, attr, uattr); 4579 4580 
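	/* ret stays -ENOTSUPP if the program type provides no ->test_run */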
bpf_prog_put(prog); 4581 return ret; 4582 } 4583 4584 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 4585 4586 static int bpf_obj_get_next_id(const union bpf_attr *attr, 4587 union bpf_attr __user *uattr, 4588 struct idr *idr, 4589 spinlock_t *lock) 4590 { 4591 u32 next_id = attr->start_id; 4592 int err = 0; 4593 4594 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 4595 return -EINVAL; 4596 4597 if (!capable(CAP_SYS_ADMIN)) 4598 return -EPERM; 4599 4600 next_id++; 4601 spin_lock_bh(lock); 4602 if (!idr_get_next(idr, &next_id)) 4603 err = -ENOENT; 4604 spin_unlock_bh(lock); 4605 4606 if (!err) 4607 err = put_user(next_id, &uattr->next_id); 4608 4609 return err; 4610 } 4611 4612 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 4613 { 4614 struct bpf_map *map; 4615 4616 spin_lock_bh(&map_idr_lock); 4617 again: 4618 map = idr_get_next(&map_idr, id); 4619 if (map) { 4620 map = __bpf_map_inc_not_zero(map, false); 4621 if (IS_ERR(map)) { 4622 (*id)++; 4623 goto again; 4624 } 4625 } 4626 spin_unlock_bh(&map_idr_lock); 4627 4628 return map; 4629 } 4630 4631 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 4632 { 4633 struct bpf_prog *prog; 4634 4635 spin_lock_bh(&prog_idr_lock); 4636 again: 4637 prog = idr_get_next(&prog_idr, id); 4638 if (prog) { 4639 prog = bpf_prog_inc_not_zero(prog); 4640 if (IS_ERR(prog)) { 4641 (*id)++; 4642 goto again; 4643 } 4644 } 4645 spin_unlock_bh(&prog_idr_lock); 4646 4647 return prog; 4648 } 4649 4650 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 4651 4652 struct bpf_prog *bpf_prog_by_id(u32 id) 4653 { 4654 struct bpf_prog *prog; 4655 4656 if (!id) 4657 return ERR_PTR(-ENOENT); 4658 4659 spin_lock_bh(&prog_idr_lock); 4660 prog = idr_find(&prog_idr, id); 4661 if (prog) 4662 prog = bpf_prog_inc_not_zero(prog); 4663 else 4664 prog = ERR_PTR(-ENOENT); 4665 spin_unlock_bh(&prog_idr_lock); 4666 return prog; 4667 } 4668 4669 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 4670 { 4671 struct bpf_prog *prog; 4672 u32 id = attr->prog_id; 4673 int fd; 4674 4675 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 4676 return -EINVAL; 4677 4678 if (!capable(CAP_SYS_ADMIN)) 4679 return -EPERM; 4680 4681 prog = bpf_prog_by_id(id); 4682 if (IS_ERR(prog)) 4683 return PTR_ERR(prog); 4684 4685 fd = bpf_prog_new_fd(prog); 4686 if (fd < 0) 4687 bpf_prog_put(prog); 4688 4689 return fd; 4690 } 4691 4692 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 4693 4694 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 4695 { 4696 struct bpf_map *map; 4697 u32 id = attr->map_id; 4698 int f_flags; 4699 int fd; 4700 4701 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 4702 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 4703 return -EINVAL; 4704 4705 if (!capable(CAP_SYS_ADMIN)) 4706 return -EPERM; 4707 4708 f_flags = bpf_get_file_flag(attr->open_flags); 4709 if (f_flags < 0) 4710 return f_flags; 4711 4712 spin_lock_bh(&map_idr_lock); 4713 map = idr_find(&map_idr, id); 4714 if (map) 4715 map = __bpf_map_inc_not_zero(map, true); 4716 else 4717 map = ERR_PTR(-ENOENT); 4718 spin_unlock_bh(&map_idr_lock); 4719 4720 if (IS_ERR(map)) 4721 return PTR_ERR(map); 4722 4723 fd = bpf_map_new_fd(map, f_flags); 4724 if (fd < 0) 4725 bpf_map_put_with_uref(map); 4726 4727 return fd; 4728 } 4729 4730 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 4731 unsigned long addr, u32 *off, 4732 u32 *type) 4733 { 4734 const struct bpf_map *map; 4735 int i; 4736 4737 mutex_lock(&prog->aux->used_maps_mutex); 4738 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 4739 map = 
prog->aux->used_maps[i]; 4740 if (map == (void *)addr) { 4741 *type = BPF_PSEUDO_MAP_FD; 4742 goto out; 4743 } 4744 if (!map->ops->map_direct_value_meta) 4745 continue; 4746 if (!map->ops->map_direct_value_meta(map, addr, off)) { 4747 *type = BPF_PSEUDO_MAP_VALUE; 4748 goto out; 4749 } 4750 } 4751 map = NULL; 4752 4753 out: 4754 mutex_unlock(&prog->aux->used_maps_mutex); 4755 return map; 4756 } 4757 4758 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 4759 const struct cred *f_cred) 4760 { 4761 const struct bpf_map *map; 4762 struct bpf_insn *insns; 4763 u32 off, type; 4764 u64 imm; 4765 u8 code; 4766 int i; 4767 4768 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 4769 GFP_USER); 4770 if (!insns) 4771 return insns; 4772 4773 for (i = 0; i < prog->len; i++) { 4774 code = insns[i].code; 4775 4776 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 4777 insns[i].code = BPF_JMP | BPF_CALL; 4778 insns[i].imm = BPF_FUNC_tail_call; 4779 /* fall-through */ 4780 } 4781 if (code == (BPF_JMP | BPF_CALL) || 4782 code == (BPF_JMP | BPF_CALL_ARGS)) { 4783 if (code == (BPF_JMP | BPF_CALL_ARGS)) 4784 insns[i].code = BPF_JMP | BPF_CALL; 4785 if (!bpf_dump_raw_ok(f_cred)) 4786 insns[i].imm = 0; 4787 continue; 4788 } 4789 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 4790 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 4791 continue; 4792 } 4793 4794 if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX || 4795 BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) { 4796 insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM; 4797 continue; 4798 } 4799 4800 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 4801 continue; 4802 4803 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 4804 map = bpf_map_from_imm(prog, imm, &off, &type); 4805 if (map) { 4806 insns[i].src_reg = type; 4807 insns[i].imm = map->id; 4808 insns[i + 1].imm = off; 4809 continue; 4810 } 4811 } 4812 4813 return insns; 4814 } 4815 4816 static int set_info_rec_size(struct bpf_prog_info *info) 4817 { 4818 /* 4819 * Ensure info.*_rec_size is the same as kernel expected size 4820 * 4821 * or 4822 * 4823 * Only allow zero *_rec_size if both _rec_size and _cnt are 4824 * zero. In this case, the kernel will set the expected 4825 * _rec_size back to the info. 
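 *
 * For example, a caller that passes nr_func_info == 0 together with
 * func_info_rec_size == 0 is accepted, and func_info_rec_size is set
 * to sizeof(struct bpf_func_info) on return, which lets userspace
 * probe the record size this kernel expects.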
4826 */ 4827 4828 if ((info->nr_func_info || info->func_info_rec_size) && 4829 info->func_info_rec_size != sizeof(struct bpf_func_info)) 4830 return -EINVAL; 4831 4832 if ((info->nr_line_info || info->line_info_rec_size) && 4833 info->line_info_rec_size != sizeof(struct bpf_line_info)) 4834 return -EINVAL; 4835 4836 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 4837 info->jited_line_info_rec_size != sizeof(__u64)) 4838 return -EINVAL; 4839 4840 info->func_info_rec_size = sizeof(struct bpf_func_info); 4841 info->line_info_rec_size = sizeof(struct bpf_line_info); 4842 info->jited_line_info_rec_size = sizeof(__u64); 4843 4844 return 0; 4845 } 4846 4847 static int bpf_prog_get_info_by_fd(struct file *file, 4848 struct bpf_prog *prog, 4849 const union bpf_attr *attr, 4850 union bpf_attr __user *uattr) 4851 { 4852 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4853 struct btf *attach_btf = bpf_prog_get_target_btf(prog); 4854 struct bpf_prog_info info; 4855 u32 info_len = attr->info.info_len; 4856 struct bpf_prog_kstats stats; 4857 char __user *uinsns; 4858 u32 ulen; 4859 int err; 4860 4861 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4862 if (err) 4863 return err; 4864 info_len = min_t(u32, sizeof(info), info_len); 4865 4866 memset(&info, 0, sizeof(info)); 4867 if (copy_from_user(&info, uinfo, info_len)) 4868 return -EFAULT; 4869 4870 info.type = prog->type; 4871 info.id = prog->aux->id; 4872 info.load_time = prog->aux->load_time; 4873 info.created_by_uid = from_kuid_munged(current_user_ns(), 4874 prog->aux->user->uid); 4875 info.gpl_compatible = prog->gpl_compatible; 4876 4877 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 4878 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 4879 4880 mutex_lock(&prog->aux->used_maps_mutex); 4881 ulen = info.nr_map_ids; 4882 info.nr_map_ids = prog->aux->used_map_cnt; 4883 ulen = min_t(u32, info.nr_map_ids, ulen); 4884 if (ulen) { 4885 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 4886 u32 i; 4887 4888 for (i = 0; i < ulen; i++) 4889 if (put_user(prog->aux->used_maps[i]->id, 4890 &user_map_ids[i])) { 4891 mutex_unlock(&prog->aux->used_maps_mutex); 4892 return -EFAULT; 4893 } 4894 } 4895 mutex_unlock(&prog->aux->used_maps_mutex); 4896 4897 err = set_info_rec_size(&info); 4898 if (err) 4899 return err; 4900 4901 bpf_prog_get_stats(prog, &stats); 4902 info.run_time_ns = stats.nsecs; 4903 info.run_cnt = stats.cnt; 4904 info.recursion_misses = stats.misses; 4905 4906 info.verified_insns = prog->aux->verified_insns; 4907 if (prog->aux->btf) 4908 info.btf_id = btf_obj_id(prog->aux->btf); 4909 4910 if (!bpf_capable()) { 4911 info.jited_prog_len = 0; 4912 info.xlated_prog_len = 0; 4913 info.nr_jited_ksyms = 0; 4914 info.nr_jited_func_lens = 0; 4915 info.nr_func_info = 0; 4916 info.nr_line_info = 0; 4917 info.nr_jited_line_info = 0; 4918 goto done; 4919 } 4920 4921 ulen = info.xlated_prog_len; 4922 info.xlated_prog_len = bpf_prog_insn_size(prog); 4923 if (info.xlated_prog_len && ulen) { 4924 struct bpf_insn *insns_sanitized; 4925 bool fault; 4926 4927 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 4928 info.xlated_prog_insns = 0; 4929 goto done; 4930 } 4931 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 4932 if (!insns_sanitized) 4933 return -ENOMEM; 4934 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 4935 ulen = min_t(u32, info.xlated_prog_len, ulen); 4936 fault = copy_to_user(uinsns, insns_sanitized, ulen); 4937 kfree(insns_sanitized); 4938 if 
(fault) 4939 return -EFAULT; 4940 } 4941 4942 if (bpf_prog_is_offloaded(prog->aux)) { 4943 err = bpf_prog_offload_info_fill(&info, prog); 4944 if (err) 4945 return err; 4946 goto done; 4947 } 4948 4949 /* NOTE: the following code is supposed to be skipped for offload. 4950 * bpf_prog_offload_info_fill() is the place to fill similar fields 4951 * for offload. 4952 */ 4953 ulen = info.jited_prog_len; 4954 if (prog->aux->func_cnt) { 4955 u32 i; 4956 4957 info.jited_prog_len = 0; 4958 for (i = 0; i < prog->aux->func_cnt; i++) 4959 info.jited_prog_len += prog->aux->func[i]->jited_len; 4960 } else { 4961 info.jited_prog_len = prog->jited_len; 4962 } 4963 4964 if (info.jited_prog_len && ulen) { 4965 if (bpf_dump_raw_ok(file->f_cred)) { 4966 uinsns = u64_to_user_ptr(info.jited_prog_insns); 4967 ulen = min_t(u32, info.jited_prog_len, ulen); 4968 4969 /* for multi-function programs, copy the JITed 4970 * instructions for all the functions 4971 */ 4972 if (prog->aux->func_cnt) { 4973 u32 len, free, i; 4974 u8 *img; 4975 4976 free = ulen; 4977 for (i = 0; i < prog->aux->func_cnt; i++) { 4978 len = prog->aux->func[i]->jited_len; 4979 len = min_t(u32, len, free); 4980 img = (u8 *) prog->aux->func[i]->bpf_func; 4981 if (copy_to_user(uinsns, img, len)) 4982 return -EFAULT; 4983 uinsns += len; 4984 free -= len; 4985 if (!free) 4986 break; 4987 } 4988 } else { 4989 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 4990 return -EFAULT; 4991 } 4992 } else { 4993 info.jited_prog_insns = 0; 4994 } 4995 } 4996 4997 ulen = info.nr_jited_ksyms; 4998 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 4999 if (ulen) { 5000 if (bpf_dump_raw_ok(file->f_cred)) { 5001 unsigned long ksym_addr; 5002 u64 __user *user_ksyms; 5003 u32 i; 5004 5005 /* copy the address of the kernel symbol 5006 * corresponding to each function 5007 */ 5008 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 5009 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 5010 if (prog->aux->func_cnt) { 5011 for (i = 0; i < ulen; i++) { 5012 ksym_addr = (unsigned long) 5013 prog->aux->func[i]->bpf_func; 5014 if (put_user((u64) ksym_addr, 5015 &user_ksyms[i])) 5016 return -EFAULT; 5017 } 5018 } else { 5019 ksym_addr = (unsigned long) prog->bpf_func; 5020 if (put_user((u64) ksym_addr, &user_ksyms[0])) 5021 return -EFAULT; 5022 } 5023 } else { 5024 info.jited_ksyms = 0; 5025 } 5026 } 5027 5028 ulen = info.nr_jited_func_lens; 5029 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 5030 if (ulen) { 5031 if (bpf_dump_raw_ok(file->f_cred)) { 5032 u32 __user *user_lens; 5033 u32 func_len, i; 5034 5035 /* copy the JITed image lengths for each function */ 5036 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 5037 user_lens = u64_to_user_ptr(info.jited_func_lens); 5038 if (prog->aux->func_cnt) { 5039 for (i = 0; i < ulen; i++) { 5040 func_len = 5041 prog->aux->func[i]->jited_len; 5042 if (put_user(func_len, &user_lens[i])) 5043 return -EFAULT; 5044 } 5045 } else { 5046 func_len = prog->jited_len; 5047 if (put_user(func_len, &user_lens[0])) 5048 return -EFAULT; 5049 } 5050 } else { 5051 info.jited_func_lens = 0; 5052 } 5053 } 5054 5055 info.attach_btf_id = prog->aux->attach_btf_id; 5056 if (attach_btf) 5057 info.attach_btf_obj_id = btf_obj_id(attach_btf); 5058 5059 ulen = info.nr_func_info; 5060 info.nr_func_info = prog->aux->func_info_cnt; 5061 if (info.nr_func_info && ulen) { 5062 char __user *user_finfo; 5063 5064 user_finfo = u64_to_user_ptr(info.func_info); 5065 ulen = min_t(u32, info.nr_func_info, ulen); 5066 if (copy_to_user(user_finfo, prog->aux->func_info, 5067 info.func_info_rec_size * ulen)) 5068 return -EFAULT; 5069 } 5070 5071 ulen = info.nr_line_info; 5072 info.nr_line_info = prog->aux->nr_linfo; 5073 if (info.nr_line_info && ulen) { 5074 __u8 __user *user_linfo; 5075 5076 user_linfo = u64_to_user_ptr(info.line_info); 5077 ulen = min_t(u32, info.nr_line_info, ulen); 5078 if (copy_to_user(user_linfo, prog->aux->linfo, 5079 info.line_info_rec_size * ulen)) 5080 return -EFAULT; 5081 } 5082 5083 ulen = info.nr_jited_line_info; 5084 if (prog->aux->jited_linfo) 5085 info.nr_jited_line_info = prog->aux->nr_linfo; 5086 else 5087 info.nr_jited_line_info = 0; 5088 if (info.nr_jited_line_info && ulen) { 5089 if (bpf_dump_raw_ok(file->f_cred)) { 5090 unsigned long line_addr; 5091 __u64 __user *user_linfo; 5092 u32 i; 5093 5094 user_linfo = u64_to_user_ptr(info.jited_line_info); 5095 ulen = min_t(u32, info.nr_jited_line_info, ulen); 5096 for (i = 0; i < ulen; i++) { 5097 line_addr = (unsigned long)prog->aux->jited_linfo[i]; 5098 if (put_user((__u64)line_addr, &user_linfo[i])) 5099 return -EFAULT; 5100 } 5101 } else { 5102 info.jited_line_info = 0; 5103 } 5104 } 5105 5106 ulen = info.nr_prog_tags; 5107 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 5108 if (ulen) { 5109 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 5110 u32 i; 5111 5112 user_prog_tags = u64_to_user_ptr(info.prog_tags); 5113 ulen = min_t(u32, info.nr_prog_tags, ulen); 5114 if (prog->aux->func_cnt) { 5115 for (i = 0; i < ulen; i++) { 5116 if (copy_to_user(user_prog_tags[i], 5117 prog->aux->func[i]->tag, 5118 BPF_TAG_SIZE)) 5119 return -EFAULT; 5120 } 5121 } else { 5122 if (copy_to_user(user_prog_tags[0], 5123 prog->tag, BPF_TAG_SIZE)) 5124 return -EFAULT; 5125 } 5126 } 5127 5128 done: 5129 if (copy_to_user(uinfo, &info, info_len) || 5130 put_user(info_len, &uattr->info.info_len)) 5131 return -EFAULT; 5132 5133 return 0; 5134 } 5135 5136 static int bpf_map_get_info_by_fd(struct file *file, 5137 struct bpf_map *map, 5138 const union bpf_attr *attr, 5139 union bpf_attr __user *uattr) 5140 { 5141 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 5142 struct bpf_map_info info; 5143 u32 info_len = attr->info.info_len; 5144 int err; 5145 5146 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 5147 if (err) 5148 return err; 5149 info_len = min_t(u32, sizeof(info), info_len); 5150 5151 memset(&info, 0, sizeof(info)); 5152 info.type = map->map_type; 5153 info.id = map->id; 5154 info.key_size = map->key_size; 5155 info.value_size = map->value_size; 5156 info.max_entries = map->max_entries; 5157 info.map_flags = map->map_flags; 5158 info.map_extra = map->map_extra; 5159 memcpy(info.name, map->name, sizeof(map->name)); 5160 5161 if (map->btf) { 5162 info.btf_id = btf_obj_id(map->btf); 5163 info.btf_key_type_id = map->btf_key_type_id; 5164 info.btf_value_type_id = map->btf_value_type_id; 5165 } 5166 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 5167 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) 5168 bpf_map_struct_ops_info_fill(&info, map); 5169 5170 if (bpf_map_is_offloaded(map)) { 5171 err = bpf_map_offload_info_fill(&info, map); 5172 if (err) 5173 return err; 5174 } 5175 5176 if (copy_to_user(uinfo, &info, info_len) || 5177 put_user(info_len, &uattr->info.info_len)) 5178 return -EFAULT; 5179 5180 return 0; 5181 } 5182 5183 static int bpf_btf_get_info_by_fd(struct file *file, 5184 struct btf *btf, 5185 const union bpf_attr *attr, 5186 union bpf_attr __user *uattr) 5187 { 5188 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 5189 u32 info_len = attr->info.info_len; 5190 int err; 5191 5192 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); 5193 if (err) 5194 return err; 5195 5196 return btf_get_info_by_fd(btf, attr, uattr); 5197 } 5198 5199 static int bpf_link_get_info_by_fd(struct file *file, 5200 struct bpf_link *link, 5201 const union bpf_attr *attr, 5202 union bpf_attr __user *uattr) 5203 { 5204 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 5205 struct bpf_link_info info; 5206 u32 info_len = attr->info.info_len; 5207 int err; 5208 5209 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 5210 if (err) 5211 return err; 5212 info_len = min_t(u32, sizeof(info), info_len); 5213 5214 memset(&info, 0, sizeof(info)); 5215 if (copy_from_user(&info, uinfo, info_len)) 5216 return -EFAULT; 5217 5218 info.type = link->type; 5219 info.id = link->id; 5220 if (link->prog) 5221 info.prog_id = link->prog->aux->id; 5222 5223 if (link->ops->fill_link_info) { 5224 err = link->ops->fill_link_info(link, &info); 5225 if (err) 5226 return err; 5227 } 5228 5229 if (copy_to_user(uinfo, &info, info_len) || 5230 put_user(info_len, 
&uattr->info.info_len)) 5231 return -EFAULT; 5232 5233 return 0; 5234 } 5235 5236 5237 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 5238 5239 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 5240 union bpf_attr __user *uattr) 5241 { 5242 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 5243 return -EINVAL; 5244 5245 CLASS(fd, f)(attr->info.bpf_fd); 5246 if (fd_empty(f)) 5247 return -EBADFD; 5248 5249 if (fd_file(f)->f_op == &bpf_prog_fops) 5250 return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, 5251 uattr); 5252 else if (fd_file(f)->f_op == &bpf_map_fops) 5253 return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, 5254 uattr); 5255 else if (fd_file(f)->f_op == &btf_fops) 5256 return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr); 5257 else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll) 5258 return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data, 5259 attr, uattr); 5260 return -EINVAL; 5261 } 5262 5263 #define BPF_BTF_LOAD_LAST_FIELD btf_token_fd 5264 5265 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) 5266 { 5267 struct bpf_token *token = NULL; 5268 5269 if (CHECK_ATTR(BPF_BTF_LOAD)) 5270 return -EINVAL; 5271 5272 if (attr->btf_flags & ~BPF_F_TOKEN_FD) 5273 return -EINVAL; 5274 5275 if (attr->btf_flags & BPF_F_TOKEN_FD) { 5276 token = bpf_token_get_from_fd(attr->btf_token_fd); 5277 if (IS_ERR(token)) 5278 return PTR_ERR(token); 5279 if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) { 5280 bpf_token_put(token); 5281 token = NULL; 5282 } 5283 } 5284 5285 if (!bpf_token_capable(token, CAP_BPF)) { 5286 bpf_token_put(token); 5287 return -EPERM; 5288 } 5289 5290 bpf_token_put(token); 5291 5292 return btf_new_fd(attr, uattr, uattr_size); 5293 } 5294 5295 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD fd_by_id_token_fd 5296 5297 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) 5298 { 5299 struct bpf_token *token = NULL; 5300 5301 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 5302 return -EINVAL; 5303 5304 if (attr->open_flags & ~BPF_F_TOKEN_FD) 5305 return -EINVAL; 5306 5307 if (attr->open_flags & BPF_F_TOKEN_FD) { 5308 token = bpf_token_get_from_fd(attr->fd_by_id_token_fd); 5309 if (IS_ERR(token)) 5310 return PTR_ERR(token); 5311 if (!bpf_token_allow_cmd(token, BPF_BTF_GET_FD_BY_ID)) { 5312 bpf_token_put(token); 5313 token = NULL; 5314 } 5315 } 5316 5317 if (!bpf_token_capable(token, CAP_SYS_ADMIN)) { 5318 bpf_token_put(token); 5319 return -EPERM; 5320 } 5321 5322 bpf_token_put(token); 5323 5324 return btf_get_fd_by_id(attr->btf_id); 5325 } 5326 5327 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 5328 union bpf_attr __user *uattr, 5329 u32 prog_id, u32 fd_type, 5330 const char *buf, u64 probe_offset, 5331 u64 probe_addr) 5332 { 5333 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 5334 u32 len = buf ? 
strlen(buf) : 0, input_len; 5335 int err = 0; 5336 5337 if (put_user(len, &uattr->task_fd_query.buf_len)) 5338 return -EFAULT; 5339 input_len = attr->task_fd_query.buf_len; 5340 if (input_len && ubuf) { 5341 if (!len) { 5342 /* nothing to copy, just make ubuf NULL terminated */ 5343 char zero = '\0'; 5344 5345 if (put_user(zero, ubuf)) 5346 return -EFAULT; 5347 } else if (input_len >= len + 1) { 5348 /* ubuf can hold the string with NULL terminator */ 5349 if (copy_to_user(ubuf, buf, len + 1)) 5350 return -EFAULT; 5351 } else { 5352 /* ubuf cannot hold the string with NULL terminator, 5353 * do a partial copy with NULL terminator. 5354 */ 5355 char zero = '\0'; 5356 5357 err = -ENOSPC; 5358 if (copy_to_user(ubuf, buf, input_len - 1)) 5359 return -EFAULT; 5360 if (put_user(zero, ubuf + input_len - 1)) 5361 return -EFAULT; 5362 } 5363 } 5364 5365 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 5366 put_user(fd_type, &uattr->task_fd_query.fd_type) || 5367 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 5368 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 5369 return -EFAULT; 5370 5371 return err; 5372 } 5373 5374 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 5375 5376 static int bpf_task_fd_query(const union bpf_attr *attr, 5377 union bpf_attr __user *uattr) 5378 { 5379 pid_t pid = attr->task_fd_query.pid; 5380 u32 fd = attr->task_fd_query.fd; 5381 const struct perf_event *event; 5382 struct task_struct *task; 5383 struct file *file; 5384 int err; 5385 5386 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 5387 return -EINVAL; 5388 5389 if (!capable(CAP_SYS_ADMIN)) 5390 return -EPERM; 5391 5392 if (attr->task_fd_query.flags != 0) 5393 return -EINVAL; 5394 5395 rcu_read_lock(); 5396 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 5397 rcu_read_unlock(); 5398 if (!task) 5399 return -ENOENT; 5400 5401 err = 0; 5402 file = fget_task(task, fd); 5403 put_task_struct(task); 5404 if (!file) 5405 return -EBADF; 5406 5407 if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) { 5408 struct bpf_link *link = file->private_data; 5409 5410 if (link->ops == &bpf_raw_tp_link_lops) { 5411 struct bpf_raw_tp_link *raw_tp = 5412 container_of(link, struct bpf_raw_tp_link, link); 5413 struct bpf_raw_event_map *btp = raw_tp->btp; 5414 5415 err = bpf_task_fd_query_copy(attr, uattr, 5416 raw_tp->link.prog->aux->id, 5417 BPF_FD_TYPE_RAW_TRACEPOINT, 5418 btp->tp->name, 0, 0); 5419 goto put_file; 5420 } 5421 goto out_not_supp; 5422 } 5423 5424 event = perf_get_event(file); 5425 if (!IS_ERR(event)) { 5426 u64 probe_offset, probe_addr; 5427 u32 prog_id, fd_type; 5428 const char *buf; 5429 5430 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 5431 &buf, &probe_offset, 5432 &probe_addr, NULL); 5433 if (!err) 5434 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 5435 fd_type, buf, 5436 probe_offset, 5437 probe_addr); 5438 goto put_file; 5439 } 5440 5441 out_not_supp: 5442 err = -ENOTSUPP; 5443 put_file: 5444 fput(file); 5445 return err; 5446 } 5447 5448 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 5449 5450 #define BPF_DO_BATCH(fn, ...) 
\ 5451 do { \ 5452 if (!fn) { \ 5453 err = -ENOTSUPP; \ 5454 goto err_put; \ 5455 } \ 5456 err = fn(__VA_ARGS__); \ 5457 } while (0) 5458 5459 static int bpf_map_do_batch(const union bpf_attr *attr, 5460 union bpf_attr __user *uattr, 5461 int cmd) 5462 { 5463 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH || 5464 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; 5465 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; 5466 struct bpf_map *map; 5467 int err; 5468 5469 if (CHECK_ATTR(BPF_MAP_BATCH)) 5470 return -EINVAL; 5471 5472 CLASS(fd, f)(attr->batch.map_fd); 5473 5474 map = __bpf_map_get(f); 5475 if (IS_ERR(map)) 5476 return PTR_ERR(map); 5477 if (has_write) 5478 bpf_map_write_active_inc(map); 5479 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 5480 err = -EPERM; 5481 goto err_put; 5482 } 5483 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 5484 err = -EPERM; 5485 goto err_put; 5486 } 5487 5488 if (cmd == BPF_MAP_LOOKUP_BATCH) 5489 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); 5490 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 5491 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); 5492 else if (cmd == BPF_MAP_UPDATE_BATCH) 5493 BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr); 5494 else 5495 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); 5496 err_put: 5497 if (has_write) { 5498 maybe_wait_bpf_programs(map); 5499 bpf_map_write_active_dec(map); 5500 } 5501 return err; 5502 } 5503 5504 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid 5505 static int link_create(union bpf_attr *attr, bpfptr_t uattr) 5506 { 5507 struct bpf_prog *prog; 5508 int ret; 5509 5510 if (CHECK_ATTR(BPF_LINK_CREATE)) 5511 return -EINVAL; 5512 5513 if (attr->link_create.attach_type == BPF_STRUCT_OPS) 5514 return bpf_struct_ops_link_create(attr); 5515 5516 prog = bpf_prog_get(attr->link_create.prog_fd); 5517 if (IS_ERR(prog)) 5518 return PTR_ERR(prog); 5519 5520 ret = bpf_prog_attach_check_attach_type(prog, 5521 attr->link_create.attach_type); 5522 if (ret) 5523 goto out; 5524 5525 switch (prog->type) { 5526 case BPF_PROG_TYPE_CGROUP_SKB: 5527 case BPF_PROG_TYPE_CGROUP_SOCK: 5528 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 5529 case BPF_PROG_TYPE_SOCK_OPS: 5530 case BPF_PROG_TYPE_CGROUP_DEVICE: 5531 case BPF_PROG_TYPE_CGROUP_SYSCTL: 5532 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 5533 ret = cgroup_bpf_link_attach(attr, prog); 5534 break; 5535 case BPF_PROG_TYPE_EXT: 5536 ret = bpf_tracing_prog_attach(prog, 5537 attr->link_create.target_fd, 5538 attr->link_create.target_btf_id, 5539 attr->link_create.tracing.cookie); 5540 break; 5541 case BPF_PROG_TYPE_LSM: 5542 case BPF_PROG_TYPE_TRACING: 5543 if (attr->link_create.attach_type != prog->expected_attach_type) { 5544 ret = -EINVAL; 5545 goto out; 5546 } 5547 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 5548 ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie); 5549 else if (prog->expected_attach_type == BPF_TRACE_ITER) 5550 ret = bpf_iter_link_attach(attr, uattr, prog); 5551 else if (prog->expected_attach_type == BPF_LSM_CGROUP) 5552 ret = cgroup_bpf_link_attach(attr, prog); 5553 else 5554 ret = bpf_tracing_prog_attach(prog, 5555 attr->link_create.target_fd, 5556 attr->link_create.target_btf_id, 5557 attr->link_create.tracing.cookie); 5558 break; 5559 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5560 case BPF_PROG_TYPE_SK_LOOKUP: 5561 ret = netns_bpf_link_create(attr, prog); 5562 break; 5563 case BPF_PROG_TYPE_SK_MSG: 5564 case BPF_PROG_TYPE_SK_SKB: 5565 ret = 
sock_map_link_create(attr, prog); 5566 break; 5567 #ifdef CONFIG_NET 5568 case BPF_PROG_TYPE_XDP: 5569 ret = bpf_xdp_link_attach(attr, prog); 5570 break; 5571 case BPF_PROG_TYPE_SCHED_CLS: 5572 if (attr->link_create.attach_type == BPF_TCX_INGRESS || 5573 attr->link_create.attach_type == BPF_TCX_EGRESS) 5574 ret = tcx_link_attach(attr, prog); 5575 else 5576 ret = netkit_link_attach(attr, prog); 5577 break; 5578 case BPF_PROG_TYPE_NETFILTER: 5579 ret = bpf_nf_link_attach(attr, prog); 5580 break; 5581 #endif 5582 case BPF_PROG_TYPE_PERF_EVENT: 5583 case BPF_PROG_TYPE_TRACEPOINT: 5584 ret = bpf_perf_link_attach(attr, prog); 5585 break; 5586 case BPF_PROG_TYPE_KPROBE: 5587 if (attr->link_create.attach_type == BPF_PERF_EVENT) 5588 ret = bpf_perf_link_attach(attr, prog); 5589 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI || 5590 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION) 5591 ret = bpf_kprobe_multi_link_attach(attr, prog); 5592 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI || 5593 attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION) 5594 ret = bpf_uprobe_multi_link_attach(attr, prog); 5595 break; 5596 default: 5597 ret = -EINVAL; 5598 } 5599 5600 out: 5601 if (ret < 0) 5602 bpf_prog_put(prog); 5603 return ret; 5604 } 5605 5606 static int link_update_map(struct bpf_link *link, union bpf_attr *attr) 5607 { 5608 struct bpf_map *new_map, *old_map = NULL; 5609 int ret; 5610 5611 new_map = bpf_map_get(attr->link_update.new_map_fd); 5612 if (IS_ERR(new_map)) 5613 return PTR_ERR(new_map); 5614 5615 if (attr->link_update.flags & BPF_F_REPLACE) { 5616 old_map = bpf_map_get(attr->link_update.old_map_fd); 5617 if (IS_ERR(old_map)) { 5618 ret = PTR_ERR(old_map); 5619 goto out_put; 5620 } 5621 } else if (attr->link_update.old_map_fd) { 5622 ret = -EINVAL; 5623 goto out_put; 5624 } 5625 5626 ret = link->ops->update_map(link, new_map, old_map); 5627 5628 if (old_map) 5629 bpf_map_put(old_map); 5630 out_put: 5631 bpf_map_put(new_map); 5632 return ret; 5633 } 5634 5635 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd 5636 5637 static int link_update(union bpf_attr *attr) 5638 { 5639 struct bpf_prog *old_prog = NULL, *new_prog; 5640 struct bpf_link *link; 5641 u32 flags; 5642 int ret; 5643 5644 if (CHECK_ATTR(BPF_LINK_UPDATE)) 5645 return -EINVAL; 5646 5647 flags = attr->link_update.flags; 5648 if (flags & ~BPF_F_REPLACE) 5649 return -EINVAL; 5650 5651 link = bpf_link_get_from_fd(attr->link_update.link_fd); 5652 if (IS_ERR(link)) 5653 return PTR_ERR(link); 5654 5655 if (link->ops->update_map) { 5656 ret = link_update_map(link, attr); 5657 goto out_put_link; 5658 } 5659 5660 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); 5661 if (IS_ERR(new_prog)) { 5662 ret = PTR_ERR(new_prog); 5663 goto out_put_link; 5664 } 5665 5666 if (flags & BPF_F_REPLACE) { 5667 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); 5668 if (IS_ERR(old_prog)) { 5669 ret = PTR_ERR(old_prog); 5670 old_prog = NULL; 5671 goto out_put_progs; 5672 } 5673 } else if (attr->link_update.old_prog_fd) { 5674 ret = -EINVAL; 5675 goto out_put_progs; 5676 } 5677 5678 if (link->ops->update_prog) 5679 ret = link->ops->update_prog(link, new_prog, old_prog); 5680 else 5681 ret = -EINVAL; 5682 5683 out_put_progs: 5684 if (old_prog) 5685 bpf_prog_put(old_prog); 5686 if (ret) 5687 bpf_prog_put(new_prog); 5688 out_put_link: 5689 bpf_link_put_direct(link); 5690 return ret; 5691 } 5692 5693 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd 5694 5695 static int link_detach(union 
bpf_attr *attr) 5696 { 5697 struct bpf_link *link; 5698 int ret; 5699 5700 if (CHECK_ATTR(BPF_LINK_DETACH)) 5701 return -EINVAL; 5702 5703 link = bpf_link_get_from_fd(attr->link_detach.link_fd); 5704 if (IS_ERR(link)) 5705 return PTR_ERR(link); 5706 5707 if (link->ops->detach) 5708 ret = link->ops->detach(link); 5709 else 5710 ret = -EOPNOTSUPP; 5711 5712 bpf_link_put_direct(link); 5713 return ret; 5714 } 5715 5716 struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) 5717 { 5718 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); 5719 } 5720 EXPORT_SYMBOL(bpf_link_inc_not_zero); 5721 5722 struct bpf_link *bpf_link_by_id(u32 id) 5723 { 5724 struct bpf_link *link; 5725 5726 if (!id) 5727 return ERR_PTR(-ENOENT); 5728 5729 spin_lock_bh(&link_idr_lock); 5730 /* before link is "settled", ID is 0, pretend it doesn't exist yet */ 5731 link = idr_find(&link_idr, id); 5732 if (link) { 5733 if (link->id) 5734 link = bpf_link_inc_not_zero(link); 5735 else 5736 link = ERR_PTR(-EAGAIN); 5737 } else { 5738 link = ERR_PTR(-ENOENT); 5739 } 5740 spin_unlock_bh(&link_idr_lock); 5741 return link; 5742 } 5743 5744 struct bpf_link *bpf_link_get_curr_or_next(u32 *id) 5745 { 5746 struct bpf_link *link; 5747 5748 spin_lock_bh(&link_idr_lock); 5749 again: 5750 link = idr_get_next(&link_idr, id); 5751 if (link) { 5752 link = bpf_link_inc_not_zero(link); 5753 if (IS_ERR(link)) { 5754 (*id)++; 5755 goto again; 5756 } 5757 } 5758 spin_unlock_bh(&link_idr_lock); 5759 5760 return link; 5761 } 5762 5763 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id 5764 5765 static int bpf_link_get_fd_by_id(const union bpf_attr *attr) 5766 { 5767 struct bpf_link *link; 5768 u32 id = attr->link_id; 5769 int fd; 5770 5771 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) 5772 return -EINVAL; 5773 5774 if (!capable(CAP_SYS_ADMIN)) 5775 return -EPERM; 5776 5777 link = bpf_link_by_id(id); 5778 if (IS_ERR(link)) 5779 return PTR_ERR(link); 5780 5781 fd = bpf_link_new_fd(link); 5782 if (fd < 0) 5783 bpf_link_put_direct(link); 5784 5785 return fd; 5786 } 5787 5788 DEFINE_MUTEX(bpf_stats_enabled_mutex); 5789 5790 static int bpf_stats_release(struct inode *inode, struct file *file) 5791 { 5792 mutex_lock(&bpf_stats_enabled_mutex); 5793 static_key_slow_dec(&bpf_stats_enabled_key.key); 5794 mutex_unlock(&bpf_stats_enabled_mutex); 5795 return 0; 5796 } 5797 5798 static const struct file_operations bpf_stats_fops = { 5799 .release = bpf_stats_release, 5800 }; 5801 5802 static int bpf_enable_runtime_stats(void) 5803 { 5804 int fd; 5805 5806 mutex_lock(&bpf_stats_enabled_mutex); 5807 5808 /* Set a very high limit to avoid overflow */ 5809 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { 5810 mutex_unlock(&bpf_stats_enabled_mutex); 5811 return -EBUSY; 5812 } 5813 5814 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); 5815 if (fd >= 0) 5816 static_key_slow_inc(&bpf_stats_enabled_key.key); 5817 5818 mutex_unlock(&bpf_stats_enabled_mutex); 5819 return fd; 5820 } 5821 5822 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type 5823 5824 static int bpf_enable_stats(union bpf_attr *attr) 5825 { 5826 5827 if (CHECK_ATTR(BPF_ENABLE_STATS)) 5828 return -EINVAL; 5829 5830 if (!capable(CAP_SYS_ADMIN)) 5831 return -EPERM; 5832 5833 switch (attr->enable_stats.type) { 5834 case BPF_STATS_RUN_TIME: 5835 return bpf_enable_runtime_stats(); 5836 default: 5837 break; 5838 } 5839 return -EINVAL; 5840 } 5841 5842 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags 5843 5844 static int bpf_iter_create(union 
bpf_attr *attr) 5845 { 5846 struct bpf_link *link; 5847 int err; 5848 5849 if (CHECK_ATTR(BPF_ITER_CREATE)) 5850 return -EINVAL; 5851 5852 if (attr->iter_create.flags) 5853 return -EINVAL; 5854 5855 link = bpf_link_get_from_fd(attr->iter_create.link_fd); 5856 if (IS_ERR(link)) 5857 return PTR_ERR(link); 5858 5859 err = bpf_iter_new_fd(link); 5860 bpf_link_put_direct(link); 5861 5862 return err; 5863 } 5864 5865 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags 5866 5867 static int bpf_prog_bind_map(union bpf_attr *attr) 5868 { 5869 struct bpf_prog *prog; 5870 struct bpf_map *map; 5871 struct bpf_map **used_maps_old, **used_maps_new; 5872 int i, ret = 0; 5873 5874 if (CHECK_ATTR(BPF_PROG_BIND_MAP)) 5875 return -EINVAL; 5876 5877 if (attr->prog_bind_map.flags) 5878 return -EINVAL; 5879 5880 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); 5881 if (IS_ERR(prog)) 5882 return PTR_ERR(prog); 5883 5884 map = bpf_map_get(attr->prog_bind_map.map_fd); 5885 if (IS_ERR(map)) { 5886 ret = PTR_ERR(map); 5887 goto out_prog_put; 5888 } 5889 5890 mutex_lock(&prog->aux->used_maps_mutex); 5891 5892 used_maps_old = prog->aux->used_maps; 5893 5894 for (i = 0; i < prog->aux->used_map_cnt; i++) 5895 if (used_maps_old[i] == map) { 5896 bpf_map_put(map); 5897 goto out_unlock; 5898 } 5899 5900 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, 5901 sizeof(used_maps_new[0]), 5902 GFP_KERNEL); 5903 if (!used_maps_new) { 5904 ret = -ENOMEM; 5905 goto out_unlock; 5906 } 5907 5908 /* The bpf program will not access the bpf map, but for the sake of 5909 * simplicity, increase sleepable_refcnt for sleepable program as well. 5910 */ 5911 if (prog->sleepable) 5912 atomic64_inc(&map->sleepable_refcnt); 5913 memcpy(used_maps_new, used_maps_old, 5914 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); 5915 used_maps_new[prog->aux->used_map_cnt] = map; 5916 5917 prog->aux->used_map_cnt++; 5918 prog->aux->used_maps = used_maps_new; 5919 5920 kfree(used_maps_old); 5921 5922 out_unlock: 5923 mutex_unlock(&prog->aux->used_maps_mutex); 5924 5925 if (ret) 5926 bpf_map_put(map); 5927 out_prog_put: 5928 bpf_prog_put(prog); 5929 return ret; 5930 } 5931 5932 #define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd 5933 5934 static int token_create(union bpf_attr *attr) 5935 { 5936 if (CHECK_ATTR(BPF_TOKEN_CREATE)) 5937 return -EINVAL; 5938 5939 /* no flags are supported yet */ 5940 if (attr->token_create.flags) 5941 return -EINVAL; 5942 5943 return bpf_token_create(attr); 5944 } 5945 5946 #define BPF_PROG_STREAM_READ_BY_FD_LAST_FIELD prog_stream_read.prog_fd 5947 5948 static int prog_stream_read(union bpf_attr *attr) 5949 { 5950 char __user *buf = u64_to_user_ptr(attr->prog_stream_read.stream_buf); 5951 u32 len = attr->prog_stream_read.stream_buf_len; 5952 struct bpf_prog *prog; 5953 int ret; 5954 5955 if (CHECK_ATTR(BPF_PROG_STREAM_READ_BY_FD)) 5956 return -EINVAL; 5957 5958 prog = bpf_prog_get(attr->prog_stream_read.prog_fd); 5959 if (IS_ERR(prog)) 5960 return PTR_ERR(prog); 5961 5962 ret = bpf_prog_stream_read(prog, attr->prog_stream_read.stream_id, buf, len); 5963 bpf_prog_put(prog); 5964 5965 return ret; 5966 } 5967 5968 static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size) 5969 { 5970 union bpf_attr attr; 5971 int err; 5972 5973 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 5974 if (err) 5975 return err; 5976 size = min_t(u32, size, sizeof(attr)); 5977 5978 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 5979 memset(&attr, 0, sizeof(attr)); 5980 if 
(copy_from_bpfptr(&attr, uattr, size) != 0) 5981 return -EFAULT; 5982 5983 err = security_bpf(cmd, &attr, size, uattr.is_kernel); 5984 if (err < 0) 5985 return err; 5986 5987 switch (cmd) { 5988 case BPF_MAP_CREATE: 5989 err = map_create(&attr, uattr.is_kernel); 5990 break; 5991 case BPF_MAP_LOOKUP_ELEM: 5992 err = map_lookup_elem(&attr); 5993 break; 5994 case BPF_MAP_UPDATE_ELEM: 5995 err = map_update_elem(&attr, uattr); 5996 break; 5997 case BPF_MAP_DELETE_ELEM: 5998 err = map_delete_elem(&attr, uattr); 5999 break; 6000 case BPF_MAP_GET_NEXT_KEY: 6001 err = map_get_next_key(&attr); 6002 break; 6003 case BPF_MAP_FREEZE: 6004 err = map_freeze(&attr); 6005 break; 6006 case BPF_PROG_LOAD: 6007 err = bpf_prog_load(&attr, uattr, size); 6008 break; 6009 case BPF_OBJ_PIN: 6010 err = bpf_obj_pin(&attr); 6011 break; 6012 case BPF_OBJ_GET: 6013 err = bpf_obj_get(&attr); 6014 break; 6015 case BPF_PROG_ATTACH: 6016 err = bpf_prog_attach(&attr); 6017 break; 6018 case BPF_PROG_DETACH: 6019 err = bpf_prog_detach(&attr); 6020 break; 6021 case BPF_PROG_QUERY: 6022 err = bpf_prog_query(&attr, uattr.user); 6023 break; 6024 case BPF_PROG_TEST_RUN: 6025 err = bpf_prog_test_run(&attr, uattr.user); 6026 break; 6027 case BPF_PROG_GET_NEXT_ID: 6028 err = bpf_obj_get_next_id(&attr, uattr.user, 6029 &prog_idr, &prog_idr_lock); 6030 break; 6031 case BPF_MAP_GET_NEXT_ID: 6032 err = bpf_obj_get_next_id(&attr, uattr.user, 6033 &map_idr, &map_idr_lock); 6034 break; 6035 case BPF_BTF_GET_NEXT_ID: 6036 err = bpf_obj_get_next_id(&attr, uattr.user, 6037 &btf_idr, &btf_idr_lock); 6038 break; 6039 case BPF_PROG_GET_FD_BY_ID: 6040 err = bpf_prog_get_fd_by_id(&attr); 6041 break; 6042 case BPF_MAP_GET_FD_BY_ID: 6043 err = bpf_map_get_fd_by_id(&attr); 6044 break; 6045 case BPF_OBJ_GET_INFO_BY_FD: 6046 err = bpf_obj_get_info_by_fd(&attr, uattr.user); 6047 break; 6048 case BPF_RAW_TRACEPOINT_OPEN: 6049 err = bpf_raw_tracepoint_open(&attr); 6050 break; 6051 case BPF_BTF_LOAD: 6052 err = bpf_btf_load(&attr, uattr, size); 6053 break; 6054 case BPF_BTF_GET_FD_BY_ID: 6055 err = bpf_btf_get_fd_by_id(&attr); 6056 break; 6057 case BPF_TASK_FD_QUERY: 6058 err = bpf_task_fd_query(&attr, uattr.user); 6059 break; 6060 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 6061 err = map_lookup_and_delete_elem(&attr); 6062 break; 6063 case BPF_MAP_LOOKUP_BATCH: 6064 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH); 6065 break; 6066 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 6067 err = bpf_map_do_batch(&attr, uattr.user, 6068 BPF_MAP_LOOKUP_AND_DELETE_BATCH); 6069 break; 6070 case BPF_MAP_UPDATE_BATCH: 6071 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH); 6072 break; 6073 case BPF_MAP_DELETE_BATCH: 6074 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH); 6075 break; 6076 case BPF_LINK_CREATE: 6077 err = link_create(&attr, uattr); 6078 break; 6079 case BPF_LINK_UPDATE: 6080 err = link_update(&attr); 6081 break; 6082 case BPF_LINK_GET_FD_BY_ID: 6083 err = bpf_link_get_fd_by_id(&attr); 6084 break; 6085 case BPF_LINK_GET_NEXT_ID: 6086 err = bpf_obj_get_next_id(&attr, uattr.user, 6087 &link_idr, &link_idr_lock); 6088 break; 6089 case BPF_ENABLE_STATS: 6090 err = bpf_enable_stats(&attr); 6091 break; 6092 case BPF_ITER_CREATE: 6093 err = bpf_iter_create(&attr); 6094 break; 6095 case BPF_LINK_DETACH: 6096 err = link_detach(&attr); 6097 break; 6098 case BPF_PROG_BIND_MAP: 6099 err = bpf_prog_bind_map(&attr); 6100 break; 6101 case BPF_TOKEN_CREATE: 6102 err = token_create(&attr); 6103 break; 6104 case BPF_PROG_STREAM_READ_BY_FD: 6105 
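		/*
		 * Copies up to attr->prog_stream_read.stream_buf_len bytes
		 * of the chosen program stream into the user buffer, see
		 * prog_stream_read() above.
		 */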
err = prog_stream_read(&attr); 6106 break; 6107 default: 6108 err = -EINVAL; 6109 break; 6110 } 6111 6112 return err; 6113 } 6114 6115 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 6116 { 6117 return __sys_bpf(cmd, USER_BPFPTR(uattr), size); 6118 } 6119 6120 static bool syscall_prog_is_valid_access(int off, int size, 6121 enum bpf_access_type type, 6122 const struct bpf_prog *prog, 6123 struct bpf_insn_access_aux *info) 6124 { 6125 if (off < 0 || off >= U16_MAX) 6126 return false; 6127 if (off % size != 0) 6128 return false; 6129 return true; 6130 } 6131 6132 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size) 6133 { 6134 switch (cmd) { 6135 case BPF_MAP_CREATE: 6136 case BPF_MAP_DELETE_ELEM: 6137 case BPF_MAP_UPDATE_ELEM: 6138 case BPF_MAP_FREEZE: 6139 case BPF_MAP_GET_FD_BY_ID: 6140 case BPF_PROG_LOAD: 6141 case BPF_BTF_LOAD: 6142 case BPF_LINK_CREATE: 6143 case BPF_RAW_TRACEPOINT_OPEN: 6144 break; 6145 default: 6146 return -EINVAL; 6147 } 6148 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size); 6149 } 6150 6151 6152 /* To shut up -Wmissing-prototypes. 6153 * This function is used by the kernel light skeleton 6154 * to load bpf programs when modules are loaded or during kernel boot. 6155 * See tools/lib/bpf/skel_internal.h 6156 */ 6157 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); 6158 6159 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size) 6160 { 6161 struct bpf_prog * __maybe_unused prog; 6162 struct bpf_tramp_run_ctx __maybe_unused run_ctx; 6163 6164 switch (cmd) { 6165 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */ 6166 case BPF_PROG_TEST_RUN: 6167 if (attr->test.data_in || attr->test.data_out || 6168 attr->test.ctx_out || attr->test.duration || 6169 attr->test.repeat || attr->test.flags) 6170 return -EINVAL; 6171 6172 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL); 6173 if (IS_ERR(prog)) 6174 return PTR_ERR(prog); 6175 6176 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset || 6177 attr->test.ctx_size_in > U16_MAX) { 6178 bpf_prog_put(prog); 6179 return -EINVAL; 6180 } 6181 6182 run_ctx.bpf_cookie = 0; 6183 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) { 6184 /* recursion detected */ 6185 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx); 6186 bpf_prog_put(prog); 6187 return -EBUSY; 6188 } 6189 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in); 6190 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */, 6191 &run_ctx); 6192 bpf_prog_put(prog); 6193 return 0; 6194 #endif 6195 default: 6196 return ____bpf_sys_bpf(cmd, attr, size); 6197 } 6198 } 6199 EXPORT_SYMBOL_NS(kern_sys_bpf, "BPF_INTERNAL"); 6200 6201 static const struct bpf_func_proto bpf_sys_bpf_proto = { 6202 .func = bpf_sys_bpf, 6203 .gpl_only = false, 6204 .ret_type = RET_INTEGER, 6205 .arg1_type = ARG_ANYTHING, 6206 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6207 .arg3_type = ARG_CONST_SIZE, 6208 }; 6209 6210 const struct bpf_func_proto * __weak 6211 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 6212 { 6213 return bpf_base_func_proto(func_id, prog); 6214 } 6215 6216 BPF_CALL_1(bpf_sys_close, u32, fd) 6217 { 6218 /* When bpf program calls this helper there should not be 6219 * an fdget() without matching completed fdput(). 
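 * Otherwise close_fd() could release a struct file that an outer,
 * still-pending fdget() borrowed without taking its own reference.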
BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{
	switch (cmd) {
	case BPF_MAP_CREATE:
	case BPF_MAP_DELETE_ELEM:
	case BPF_MAP_UPDATE_ELEM:
	case BPF_MAP_FREEZE:
	case BPF_MAP_GET_FD_BY_ID:
	case BPF_PROG_LOAD:
	case BPF_BTF_LOAD:
	case BPF_LINK_CREATE:
	case BPF_RAW_TRACEPOINT_OPEN:
		break;
	default:
		return -EINVAL;
	}
	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}

/* To shut up -Wmissing-prototypes.
 * This function is used by the kernel light skeleton
 * to load bpf programs when modules are loaded or during kernel boot.
 * See tools/lib/bpf/skel_internal.h
 */
int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);

int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_prog * __maybe_unused prog;
	struct bpf_tramp_run_ctx __maybe_unused run_ctx;

	switch (cmd) {
#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
	case BPF_PROG_TEST_RUN:
		if (attr->test.data_in || attr->test.data_out ||
		    attr->test.ctx_out || attr->test.duration ||
		    attr->test.repeat || attr->test.flags)
			return -EINVAL;

		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
		    attr->test.ctx_size_in > U16_MAX) {
			bpf_prog_put(prog);
			return -EINVAL;
		}

		run_ctx.bpf_cookie = 0;
		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
			/* recursion detected */
			__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
			bpf_prog_put(prog);
			return -EBUSY;
		}
		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
						&run_ctx);
		bpf_prog_put(prog);
		return 0;
#endif
	default:
		return ____bpf_sys_bpf(cmd, attr, size);
	}
}
EXPORT_SYMBOL_NS(kern_sys_bpf, "BPF_INTERNAL");

static const struct bpf_func_proto bpf_sys_bpf_proto = {
	.func		= bpf_sys_bpf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id, prog);
}

BPF_CALL_1(bpf_sys_close, u32, fd)
{
	/* When bpf program calls this helper there should not be
	 * an fdget() without matching completed fdput().
	 * This helper is allowed in the following callchain only:
	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
	 */
	return close_fd(fd);
}

static const struct bpf_func_proto bpf_sys_close_proto = {
	.func		= bpf_sys_close,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
	*res = 0;
	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	if (!bpf_dump_raw_ok(current_cred()))
		return -EPERM;

	*res = kallsyms_lookup_name(name);
	return *res ? 0 : -ENOENT;
}

static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
	.func		= bpf_kallsyms_lookup_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg4_size	= sizeof(u64),
};

static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sys_bpf:
		return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
		       ? NULL : &bpf_sys_bpf_proto;
	case BPF_FUNC_btf_find_by_name_kind:
		return &bpf_btf_find_by_name_kind_proto;
	case BPF_FUNC_sys_close:
		return &bpf_sys_close_proto;
	case BPF_FUNC_kallsyms_lookup_name:
		return &bpf_kallsyms_lookup_name_proto;
	default:
		return tracing_prog_func_proto(func_id, prog);
	}
}

const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
	.get_func_proto		= syscall_prog_func_proto,
	.is_valid_access	= syscall_prog_is_valid_access,
};

const struct bpf_prog_ops bpf_syscall_prog_ops = {
	.test_run = bpf_prog_test_run_syscall,
};

#ifdef CONFIG_SYSCTL
static int bpf_stats_handler(const struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct static_key *key = (struct static_key *)table->data;
	static int saved_val;
	int val, ret;
	struct ctl_table tmp = {
		.data	= &val,
		.maxlen	= sizeof(val),
		.mode	= table->mode,
		.extra1	= SYSCTL_ZERO,
		.extra2	= SYSCTL_ONE,
	};

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&bpf_stats_enabled_mutex);
	val = saved_val;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret && val != saved_val) {
		if (val)
			static_key_slow_inc(key);
		else
			static_key_slow_dec(key);
		saved_val = val;
	}
	mutex_unlock(&bpf_stats_enabled_mutex);
	return ret;
}

void __weak unpriv_ebpf_notify(int new_state)
{
}

static int bpf_unpriv_handler(const struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, unpriv_enable = *(int *)table->data;
	bool locked_state = unpriv_enable == 1;
	struct ctl_table tmp = *table;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	tmp.data = &unpriv_enable;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret) {
		if (locked_state && unpriv_enable != 1)
			return -EPERM;
		*(int *)table->data = unpriv_enable;
	}

	if (write)
		unpriv_ebpf_notify(unpriv_enable);

	return ret;
}

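/*
 * Sysctl table for the two handlers above. unprivileged_bpf_disabled
 * accepts 0 (unprivileged bpf() allowed), 1 (disabled; once set,
 * bpf_unpriv_handler() refuses any write that would change it) and
 * 2 (disabled, but CAP_SYS_ADMIN may still rewrite the value).
 * bpf_stats_enabled flips the bpf_stats_enabled_key static branch.
 * E.g. (illustrative):
 *
 *	# echo 2 > /proc/sys/kernel/unprivileged_bpf_disabled
 *	# echo 1 > /proc/sys/kernel/bpf_stats_enabled
 */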
static const struct ctl_table bpf_syscall_table[] = {
	{
		.procname	= "unprivileged_bpf_disabled",
		.data		= &sysctl_unprivileged_bpf_disabled,
		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
		.mode		= 0644,
		.proc_handler	= bpf_unpriv_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "bpf_stats_enabled",
		.data		= &bpf_stats_enabled_key.key,
		.mode		= 0644,
		.proc_handler	= bpf_stats_handler,
	},
};

static int __init bpf_syscall_sysctl_init(void)
{
	register_sysctl_init("kernel", bpf_syscall_table);
	return 0;
}
late_initcall(bpf_syscall_sysctl_init);
#endif /* CONFIG_SYSCTL */