// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>
#include <net/netfilter/nf_bpf_link.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
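/* Illustrative sketch, not part of this file: syscall handlers pair the
 * check above with a truncating copy, roughly along these lines:
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	if (copy_from_bpfptr(&attr, uattr, size))
 *		return -EFAULT;
 *
 * A newer userspace passing a larger bpf_attr thus succeeds only when all
 * trailing bytes this kernel does not understand are zero; otherwise the
 * command fails with -E2BIG instead of silently ignoring them.
 */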
const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = bpf_map_offload_map_mem_usage,
};

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}
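/* Illustrative note, not part of this file: for the per-CPU map types above,
 * syscall-side lookups and updates exchange one 8-byte-aligned slot per
 * possible CPU. E.g. with 4 possible CPUs and value_size == 12:
 *
 *	round_up(12, 8) * 4 == 64	// bytes; slots at offsets 0/16/32/48
 *
 * which is why the helpers below always size buffers with
 * bpf_map_value_size() rather than map->value_size.
 */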
static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_offloaded(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, map_file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_offloaded(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}
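/* Illustrative sketch, not part of this file: BPF_F_LOCK in the slow path
 * above gives userspace a consistent snapshot of a value that embeds a
 * struct bpf_spin_lock. A hypothetical caller might do:
 *
 *	struct { struct bpf_spin_lock lock; __u64 a, b; } val;
 *
 *	err = bpf_map_lookup_elem_flags(map_fd, &key, &val, BPF_F_LOCK);
 *
 * copy_map_value_locked() takes the in-kernel lock around the copy, and the
 * lock word itself is never copied out (check_and_init_map_value() zeroes
 * it in the destination buffer instead).
 */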
/* Please, do not use this function outside of the map creation path
 * (e.g. in map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}
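/* Illustrative note, not part of this file: with 4 KiB pages and
 * PAGE_ALLOC_COSTLY_ORDER == 3, the kmalloc fast path above is only tried
 * for requests up to 32 KiB:
 *
 *	PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER == 4096 << 3 == 32768
 *
 * Anything larger (or any kmalloc failure) falls through to vmalloc, whose
 * allocations kvfree() in bpf_map_area_free() handles transparently.
 */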
static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	spin_lock_irqsave(&map_idr_lock, flags);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	spin_unlock_irqrestore(&map_idr_lock, flags);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	if (memcg_bpf_enabled())
		map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif
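/* Illustrative sketch, not part of this file: map implementations use the
 * wrappers above so that memory allocated on behalf of a map after creation
 * (e.g. a new hash bucket on update) is charged to the cgroup that created
 * the map, not to whoever happens to trigger the allocation:
 *
 *	struct htab_elem *l;
 *
 *	l = bpf_map_kmalloc_node(&htab->map, elem_size,
 *				 GFP_ATOMIC | __GFP_NOWARN,
 *				 htab->map.numa_node);
 *
 * (names above follow hashtab.c loosely and are for illustration only).
 */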
static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
		return NULL;
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & field_mask))
		return NULL;
	return field;
}
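/* Illustrative note, not part of this file: btf_record_find() can use
 * bsearch() because rec->fields is kept sorted by offset with the same
 * comparator. A caller checking whether a map value has a kptr at a given
 * offset would do, roughly:
 *
 *	field = btf_record_find(map->record, off, BPF_KPTR);
 *	if (!field)
 *		return -EACCES;
 */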
void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			btf_put(rec->fields[i].kptr.btf);
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
			/* Nothing to release */
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}

struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = offsetof(struct btf_record, fields[rec->cnt]);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
			btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
			/* Nothing to acquire */
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = offsetof(struct btf_record, fields[rec_a->cnt]);
	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
	 * members are zeroed out. So memcmp is safe to do without worrying
	 * about padding/unused fields.
	 *
	 * While spin_lock, timer, and kptr have no relation to map BTF,
	 * list_head metadata is specific to map BTF, the btf and value_rec
	 * members in particular. btf is the map BTF, while value_rec points to
	 * btf_record in that map BTF.
	 *
	 * So while by default, we don't rely on the map BTF (which the records
	 * were parsed from) matching for both records, which is not backwards
	 * compatible, in case list_head is part of it, we implicitly rely on
	 * that by way of depending on memcmp succeeding for it.
	 */
	return !memcmp(rec_a, rec_b, size);
}

void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

extern void __bpf_obj_drop_impl(void *p, const struct btf_record *rec);

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_struct_meta *pointee_struct_meta;
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;
		void *xchgd_field;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
			if (!xchgd_field)
				break;

			if (!btf_is_kernel(field->kptr.btf)) {
				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
									   field->kptr.btf_id);
				WARN_ON_ONCE(!pointee_struct_meta);
				migrate_disable();
				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
								 pointee_struct_meta->record :
								 NULL);
				migrate_enable();
			} else {
				field->kptr.dtor(xchgd_field);
			}
			break;
		case BPF_LIST_HEAD:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_RB_ROOT:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_LIST_NODE:
		case BPF_RB_NODE:
		case BPF_REFCOUNT:
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}
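/* Illustrative note, not part of this file: the xchg() in the BPF_KPTR_REF
 * case above atomically takes ownership of the pointer, so a referenced
 * kptr is released exactly once even if a program swaps it concurrently:
 *
 *	old = (void *)xchg((unsigned long *)field_ptr, 0);
 *	if (old)
 *		release(old);	// only one path can observe a non-NULL 'old'
 *
 * ('release' stands in for the dtor or __bpf_obj_drop_impl() call).
 */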
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct btf_record *rec = map->record;

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
	/* Delay freeing of btf_record for maps, as map_free
	 * callback usually needs access to them. It is better to do it here
	 * than require each callback to do the free itself manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record was
	 * already freed using the map_free callback for map in map case which
	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
	 * template bpf_map struct used during verification.
	 */
	btf_record_free(rec);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		/* Avoid spawning kworkers, since they all might contend
		 * for the same mutex like slab_mutex.
		 */
		queue_work(system_unbound_wq, &map->work);
	}
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type  = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_usage(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif
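/* Illustrative note, not part of this file: the output above is what shows
 * up under /proc/<pid>/fdinfo/<map-fd>; e.g. for a small frozen array map
 * one might see (values are made up):
 *
 *	map_type:	2
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	16
 *	map_flags:	0x0
 *	map_extra:	0x0
 *	memlock:	4096
 *	map_id:	42
 *	frozen:	1
 */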
static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except the initial one) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory regions (including the initial one) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open = bpf_map_mmap_open,
	.close = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allow user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vm_flags_clear(vma, VM_MAYEXEC);
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vm_flags_clear(vma, VM_MAYWRITE);

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}
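/* Illustrative sketch, not part of this file: userspace can map a map
 * created with BPF_F_MMAPABLE (e.g. a BPF_MAP_TYPE_ARRAY) and access
 * values without a syscall round-trip:
 *
 *	__u64 *vals = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   map_fd, 0);
 *
 * bpf_map_mmap() above tracks writable mappings via writecnt, so that
 * BPF_MAP_FREEZE can refuse to freeze a map that is still writably mapped,
 * and a frozen map can no longer be mapped with PROT_WRITE.
 */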
static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
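/* Illustrative note, not part of this file: only [A-Za-z0-9_.] is accepted,
 * and the name must be NUL-terminated within the buffer:
 *
 *	bpf_obj_name_cpy(dst, "my_map.v2", BPF_OBJ_NAME_LEN);	// -> 9
 *	bpf_obj_name_cpy(dst, "my map",    BPF_OBJ_NAME_LEN);	// -> -EINVAL
 */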
int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type,
				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
				       BPF_RB_ROOT | BPF_REFCOUNT,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_capable()) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_REFCOUNT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_LIST_HEAD:
			case BPF_RB_ROOT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	ret = btf_check_and_fixup_fields(btf, map->record);
	if (ret < 0)
		goto free_map_tab;

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}
#define BPF_MAP_CREATE_LAST_FIELD map_extra
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 map_type = attr->map_type;
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map_type = attr->map_type;
	if (map_type >= ARRAY_SIZE(bpf_map_types))
		return -EINVAL;
	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[map_type];
	if (!ops)
		return -EINVAL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return err;
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	if (!ops->map_mem_usage)
		return -EINVAL;

	/* Intent here is for unprivileged_bpf_disabled to block BPF map
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
		return -EPERM;

	/* check privileged map type permissions */
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		/* unprivileged */
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
	case BPF_MAP_TYPE_BLOOM_FILTER:
	case BPF_MAP_TYPE_LPM_TRIE:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_STRUCT_OPS:
	case BPF_MAP_TYPE_CPUMAP:
		if (!bpf_capable())
			return -EPERM;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_XSKMAP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		break;
	default:
		WARN(1, "unsupported map type %d", map_type);
		return -EPERM;
	}

	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->ops = ops;
	map->map_type = map_type;

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel
	     * counterpart. Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}
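/* Illustrative sketch, not part of this file: the userspace side of the
 * path above, creating a small array map via the raw syscall:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 16,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the returned fd holds both a refcnt and a usercnt reference;
 * close(fd) drops them via bpf_map_release().
 */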
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held or the map should have been
 * protected by rcu read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}
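/* Illustrative sketch, not part of this file: kernel-side users of a map fd
 * follow the get/put pattern built from the helpers above:
 *
 *	struct bpf_map *map = bpf_map_get(ufd);	// takes a refcnt
 *
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	// ...use map...
 *	bpf_map_put(map);			// may schedule deferred free
 *
 * The *_with_uref variants additionally hold usercnt, which keeps
 * user-visible state (e.g. prog array contents) alive.
 */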
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);
	value = kvmemdup_bpfptr(uvalue, value_size);
	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}

	err = bpf_map_update_value(map, f.file, key, value, attr->flags);

	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}
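/* Illustrative note, not part of this file: attr->flags for the update path
 * carries the UAPI element flags, which map implementations interpret as:
 *
 *	BPF_ANY     - create the element or overwrite an existing one
 *	BPF_NOEXIST - create only; -EEXIST if the key is already present
 *	BPF_EXIST   - overwrite only; -ENOENT if the key is absent
 *	BPF_F_LOCK  - update under the value's embedded bpf_spin_lock
 */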
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}
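/* Illustrative sketch, not part of this file: userspace walks all keys by
 * chaining BPF_MAP_GET_NEXT_KEY calls, starting from a NULL key:
 *
 *	__u32 key, next;
 *
 *	if (bpf_map_get_next_key(map_fd, NULL, &next) == 0) {
 *		do {
 *			key = next;
 *			// ...lookup/process 'key'...
 *		} while (bpf_map_get_next_key(map_fd, &key, &next) == 0);
 *	}
 *
 * A final -ENOENT terminates the walk; keys deleted concurrently may be
 * skipped or may restart the iteration, depending on the map type.
 */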
int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_offloaded(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	maybe_wait_bpf_programs(map);
	return err;
}

int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	void *key, *value;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, map_file, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);
	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}
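/* Illustrative sketch, not part of this file: the batch interface above is
 * what libbpf's bpf_map_lookup_batch() wraps. in_batch/out_batch act as an
 * opaque cursor:
 *
 *	__u32 out = 0, count = 128;
 *
 *	err = bpf_map_lookup_batch(map_fd, NULL, &out, keys, vals,
 *				   &count, NULL);
 *	// the next call passes &out as in_batch to resume where this one
 *	// stopped; count is updated to the number of elements copied.
 *
 * (the cursor type is map-specific; a u32 is used here for illustration).
 */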
#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (attr->flags &&
	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
	     map->map_type == BPF_MAP_TYPE_STACK)) {
		err = -EINVAL;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -ENOTSUPP;
	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		if (!bpf_map_is_offloaded(map)) {
			bpf_disable_instrumentation();
			rcu_read_lock();
			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
			rcu_read_unlock();
			bpf_enable_instrumentation();
		}
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
		fdput(f);
		return -ENOTSUPP;
	}

	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		fdput(f);
		return -EPERM;
	}

	mutex_lock(&map->freeze_mutex);
	if (bpf_map_write_active(map)) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}
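/* Illustrative sketch, not part of this file: a common hardening pattern is
 * to populate a map once and then freeze it against syscall-side writes:
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *	// ...
 *	bpf_map_freeze(map_fd);		// BPF_MAP_FREEZE command
 *
 * After this, map_get_sys_perms() masks out FMODE_CAN_WRITE for every fd,
 * so updates and deletes fail with -EPERM, while BPF programs may still
 * write unless the map was also created with BPF_F_RDONLY_PROG.
 */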
static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_offloaded(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

enum bpf_audit {
	BPF_AUDIT_LOAD,
	BPF_AUDIT_UNLOAD,
	BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
	[BPF_AUDIT_LOAD]   = "LOAD",
	[BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
	struct audit_context *ctx = NULL;
	struct audit_buffer *ab;

	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
		return;
	if (audit_enabled == AUDIT_OFF)
		return;
	if (!in_irq() && !irqs_disabled())
		ctx = audit_context();
	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "prog-id=%u op=%s",
			 prog->aux->id, bpf_audit_str[op]);
	audit_log_end(ab);
}
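/* Illustrative note, not part of this file: with auditing enabled, the
 * record above surfaces in the audit log roughly as:
 *
 *	type=BPF msg=audit(...): prog-id=42 op=LOAD
 *
 * (the id value is made up; the payload is the "prog-id=%u op=%s" format
 * used in bpf_audit_prog() above).
 */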
2090 */ 2091 if (!prog->aux->id) 2092 return; 2093 2094 spin_lock_irqsave(&prog_idr_lock, flags); 2095 idr_remove(&prog_idr, prog->aux->id); 2096 prog->aux->id = 0; 2097 spin_unlock_irqrestore(&prog_idr_lock, flags); 2098 } 2099 2100 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 2101 { 2102 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 2103 2104 kvfree(aux->func_info); 2105 kfree(aux->func_info_aux); 2106 free_uid(aux->user); 2107 security_bpf_prog_free(aux); 2108 bpf_prog_free(aux->prog); 2109 } 2110 2111 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 2112 { 2113 bpf_prog_kallsyms_del_all(prog); 2114 btf_put(prog->aux->btf); 2115 module_put(prog->aux->mod); 2116 kvfree(prog->aux->jited_linfo); 2117 kvfree(prog->aux->linfo); 2118 kfree(prog->aux->kfunc_tab); 2119 if (prog->aux->attach_btf) 2120 btf_put(prog->aux->attach_btf); 2121 2122 if (deferred) { 2123 if (prog->aux->sleepable) 2124 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); 2125 else 2126 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 2127 } else { 2128 __bpf_prog_put_rcu(&prog->aux->rcu); 2129 } 2130 } 2131 2132 static void bpf_prog_put_deferred(struct work_struct *work) 2133 { 2134 struct bpf_prog_aux *aux; 2135 struct bpf_prog *prog; 2136 2137 aux = container_of(work, struct bpf_prog_aux, work); 2138 prog = aux->prog; 2139 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2140 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2141 bpf_prog_free_id(prog); 2142 __bpf_prog_put_noref(prog, true); 2143 } 2144 2145 static void __bpf_prog_put(struct bpf_prog *prog) 2146 { 2147 struct bpf_prog_aux *aux = prog->aux; 2148 2149 if (atomic64_dec_and_test(&aux->refcnt)) { 2150 if (in_irq() || irqs_disabled()) { 2151 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2152 schedule_work(&aux->work); 2153 } else { 2154 bpf_prog_put_deferred(&aux->work); 2155 } 2156 } 2157 } 2158 2159 void bpf_prog_put(struct bpf_prog *prog) 2160 { 2161 __bpf_prog_put(prog); 2162 } 2163 EXPORT_SYMBOL_GPL(bpf_prog_put); 2164 2165 static int bpf_prog_release(struct inode *inode, struct file *filp) 2166 { 2167 struct bpf_prog *prog = filp->private_data; 2168 2169 bpf_prog_put(prog); 2170 return 0; 2171 } 2172 2173 struct bpf_prog_kstats { 2174 u64 nsecs; 2175 u64 cnt; 2176 u64 misses; 2177 }; 2178 2179 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2180 { 2181 struct bpf_prog_stats *stats; 2182 unsigned int flags; 2183 2184 stats = this_cpu_ptr(prog->stats); 2185 flags = u64_stats_update_begin_irqsave(&stats->syncp); 2186 u64_stats_inc(&stats->misses); 2187 u64_stats_update_end_irqrestore(&stats->syncp, flags); 2188 } 2189 2190 static void bpf_prog_get_stats(const struct bpf_prog *prog, 2191 struct bpf_prog_kstats *stats) 2192 { 2193 u64 nsecs = 0, cnt = 0, misses = 0; 2194 int cpu; 2195 2196 for_each_possible_cpu(cpu) { 2197 const struct bpf_prog_stats *st; 2198 unsigned int start; 2199 u64 tnsecs, tcnt, tmisses; 2200 2201 st = per_cpu_ptr(prog->stats, cpu); 2202 do { 2203 start = u64_stats_fetch_begin(&st->syncp); 2204 tnsecs = u64_stats_read(&st->nsecs); 2205 tcnt = u64_stats_read(&st->cnt); 2206 tmisses = u64_stats_read(&st->misses); 2207 } while (u64_stats_fetch_retry(&st->syncp, start)); 2208 nsecs += tnsecs; 2209 cnt += tcnt; 2210 misses += tmisses; 2211 } 2212 stats->nsecs = nsecs; 2213 stats->cnt = cnt; 2214 stats->misses = misses; 2215 } 2216 2217 #ifdef CONFIG_PROC_FS 2218 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 2219 { 2220 const struct 
bpf_prog *prog = filp->private_data; 2221 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2222 struct bpf_prog_kstats stats; 2223 2224 bpf_prog_get_stats(prog, &stats); 2225 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2226 seq_printf(m, 2227 "prog_type:\t%u\n" 2228 "prog_jited:\t%u\n" 2229 "prog_tag:\t%s\n" 2230 "memlock:\t%llu\n" 2231 "prog_id:\t%u\n" 2232 "run_time_ns:\t%llu\n" 2233 "run_cnt:\t%llu\n" 2234 "recursion_misses:\t%llu\n" 2235 "verified_insns:\t%u\n", 2236 prog->type, 2237 prog->jited, 2238 prog_tag, 2239 prog->pages * 1ULL << PAGE_SHIFT, 2240 prog->aux->id, 2241 stats.nsecs, 2242 stats.cnt, 2243 stats.misses, 2244 prog->aux->verified_insns); 2245 } 2246 #endif 2247 2248 const struct file_operations bpf_prog_fops = { 2249 #ifdef CONFIG_PROC_FS 2250 .show_fdinfo = bpf_prog_show_fdinfo, 2251 #endif 2252 .release = bpf_prog_release, 2253 .read = bpf_dummy_read, 2254 .write = bpf_dummy_write, 2255 }; 2256 2257 int bpf_prog_new_fd(struct bpf_prog *prog) 2258 { 2259 int ret; 2260 2261 ret = security_bpf_prog(prog); 2262 if (ret < 0) 2263 return ret; 2264 2265 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 2266 O_RDWR | O_CLOEXEC); 2267 } 2268 2269 static struct bpf_prog *____bpf_prog_get(struct fd f) 2270 { 2271 if (!f.file) 2272 return ERR_PTR(-EBADF); 2273 if (f.file->f_op != &bpf_prog_fops) { 2274 fdput(f); 2275 return ERR_PTR(-EINVAL); 2276 } 2277 2278 return f.file->private_data; 2279 } 2280 2281 void bpf_prog_add(struct bpf_prog *prog, int i) 2282 { 2283 atomic64_add(i, &prog->aux->refcnt); 2284 } 2285 EXPORT_SYMBOL_GPL(bpf_prog_add); 2286 2287 void bpf_prog_sub(struct bpf_prog *prog, int i) 2288 { 2289 /* Only to be used for undoing previous bpf_prog_add() in some 2290 * error path. We still know that another entity in our call 2291 * path holds a reference to the program, thus atomic_sub() can 2292 * be safely used in such cases! 
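 *
 * A hypothetical caller-side sketch of that pairing (install_n_users() is
 * a made-up helper):
 *
 *	bpf_prog_add(prog, n);
 *	err = install_n_users(prog, n);
 *	if (err)
 *		bpf_prog_sub(prog, n);
 *
 * The caller's own reference keeps the refcount from hitting zero while
 * the rollback runs, which is exactly what the WARN_ON below checks.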
2293 */ 2294 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 2295 } 2296 EXPORT_SYMBOL_GPL(bpf_prog_sub); 2297 2298 void bpf_prog_inc(struct bpf_prog *prog) 2299 { 2300 atomic64_inc(&prog->aux->refcnt); 2301 } 2302 EXPORT_SYMBOL_GPL(bpf_prog_inc); 2303 2304 /* prog_idr_lock should have been held */ 2305 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 2306 { 2307 int refold; 2308 2309 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 2310 2311 if (!refold) 2312 return ERR_PTR(-ENOENT); 2313 2314 return prog; 2315 } 2316 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 2317 2318 bool bpf_prog_get_ok(struct bpf_prog *prog, 2319 enum bpf_prog_type *attach_type, bool attach_drv) 2320 { 2321 /* not an attachment, just a refcount inc, always allow */ 2322 if (!attach_type) 2323 return true; 2324 2325 if (prog->type != *attach_type) 2326 return false; 2327 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) 2328 return false; 2329 2330 return true; 2331 } 2332 2333 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 2334 bool attach_drv) 2335 { 2336 struct fd f = fdget(ufd); 2337 struct bpf_prog *prog; 2338 2339 prog = ____bpf_prog_get(f); 2340 if (IS_ERR(prog)) 2341 return prog; 2342 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 2343 prog = ERR_PTR(-EINVAL); 2344 goto out; 2345 } 2346 2347 bpf_prog_inc(prog); 2348 out: 2349 fdput(f); 2350 return prog; 2351 } 2352 2353 struct bpf_prog *bpf_prog_get(u32 ufd) 2354 { 2355 return __bpf_prog_get(ufd, NULL, false); 2356 } 2357 2358 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2359 bool attach_drv) 2360 { 2361 return __bpf_prog_get(ufd, &type, attach_drv); 2362 } 2363 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 2364 2365 /* Initially all BPF programs could be loaded w/o specifying 2366 * expected_attach_type. Later for some of them specifying expected_attach_type 2367 * at load time became required so that program could be validated properly. 2368 * Programs of types that are allowed to be loaded both w/ and w/o (for 2369 * backward compatibility) expected_attach_type, should have the default attach 2370 * type assigned to expected_attach_type for the latter case, so that it can be 2371 * validated later at attach time. 2372 * 2373 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 2374 * prog type requires it but has some attach types that have to be backward 2375 * compatible. 2376 */ 2377 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 2378 { 2379 switch (attr->prog_type) { 2380 case BPF_PROG_TYPE_CGROUP_SOCK: 2381 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 2382 * exist so checking for non-zero is the way to go here. 
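 *
 * E.g. a legacy loader that does (user-space sketch):
 *
 *	attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
 *	attr.expected_attach_type = 0;
 *
 * is treated as if it had passed BPF_CGROUP_INET_SOCK_CREATE explicitly,
 * so the program still validates properly at attach time.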
2383 */ 2384 if (!attr->expected_attach_type) 2385 attr->expected_attach_type = 2386 BPF_CGROUP_INET_SOCK_CREATE; 2387 break; 2388 case BPF_PROG_TYPE_SK_REUSEPORT: 2389 if (!attr->expected_attach_type) 2390 attr->expected_attach_type = 2391 BPF_SK_REUSEPORT_SELECT; 2392 break; 2393 } 2394 } 2395 2396 static int 2397 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 2398 enum bpf_attach_type expected_attach_type, 2399 struct btf *attach_btf, u32 btf_id, 2400 struct bpf_prog *dst_prog) 2401 { 2402 if (btf_id) { 2403 if (btf_id > BTF_MAX_TYPE) 2404 return -EINVAL; 2405 2406 if (!attach_btf && !dst_prog) 2407 return -EINVAL; 2408 2409 switch (prog_type) { 2410 case BPF_PROG_TYPE_TRACING: 2411 case BPF_PROG_TYPE_LSM: 2412 case BPF_PROG_TYPE_STRUCT_OPS: 2413 case BPF_PROG_TYPE_EXT: 2414 break; 2415 default: 2416 return -EINVAL; 2417 } 2418 } 2419 2420 if (attach_btf && (!btf_id || dst_prog)) 2421 return -EINVAL; 2422 2423 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING && 2424 prog_type != BPF_PROG_TYPE_EXT) 2425 return -EINVAL; 2426 2427 switch (prog_type) { 2428 case BPF_PROG_TYPE_CGROUP_SOCK: 2429 switch (expected_attach_type) { 2430 case BPF_CGROUP_INET_SOCK_CREATE: 2431 case BPF_CGROUP_INET_SOCK_RELEASE: 2432 case BPF_CGROUP_INET4_POST_BIND: 2433 case BPF_CGROUP_INET6_POST_BIND: 2434 return 0; 2435 default: 2436 return -EINVAL; 2437 } 2438 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2439 switch (expected_attach_type) { 2440 case BPF_CGROUP_INET4_BIND: 2441 case BPF_CGROUP_INET6_BIND: 2442 case BPF_CGROUP_INET4_CONNECT: 2443 case BPF_CGROUP_INET6_CONNECT: 2444 case BPF_CGROUP_INET4_GETPEERNAME: 2445 case BPF_CGROUP_INET6_GETPEERNAME: 2446 case BPF_CGROUP_INET4_GETSOCKNAME: 2447 case BPF_CGROUP_INET6_GETSOCKNAME: 2448 case BPF_CGROUP_UDP4_SENDMSG: 2449 case BPF_CGROUP_UDP6_SENDMSG: 2450 case BPF_CGROUP_UDP4_RECVMSG: 2451 case BPF_CGROUP_UDP6_RECVMSG: 2452 return 0; 2453 default: 2454 return -EINVAL; 2455 } 2456 case BPF_PROG_TYPE_CGROUP_SKB: 2457 switch (expected_attach_type) { 2458 case BPF_CGROUP_INET_INGRESS: 2459 case BPF_CGROUP_INET_EGRESS: 2460 return 0; 2461 default: 2462 return -EINVAL; 2463 } 2464 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2465 switch (expected_attach_type) { 2466 case BPF_CGROUP_SETSOCKOPT: 2467 case BPF_CGROUP_GETSOCKOPT: 2468 return 0; 2469 default: 2470 return -EINVAL; 2471 } 2472 case BPF_PROG_TYPE_SK_LOOKUP: 2473 if (expected_attach_type == BPF_SK_LOOKUP) 2474 return 0; 2475 return -EINVAL; 2476 case BPF_PROG_TYPE_SK_REUSEPORT: 2477 switch (expected_attach_type) { 2478 case BPF_SK_REUSEPORT_SELECT: 2479 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2480 return 0; 2481 default: 2482 return -EINVAL; 2483 } 2484 case BPF_PROG_TYPE_NETFILTER: 2485 if (expected_attach_type == BPF_NETFILTER) 2486 return 0; 2487 return -EINVAL; 2488 case BPF_PROG_TYPE_SYSCALL: 2489 case BPF_PROG_TYPE_EXT: 2490 if (expected_attach_type) 2491 return -EINVAL; 2492 fallthrough; 2493 default: 2494 return 0; 2495 } 2496 } 2497 2498 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2499 { 2500 switch (prog_type) { 2501 case BPF_PROG_TYPE_SCHED_CLS: 2502 case BPF_PROG_TYPE_SCHED_ACT: 2503 case BPF_PROG_TYPE_XDP: 2504 case BPF_PROG_TYPE_LWT_IN: 2505 case BPF_PROG_TYPE_LWT_OUT: 2506 case BPF_PROG_TYPE_LWT_XMIT: 2507 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2508 case BPF_PROG_TYPE_SK_SKB: 2509 case BPF_PROG_TYPE_SK_MSG: 2510 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2511 case BPF_PROG_TYPE_CGROUP_DEVICE: 2512 case BPF_PROG_TYPE_CGROUP_SOCK: 2513 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2514 case 
BPF_PROG_TYPE_CGROUP_SOCKOPT: 2515 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2516 case BPF_PROG_TYPE_SOCK_OPS: 2517 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2518 case BPF_PROG_TYPE_NETFILTER: 2519 return true; 2520 case BPF_PROG_TYPE_CGROUP_SKB: 2521 /* always unpriv */ 2522 case BPF_PROG_TYPE_SK_REUSEPORT: 2523 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2524 default: 2525 return false; 2526 } 2527 } 2528 2529 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2530 { 2531 switch (prog_type) { 2532 case BPF_PROG_TYPE_KPROBE: 2533 case BPF_PROG_TYPE_TRACEPOINT: 2534 case BPF_PROG_TYPE_PERF_EVENT: 2535 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2536 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2537 case BPF_PROG_TYPE_TRACING: 2538 case BPF_PROG_TYPE_LSM: 2539 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2540 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2541 return true; 2542 default: 2543 return false; 2544 } 2545 } 2546 2547 /* last field in 'union bpf_attr' used by this command */ 2548 #define BPF_PROG_LOAD_LAST_FIELD log_true_size 2549 2550 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) 2551 { 2552 enum bpf_prog_type type = attr->prog_type; 2553 struct bpf_prog *prog, *dst_prog = NULL; 2554 struct btf *attach_btf = NULL; 2555 int err; 2556 char license[128]; 2557 2558 if (CHECK_ATTR(BPF_PROG_LOAD)) 2559 return -EINVAL; 2560 2561 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2562 BPF_F_ANY_ALIGNMENT | 2563 BPF_F_TEST_STATE_FREQ | 2564 BPF_F_SLEEPABLE | 2565 BPF_F_TEST_RND_HI32 | 2566 BPF_F_XDP_HAS_FRAGS | 2567 BPF_F_XDP_DEV_BOUND_ONLY)) 2568 return -EINVAL; 2569 2570 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2571 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2572 !bpf_capable()) 2573 return -EPERM; 2574 2575 /* Intent here is for unprivileged_bpf_disabled to block BPF program 2576 * creation for unprivileged users; other actions depend 2577 * on fd availability and access to bpffs, so are dependent on 2578 * object creation success. Even with unprivileged BPF disabled, 2579 * capability checks are still carried out for these 2580 * and other operations. 2581 */ 2582 if (sysctl_unprivileged_bpf_disabled && !bpf_capable()) 2583 return -EPERM; 2584 2585 if (attr->insn_cnt == 0 || 2586 attr->insn_cnt > (bpf_capable() ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) 2587 return -E2BIG; 2588 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2589 type != BPF_PROG_TYPE_CGROUP_SKB && 2590 !bpf_capable()) 2591 return -EPERM; 2592 2593 if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN)) 2594 return -EPERM; 2595 if (is_perfmon_prog_type(type) && !perfmon_capable()) 2596 return -EPERM; 2597 2598 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog 2599 * or btf, we need to check which one it is 2600 */ 2601 if (attr->attach_prog_fd) { 2602 dst_prog = bpf_prog_get(attr->attach_prog_fd); 2603 if (IS_ERR(dst_prog)) { 2604 dst_prog = NULL; 2605 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); 2606 if (IS_ERR(attach_btf)) 2607 return -EINVAL; 2608 if (!btf_is_kernel(attach_btf)) { 2609 /* attaching through specifying bpf_prog's BTF 2610 * objects directly might be supported eventually 2611 */ 2612 btf_put(attach_btf); 2613 return -ENOTSUPP; 2614 } 2615 } 2616 } else if (attr->attach_btf_id) { 2617 /* fall back to vmlinux BTF, if BTF type ID is specified */ 2618 attach_btf = bpf_get_btf_vmlinux(); 2619 if (IS_ERR(attach_btf)) 2620 return PTR_ERR(attach_btf); 2621 if (!attach_btf) 2622 return -EINVAL; 2623 btf_get(attach_btf); 2624 } 2625 2626 bpf_prog_load_fixup_attach_type(attr); 2627 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2628 attach_btf, attr->attach_btf_id, 2629 dst_prog)) { 2630 if (dst_prog) 2631 bpf_prog_put(dst_prog); 2632 if (attach_btf) 2633 btf_put(attach_btf); 2634 return -EINVAL; 2635 } 2636 2637 /* plain bpf_prog allocation */ 2638 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2639 if (!prog) { 2640 if (dst_prog) 2641 bpf_prog_put(dst_prog); 2642 if (attach_btf) 2643 btf_put(attach_btf); 2644 return -ENOMEM; 2645 } 2646 2647 prog->expected_attach_type = attr->expected_attach_type; 2648 prog->aux->attach_btf = attach_btf; 2649 prog->aux->attach_btf_id = attr->attach_btf_id; 2650 prog->aux->dst_prog = dst_prog; 2651 prog->aux->dev_bound = !!attr->prog_ifindex; 2652 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; 2653 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; 2654 2655 err = security_bpf_prog_alloc(prog->aux); 2656 if (err) 2657 goto free_prog; 2658 2659 prog->aux->user = get_current_user(); 2660 prog->len = attr->insn_cnt; 2661 2662 err = -EFAULT; 2663 if (copy_from_bpfptr(prog->insns, 2664 make_bpfptr(attr->insns, uattr.is_kernel), 2665 bpf_prog_insn_size(prog)) != 0) 2666 goto free_prog_sec; 2667 /* copy eBPF program license from user space */ 2668 if (strncpy_from_bpfptr(license, 2669 make_bpfptr(attr->license, uattr.is_kernel), 2670 sizeof(license) - 1) < 0) 2671 goto free_prog_sec; 2672 license[sizeof(license) - 1] = 0; 2673 2674 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2675 prog->gpl_compatible = license_is_gpl_compatible(license) ? 
1 : 0; 2676 2677 prog->orig_prog = NULL; 2678 prog->jited = 0; 2679 2680 atomic64_set(&prog->aux->refcnt, 1); 2681 2682 if (bpf_prog_is_dev_bound(prog->aux)) { 2683 err = bpf_prog_dev_bound_init(prog, attr); 2684 if (err) 2685 goto free_prog_sec; 2686 } 2687 2688 if (type == BPF_PROG_TYPE_EXT && dst_prog && 2689 bpf_prog_is_dev_bound(dst_prog->aux)) { 2690 err = bpf_prog_dev_bound_inherit(prog, dst_prog); 2691 if (err) 2692 goto free_prog_sec; 2693 } 2694 2695 /* find program type: socket_filter vs tracing_filter */ 2696 err = find_prog_type(type, prog); 2697 if (err < 0) 2698 goto free_prog_sec; 2699 2700 prog->aux->load_time = ktime_get_boottime_ns(); 2701 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2702 sizeof(attr->prog_name)); 2703 if (err < 0) 2704 goto free_prog_sec; 2705 2706 /* run eBPF verifier */ 2707 err = bpf_check(&prog, attr, uattr, uattr_size); 2708 if (err < 0) 2709 goto free_used_maps; 2710 2711 prog = bpf_prog_select_runtime(prog, &err); 2712 if (err < 0) 2713 goto free_used_maps; 2714 2715 err = bpf_prog_alloc_id(prog); 2716 if (err) 2717 goto free_used_maps; 2718 2719 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2720 * effectively publicly exposed. However, retrieving via 2721 * bpf_prog_get_fd_by_id() will take another reference, 2722 * therefore it cannot be gone underneath us. 2723 * 2724 * Only for the time /after/ successful bpf_prog_new_fd() 2725 * and before returning to userspace, we might just hold 2726 * one reference and any parallel close on that fd could 2727 * rip everything out. Hence, below notifications must 2728 * happen before bpf_prog_new_fd(). 2729 * 2730 * Also, any failure handling from this point onwards must 2731 * be using bpf_prog_put() given the program is exposed. 2732 */ 2733 bpf_prog_kallsyms_add(prog); 2734 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 2735 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 2736 2737 err = bpf_prog_new_fd(prog); 2738 if (err < 0) 2739 bpf_prog_put(prog); 2740 return err; 2741 2742 free_used_maps: 2743 /* In case we have subprogs, we need to wait for a grace 2744 * period before we can tear down JIT memory since symbols 2745 * are already exposed under kallsyms. 2746 */ 2747 __bpf_prog_put_noref(prog, prog->aux->func_cnt); 2748 return err; 2749 free_prog_sec: 2750 free_uid(prog->aux->user); 2751 security_bpf_prog_free(prog->aux); 2752 free_prog: 2753 if (prog->aux->attach_btf) 2754 btf_put(prog->aux->attach_btf); 2755 bpf_prog_free(prog); 2756 return err; 2757 } 2758 2759 #define BPF_OBJ_LAST_FIELD path_fd 2760 2761 static int bpf_obj_pin(const union bpf_attr *attr) 2762 { 2763 int path_fd; 2764 2765 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) 2766 return -EINVAL; 2767 2768 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 2769 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 2770 return -EINVAL; 2771 2772 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 2773 return bpf_obj_pin_user(attr->bpf_fd, path_fd, 2774 u64_to_user_ptr(attr->pathname)); 2775 } 2776 2777 static int bpf_obj_get(const union bpf_attr *attr) 2778 { 2779 int path_fd; 2780 2781 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 2782 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) 2783 return -EINVAL; 2784 2785 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 2786 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 2787 return -EINVAL; 2788 2789 path_fd = attr->file_flags & BPF_F_PATH_FD ? 
attr->path_fd : AT_FDCWD; 2790 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), 2791 attr->file_flags); 2792 } 2793 2794 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2795 const struct bpf_link_ops *ops, struct bpf_prog *prog) 2796 { 2797 atomic64_set(&link->refcnt, 1); 2798 link->type = type; 2799 link->id = 0; 2800 link->ops = ops; 2801 link->prog = prog; 2802 } 2803 2804 static void bpf_link_free_id(int id) 2805 { 2806 if (!id) 2807 return; 2808 2809 spin_lock_bh(&link_idr_lock); 2810 idr_remove(&link_idr, id); 2811 spin_unlock_bh(&link_idr_lock); 2812 } 2813 2814 /* Clean up bpf_link and corresponding anon_inode file and FD. After 2815 * anon_inode is created, bpf_link can't simply be kfree()'d, because the 2816 * anon_inode's release() call is deferred. This helper marks bpf_link as 2817 * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's 2818 * refcnt is not decremented; that is the responsibility of the calling code 2819 * that failed to complete bpf_link initialization. 2820 */ 2821 void bpf_link_cleanup(struct bpf_link_primer *primer) 2822 { 2823 primer->link->prog = NULL; 2824 bpf_link_free_id(primer->id); 2825 fput(primer->file); 2826 put_unused_fd(primer->fd); 2827 } 2828 2829 void bpf_link_inc(struct bpf_link *link) 2830 { 2831 atomic64_inc(&link->refcnt); 2832 } 2833 2834 /* bpf_link_free is guaranteed to be called from process context */ 2835 static void bpf_link_free(struct bpf_link *link) 2836 { 2837 bpf_link_free_id(link->id); 2838 if (link->prog) { 2839 /* detach BPF program, clean up used resources */ 2840 link->ops->release(link); 2841 bpf_prog_put(link->prog); 2842 } 2843 /* free bpf_link and its containing memory */ 2844 link->ops->dealloc(link); 2845 } 2846 2847 static void bpf_link_put_deferred(struct work_struct *work) 2848 { 2849 struct bpf_link *link = container_of(work, struct bpf_link, work); 2850 2851 bpf_link_free(link); 2852 } 2853 2854 /* bpf_link_put() might be called from atomic context. Freeing the link, 2855 * however, needs a sleepable context, because link->ops->release() may take sleeping locks
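 * (e.g. the trampoline mutex via bpf_tracing_link_release()), so the actual
 * free is deferred to a workqueue here, while bpf_link_put_direct() below is
 * the variant for callers that know they run in process context.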
2856 */ 2857 void bpf_link_put(struct bpf_link *link) 2858 { 2859 if (!atomic64_dec_and_test(&link->refcnt)) 2860 return; 2861 2862 INIT_WORK(&link->work, bpf_link_put_deferred); 2863 schedule_work(&link->work); 2864 } 2865 EXPORT_SYMBOL(bpf_link_put); 2866 2867 static void bpf_link_put_direct(struct bpf_link *link) 2868 { 2869 if (!atomic64_dec_and_test(&link->refcnt)) 2870 return; 2871 bpf_link_free(link); 2872 } 2873 2874 static int bpf_link_release(struct inode *inode, struct file *filp) 2875 { 2876 struct bpf_link *link = filp->private_data; 2877 2878 bpf_link_put_direct(link); 2879 return 0; 2880 } 2881 2882 #ifdef CONFIG_PROC_FS 2883 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 2884 #define BPF_MAP_TYPE(_id, _ops) 2885 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 2886 static const char *bpf_link_type_strs[] = { 2887 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 2888 #include <linux/bpf_types.h> 2889 }; 2890 #undef BPF_PROG_TYPE 2891 #undef BPF_MAP_TYPE 2892 #undef BPF_LINK_TYPE 2893 2894 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 2895 { 2896 const struct bpf_link *link = filp->private_data; 2897 const struct bpf_prog *prog = link->prog; 2898 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2899 2900 seq_printf(m, 2901 "link_type:\t%s\n" 2902 "link_id:\t%u\n", 2903 bpf_link_type_strs[link->type], 2904 link->id); 2905 if (prog) { 2906 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2907 seq_printf(m, 2908 "prog_tag:\t%s\n" 2909 "prog_id:\t%u\n", 2910 prog_tag, 2911 prog->aux->id); 2912 } 2913 if (link->ops->show_fdinfo) 2914 link->ops->show_fdinfo(link, m); 2915 } 2916 #endif 2917 2918 static const struct file_operations bpf_link_fops = { 2919 #ifdef CONFIG_PROC_FS 2920 .show_fdinfo = bpf_link_show_fdinfo, 2921 #endif 2922 .release = bpf_link_release, 2923 .read = bpf_dummy_read, 2924 .write = bpf_dummy_write, 2925 }; 2926 2927 static int bpf_link_alloc_id(struct bpf_link *link) 2928 { 2929 int id; 2930 2931 idr_preload(GFP_KERNEL); 2932 spin_lock_bh(&link_idr_lock); 2933 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 2934 spin_unlock_bh(&link_idr_lock); 2935 idr_preload_end(); 2936 2937 return id; 2938 } 2939 2940 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, 2941 * reserving unused FD and allocating ID from link_idr. This is to be paired 2942 * with bpf_link_settle() to install FD and ID and expose bpf_link to 2943 * user-space, if bpf_link is successfully attached. If not, bpf_link and 2944 * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All 2945 * the transient state is passed around in struct bpf_link_primer. 2946 * This is the preferred way to create and initialize bpf_link, especially when 2947 * there are complicated and expensive operations in between creating bpf_link 2948 * itself and attaching it to a BPF hook. By using bpf_link_prime() and 2949 * bpf_link_settle() kernel code using bpf_link doesn't have to perform 2950 * expensive (and potentially failing) roll back operations in the rare case 2951 * that the file, FD, or ID can't be allocated.
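 *
 * A minimal sketch of that flow (mirroring bpf_raw_tp_link_attach() below;
 * the "foo" names are placeholders, not real symbols):
 *
 *	link = kzalloc(sizeof(*link), GFP_USER);
 *	if (!link)
 *		return -ENOMEM;
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_FOO, &foo_link_lops, prog);
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = foo_attach_hook(link);
 *	if (err) {
 *		bpf_link_cleanup(&primer);
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);
 *
 * Before bpf_link_prime() succeeds a plain kfree() is fine; afterwards the
 * link must go through bpf_link_cleanup(), which releases the reserved FD
 * and ID and lets the file's release path free the link itself.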
2952 */ 2953 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 2954 { 2955 struct file *file; 2956 int fd, id; 2957 2958 fd = get_unused_fd_flags(O_CLOEXEC); 2959 if (fd < 0) 2960 return fd; 2961 2962 2963 id = bpf_link_alloc_id(link); 2964 if (id < 0) { 2965 put_unused_fd(fd); 2966 return id; 2967 } 2968 2969 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC); 2970 if (IS_ERR(file)) { 2971 bpf_link_free_id(id); 2972 put_unused_fd(fd); 2973 return PTR_ERR(file); 2974 } 2975 2976 primer->link = link; 2977 primer->file = file; 2978 primer->fd = fd; 2979 primer->id = id; 2980 return 0; 2981 } 2982 2983 int bpf_link_settle(struct bpf_link_primer *primer) 2984 { 2985 /* make bpf_link fetchable by ID */ 2986 spin_lock_bh(&link_idr_lock); 2987 primer->link->id = primer->id; 2988 spin_unlock_bh(&link_idr_lock); 2989 /* make bpf_link fetchable by FD */ 2990 fd_install(primer->fd, primer->file); 2991 /* pass through installed FD */ 2992 return primer->fd; 2993 } 2994 2995 int bpf_link_new_fd(struct bpf_link *link) 2996 { 2997 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); 2998 } 2999 3000 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 3001 { 3002 struct fd f = fdget(ufd); 3003 struct bpf_link *link; 3004 3005 if (!f.file) 3006 return ERR_PTR(-EBADF); 3007 if (f.file->f_op != &bpf_link_fops) { 3008 fdput(f); 3009 return ERR_PTR(-EINVAL); 3010 } 3011 3012 link = f.file->private_data; 3013 bpf_link_inc(link); 3014 fdput(f); 3015 3016 return link; 3017 } 3018 EXPORT_SYMBOL(bpf_link_get_from_fd); 3019 3020 static void bpf_tracing_link_release(struct bpf_link *link) 3021 { 3022 struct bpf_tracing_link *tr_link = 3023 container_of(link, struct bpf_tracing_link, link.link); 3024 3025 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, 3026 tr_link->trampoline)); 3027 3028 bpf_trampoline_put(tr_link->trampoline); 3029 3030 /* tgt_prog is NULL if target is a kernel function */ 3031 if (tr_link->tgt_prog) 3032 bpf_prog_put(tr_link->tgt_prog); 3033 } 3034 3035 static void bpf_tracing_link_dealloc(struct bpf_link *link) 3036 { 3037 struct bpf_tracing_link *tr_link = 3038 container_of(link, struct bpf_tracing_link, link.link); 3039 3040 kfree(tr_link); 3041 } 3042 3043 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 3044 struct seq_file *seq) 3045 { 3046 struct bpf_tracing_link *tr_link = 3047 container_of(link, struct bpf_tracing_link, link.link); 3048 u32 target_btf_id, target_obj_id; 3049 3050 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3051 &target_obj_id, &target_btf_id); 3052 seq_printf(seq, 3053 "attach_type:\t%d\n" 3054 "target_obj_id:\t%u\n" 3055 "target_btf_id:\t%u\n", 3056 tr_link->attach_type, 3057 target_obj_id, 3058 target_btf_id); 3059 } 3060 3061 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 3062 struct bpf_link_info *info) 3063 { 3064 struct bpf_tracing_link *tr_link = 3065 container_of(link, struct bpf_tracing_link, link.link); 3066 3067 info->tracing.attach_type = tr_link->attach_type; 3068 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3069 &info->tracing.target_obj_id, 3070 &info->tracing.target_btf_id); 3071 3072 return 0; 3073 } 3074 3075 static const struct bpf_link_ops bpf_tracing_link_lops = { 3076 .release = bpf_tracing_link_release, 3077 .dealloc = bpf_tracing_link_dealloc, 3078 .show_fdinfo = bpf_tracing_link_show_fdinfo, 3079 .fill_link_info = bpf_tracing_link_fill_link_info, 3080 }; 3081 3082 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 
3083 int tgt_prog_fd, 3084 u32 btf_id, 3085 u64 bpf_cookie) 3086 { 3087 struct bpf_link_primer link_primer; 3088 struct bpf_prog *tgt_prog = NULL; 3089 struct bpf_trampoline *tr = NULL; 3090 struct bpf_tracing_link *link; 3091 u64 key = 0; 3092 int err; 3093 3094 switch (prog->type) { 3095 case BPF_PROG_TYPE_TRACING: 3096 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 3097 prog->expected_attach_type != BPF_TRACE_FEXIT && 3098 prog->expected_attach_type != BPF_MODIFY_RETURN) { 3099 err = -EINVAL; 3100 goto out_put_prog; 3101 } 3102 break; 3103 case BPF_PROG_TYPE_EXT: 3104 if (prog->expected_attach_type != 0) { 3105 err = -EINVAL; 3106 goto out_put_prog; 3107 } 3108 break; 3109 case BPF_PROG_TYPE_LSM: 3110 if (prog->expected_attach_type != BPF_LSM_MAC) { 3111 err = -EINVAL; 3112 goto out_put_prog; 3113 } 3114 break; 3115 default: 3116 err = -EINVAL; 3117 goto out_put_prog; 3118 } 3119 3120 if (!!tgt_prog_fd != !!btf_id) { 3121 err = -EINVAL; 3122 goto out_put_prog; 3123 } 3124 3125 if (tgt_prog_fd) { 3126 /* For now we only allow new targets for BPF_PROG_TYPE_EXT */ 3127 if (prog->type != BPF_PROG_TYPE_EXT) { 3128 err = -EINVAL; 3129 goto out_put_prog; 3130 } 3131 3132 tgt_prog = bpf_prog_get(tgt_prog_fd); 3133 if (IS_ERR(tgt_prog)) { 3134 err = PTR_ERR(tgt_prog); 3135 tgt_prog = NULL; 3136 goto out_put_prog; 3137 } 3138 3139 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 3140 } 3141 3142 link = kzalloc(sizeof(*link), GFP_USER); 3143 if (!link) { 3144 err = -ENOMEM; 3145 goto out_put_prog; 3146 } 3147 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, 3148 &bpf_tracing_link_lops, prog); 3149 link->attach_type = prog->expected_attach_type; 3150 link->link.cookie = bpf_cookie; 3151 3152 mutex_lock(&prog->aux->dst_mutex); 3153 3154 /* There are a few possible cases here: 3155 * 3156 * - if prog->aux->dst_trampoline is set, the program was just loaded 3157 * and not yet attached to anything, so we can use the values stored 3158 * in prog->aux 3159 * 3160 * - if prog->aux->dst_trampoline is NULL, the program has already been 3161 * attached to a target and its initial target was cleared (below) 3162 * 3163 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 3164 * target_btf_id using the link_create API. 3165 * 3166 * - if tgt_prog == NULL when this function was called using the old 3167 * raw_tracepoint_open API, and we need a target from prog->aux 3168 * 3169 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 3170 * was detached and is going for re-attachment. 3171 */ 3172 if (!prog->aux->dst_trampoline && !tgt_prog) { 3173 /* 3174 * Allow re-attach for TRACING and LSM programs. If it's 3175 * currently linked, bpf_trampoline_link_prog will fail. 3176 * EXT programs need to specify tgt_prog_fd, so they 3177 * re-attach in separate code path. 
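 * For those two types the attach target is then recomputed just below
 * from prog->aux->attach_btf_id, i.e. the BTF ID recorded at load time.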
3178 */ 3179 if (prog->type != BPF_PROG_TYPE_TRACING && 3180 prog->type != BPF_PROG_TYPE_LSM) { 3181 err = -EINVAL; 3182 goto out_unlock; 3183 } 3184 btf_id = prog->aux->attach_btf_id; 3185 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 3186 } 3187 3188 if (!prog->aux->dst_trampoline || 3189 (key && key != prog->aux->dst_trampoline->key)) { 3190 /* If there is no saved target, or the specified target is 3191 * different from the destination specified at load time, we 3192 * need a new trampoline and a check for compatibility 3193 */ 3194 struct bpf_attach_target_info tgt_info = {}; 3195 3196 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 3197 &tgt_info); 3198 if (err) 3199 goto out_unlock; 3200 3201 if (tgt_info.tgt_mod) { 3202 module_put(prog->aux->mod); 3203 prog->aux->mod = tgt_info.tgt_mod; 3204 } 3205 3206 tr = bpf_trampoline_get(key, &tgt_info); 3207 if (!tr) { 3208 err = -ENOMEM; 3209 goto out_unlock; 3210 } 3211 } else { 3212 /* The caller didn't specify a target, or the target was the 3213 * same as the destination supplied during program load. This 3214 * means we can reuse the trampoline and reference from program 3215 * load time, and there is no need to allocate a new one. This 3216 * can only happen once for any program, as the saved values in 3217 * prog->aux are cleared below. 3218 */ 3219 tr = prog->aux->dst_trampoline; 3220 tgt_prog = prog->aux->dst_prog; 3221 } 3222 3223 err = bpf_link_prime(&link->link.link, &link_primer); 3224 if (err) 3225 goto out_unlock; 3226 3227 err = bpf_trampoline_link_prog(&link->link, tr); 3228 if (err) { 3229 bpf_link_cleanup(&link_primer); 3230 link = NULL; 3231 goto out_unlock; 3232 } 3233 3234 link->tgt_prog = tgt_prog; 3235 link->trampoline = tr; 3236 3237 /* Always clear the trampoline and target prog from prog->aux to make 3238 * sure the original attach destination is not kept alive after a 3239 * program is (re-)attached to another target. 
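 * The conditional puts below only drop the load-time references when they
 * are actually being replaced; a first attach that reuses
 * prog->aux->dst_trampoline simply hands those references over to the link.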
3240 */ 3241 if (prog->aux->dst_prog && 3242 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 3243 /* got extra prog ref from syscall, or attaching to different prog */ 3244 bpf_prog_put(prog->aux->dst_prog); 3245 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 3246 /* we allocated a new trampoline, so free the old one */ 3247 bpf_trampoline_put(prog->aux->dst_trampoline); 3248 3249 prog->aux->dst_prog = NULL; 3250 prog->aux->dst_trampoline = NULL; 3251 mutex_unlock(&prog->aux->dst_mutex); 3252 3253 return bpf_link_settle(&link_primer); 3254 out_unlock: 3255 if (tr && tr != prog->aux->dst_trampoline) 3256 bpf_trampoline_put(tr); 3257 mutex_unlock(&prog->aux->dst_mutex); 3258 kfree(link); 3259 out_put_prog: 3260 if (tgt_prog_fd && tgt_prog) 3261 bpf_prog_put(tgt_prog); 3262 return err; 3263 } 3264 3265 struct bpf_raw_tp_link { 3266 struct bpf_link link; 3267 struct bpf_raw_event_map *btp; 3268 }; 3269 3270 static void bpf_raw_tp_link_release(struct bpf_link *link) 3271 { 3272 struct bpf_raw_tp_link *raw_tp = 3273 container_of(link, struct bpf_raw_tp_link, link); 3274 3275 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); 3276 bpf_put_raw_tracepoint(raw_tp->btp); 3277 } 3278 3279 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 3280 { 3281 struct bpf_raw_tp_link *raw_tp = 3282 container_of(link, struct bpf_raw_tp_link, link); 3283 3284 kfree(raw_tp); 3285 } 3286 3287 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 3288 struct seq_file *seq) 3289 { 3290 struct bpf_raw_tp_link *raw_tp_link = 3291 container_of(link, struct bpf_raw_tp_link, link); 3292 3293 seq_printf(seq, 3294 "tp_name:\t%s\n", 3295 raw_tp_link->btp->tp->name); 3296 } 3297 3298 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 3299 struct bpf_link_info *info) 3300 { 3301 struct bpf_raw_tp_link *raw_tp_link = 3302 container_of(link, struct bpf_raw_tp_link, link); 3303 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 3304 const char *tp_name = raw_tp_link->btp->tp->name; 3305 u32 ulen = info->raw_tracepoint.tp_name_len; 3306 size_t tp_len = strlen(tp_name); 3307 3308 if (!ulen ^ !ubuf) 3309 return -EINVAL; 3310 3311 info->raw_tracepoint.tp_name_len = tp_len + 1; 3312 3313 if (!ubuf) 3314 return 0; 3315 3316 if (ulen >= tp_len + 1) { 3317 if (copy_to_user(ubuf, tp_name, tp_len + 1)) 3318 return -EFAULT; 3319 } else { 3320 char zero = '\0'; 3321 3322 if (copy_to_user(ubuf, tp_name, ulen - 1)) 3323 return -EFAULT; 3324 if (put_user(zero, ubuf + ulen - 1)) 3325 return -EFAULT; 3326 return -ENOSPC; 3327 } 3328 3329 return 0; 3330 } 3331 3332 static const struct bpf_link_ops bpf_raw_tp_link_lops = { 3333 .release = bpf_raw_tp_link_release, 3334 .dealloc = bpf_raw_tp_link_dealloc, 3335 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 3336 .fill_link_info = bpf_raw_tp_link_fill_link_info, 3337 }; 3338 3339 #ifdef CONFIG_PERF_EVENTS 3340 struct bpf_perf_link { 3341 struct bpf_link link; 3342 struct file *perf_file; 3343 }; 3344 3345 static void bpf_perf_link_release(struct bpf_link *link) 3346 { 3347 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3348 struct perf_event *event = perf_link->perf_file->private_data; 3349 3350 perf_event_free_bpf_prog(event); 3351 fput(perf_link->perf_file); 3352 } 3353 3354 static void bpf_perf_link_dealloc(struct bpf_link *link) 3355 { 3356 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3357 3358 kfree(perf_link); 3359 } 3360 3361 static const struct 
bpf_link_ops bpf_perf_link_lops = { 3362 .release = bpf_perf_link_release, 3363 .dealloc = bpf_perf_link_dealloc, 3364 }; 3365 3366 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3367 { 3368 struct bpf_link_primer link_primer; 3369 struct bpf_perf_link *link; 3370 struct perf_event *event; 3371 struct file *perf_file; 3372 int err; 3373 3374 if (attr->link_create.flags) 3375 return -EINVAL; 3376 3377 perf_file = perf_event_get(attr->link_create.target_fd); 3378 if (IS_ERR(perf_file)) 3379 return PTR_ERR(perf_file); 3380 3381 link = kzalloc(sizeof(*link), GFP_USER); 3382 if (!link) { 3383 err = -ENOMEM; 3384 goto out_put_file; 3385 } 3386 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 3387 link->perf_file = perf_file; 3388 3389 err = bpf_link_prime(&link->link, &link_primer); 3390 if (err) { 3391 kfree(link); 3392 goto out_put_file; 3393 } 3394 3395 event = perf_file->private_data; 3396 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 3397 if (err) { 3398 bpf_link_cleanup(&link_primer); 3399 goto out_put_file; 3400 } 3401 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 3402 bpf_prog_inc(prog); 3403 3404 return bpf_link_settle(&link_primer); 3405 3406 out_put_file: 3407 fput(perf_file); 3408 return err; 3409 } 3410 #else 3411 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3412 { 3413 return -EOPNOTSUPP; 3414 } 3415 #endif /* CONFIG_PERF_EVENTS */ 3416 3417 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 3418 const char __user *user_tp_name) 3419 { 3420 struct bpf_link_primer link_primer; 3421 struct bpf_raw_tp_link *link; 3422 struct bpf_raw_event_map *btp; 3423 const char *tp_name; 3424 char buf[128]; 3425 int err; 3426 3427 switch (prog->type) { 3428 case BPF_PROG_TYPE_TRACING: 3429 case BPF_PROG_TYPE_EXT: 3430 case BPF_PROG_TYPE_LSM: 3431 if (user_tp_name) 3432 /* The attach point for this category of programs 3433 * should be specified via btf_id during program load. 
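 *
 * Contrast with BPF_PROG_TYPE_RAW_TRACEPOINT below, where the name is
 * supplied at attach time instead (user-space sketch, illustrative only):
 *
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	attr.raw_tracepoint.name =
 *		(__u64)(unsigned long)"sched_switch";
 *	fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
 *		     &attr, sizeof(attr));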
3434 */ 3435 return -EINVAL; 3436 if (prog->type == BPF_PROG_TYPE_TRACING && 3437 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 3438 tp_name = prog->aux->attach_func_name; 3439 break; 3440 } 3441 return bpf_tracing_prog_attach(prog, 0, 0, 0); 3442 case BPF_PROG_TYPE_RAW_TRACEPOINT: 3443 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 3444 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) 3445 return -EFAULT; 3446 buf[sizeof(buf) - 1] = 0; 3447 tp_name = buf; 3448 break; 3449 default: 3450 return -EINVAL; 3451 } 3452 3453 btp = bpf_get_raw_tracepoint(tp_name); 3454 if (!btp) 3455 return -ENOENT; 3456 3457 link = kzalloc(sizeof(*link), GFP_USER); 3458 if (!link) { 3459 err = -ENOMEM; 3460 goto out_put_btp; 3461 } 3462 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 3463 &bpf_raw_tp_link_lops, prog); 3464 link->btp = btp; 3465 3466 err = bpf_link_prime(&link->link, &link_primer); 3467 if (err) { 3468 kfree(link); 3469 goto out_put_btp; 3470 } 3471 3472 err = bpf_probe_register(link->btp, prog); 3473 if (err) { 3474 bpf_link_cleanup(&link_primer); 3475 goto out_put_btp; 3476 } 3477 3478 return bpf_link_settle(&link_primer); 3479 3480 out_put_btp: 3481 bpf_put_raw_tracepoint(btp); 3482 return err; 3483 } 3484 3485 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 3486 3487 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 3488 { 3489 struct bpf_prog *prog; 3490 int fd; 3491 3492 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 3493 return -EINVAL; 3494 3495 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 3496 if (IS_ERR(prog)) 3497 return PTR_ERR(prog); 3498 3499 fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name)); 3500 if (fd < 0) 3501 bpf_prog_put(prog); 3502 return fd; 3503 } 3504 3505 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 3506 enum bpf_attach_type attach_type) 3507 { 3508 switch (prog->type) { 3509 case BPF_PROG_TYPE_CGROUP_SOCK: 3510 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3511 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3512 case BPF_PROG_TYPE_SK_LOOKUP: 3513 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 3514 case BPF_PROG_TYPE_CGROUP_SKB: 3515 if (!capable(CAP_NET_ADMIN)) 3516 /* cg-skb progs can be loaded by unpriv user. 3517 * check permissions at attach time. 3518 */ 3519 return -EPERM; 3520 return prog->enforce_expected_attach_type && 3521 prog->expected_attach_type != attach_type ? 
3522 -EINVAL : 0; 3523 case BPF_PROG_TYPE_KPROBE: 3524 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && 3525 attach_type != BPF_TRACE_KPROBE_MULTI) 3526 return -EINVAL; 3527 return 0; 3528 default: 3529 return 0; 3530 } 3531 } 3532 3533 static enum bpf_prog_type 3534 attach_type_to_prog_type(enum bpf_attach_type attach_type) 3535 { 3536 switch (attach_type) { 3537 case BPF_CGROUP_INET_INGRESS: 3538 case BPF_CGROUP_INET_EGRESS: 3539 return BPF_PROG_TYPE_CGROUP_SKB; 3540 case BPF_CGROUP_INET_SOCK_CREATE: 3541 case BPF_CGROUP_INET_SOCK_RELEASE: 3542 case BPF_CGROUP_INET4_POST_BIND: 3543 case BPF_CGROUP_INET6_POST_BIND: 3544 return BPF_PROG_TYPE_CGROUP_SOCK; 3545 case BPF_CGROUP_INET4_BIND: 3546 case BPF_CGROUP_INET6_BIND: 3547 case BPF_CGROUP_INET4_CONNECT: 3548 case BPF_CGROUP_INET6_CONNECT: 3549 case BPF_CGROUP_INET4_GETPEERNAME: 3550 case BPF_CGROUP_INET6_GETPEERNAME: 3551 case BPF_CGROUP_INET4_GETSOCKNAME: 3552 case BPF_CGROUP_INET6_GETSOCKNAME: 3553 case BPF_CGROUP_UDP4_SENDMSG: 3554 case BPF_CGROUP_UDP6_SENDMSG: 3555 case BPF_CGROUP_UDP4_RECVMSG: 3556 case BPF_CGROUP_UDP6_RECVMSG: 3557 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 3558 case BPF_CGROUP_SOCK_OPS: 3559 return BPF_PROG_TYPE_SOCK_OPS; 3560 case BPF_CGROUP_DEVICE: 3561 return BPF_PROG_TYPE_CGROUP_DEVICE; 3562 case BPF_SK_MSG_VERDICT: 3563 return BPF_PROG_TYPE_SK_MSG; 3564 case BPF_SK_SKB_STREAM_PARSER: 3565 case BPF_SK_SKB_STREAM_VERDICT: 3566 case BPF_SK_SKB_VERDICT: 3567 return BPF_PROG_TYPE_SK_SKB; 3568 case BPF_LIRC_MODE2: 3569 return BPF_PROG_TYPE_LIRC_MODE2; 3570 case BPF_FLOW_DISSECTOR: 3571 return BPF_PROG_TYPE_FLOW_DISSECTOR; 3572 case BPF_CGROUP_SYSCTL: 3573 return BPF_PROG_TYPE_CGROUP_SYSCTL; 3574 case BPF_CGROUP_GETSOCKOPT: 3575 case BPF_CGROUP_SETSOCKOPT: 3576 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 3577 case BPF_TRACE_ITER: 3578 case BPF_TRACE_RAW_TP: 3579 case BPF_TRACE_FENTRY: 3580 case BPF_TRACE_FEXIT: 3581 case BPF_MODIFY_RETURN: 3582 return BPF_PROG_TYPE_TRACING; 3583 case BPF_LSM_MAC: 3584 return BPF_PROG_TYPE_LSM; 3585 case BPF_SK_LOOKUP: 3586 return BPF_PROG_TYPE_SK_LOOKUP; 3587 case BPF_XDP: 3588 return BPF_PROG_TYPE_XDP; 3589 case BPF_LSM_CGROUP: 3590 return BPF_PROG_TYPE_LSM; 3591 default: 3592 return BPF_PROG_TYPE_UNSPEC; 3593 } 3594 } 3595 3596 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd 3597 3598 #define BPF_F_ATTACH_MASK \ 3599 (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE) 3600 3601 static int bpf_prog_attach(const union bpf_attr *attr) 3602 { 3603 enum bpf_prog_type ptype; 3604 struct bpf_prog *prog; 3605 int ret; 3606 3607 if (CHECK_ATTR(BPF_PROG_ATTACH)) 3608 return -EINVAL; 3609 3610 if (attr->attach_flags & ~BPF_F_ATTACH_MASK) 3611 return -EINVAL; 3612 3613 ptype = attach_type_to_prog_type(attr->attach_type); 3614 if (ptype == BPF_PROG_TYPE_UNSPEC) 3615 return -EINVAL; 3616 3617 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 3618 if (IS_ERR(prog)) 3619 return PTR_ERR(prog); 3620 3621 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 3622 bpf_prog_put(prog); 3623 return -EINVAL; 3624 } 3625 3626 switch (ptype) { 3627 case BPF_PROG_TYPE_SK_SKB: 3628 case BPF_PROG_TYPE_SK_MSG: 3629 ret = sock_map_get_from_fd(attr, prog); 3630 break; 3631 case BPF_PROG_TYPE_LIRC_MODE2: 3632 ret = lirc_prog_attach(attr, prog); 3633 break; 3634 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3635 ret = netns_bpf_prog_attach(attr, prog); 3636 break; 3637 case BPF_PROG_TYPE_CGROUP_DEVICE: 3638 case BPF_PROG_TYPE_CGROUP_SKB: 3639 case BPF_PROG_TYPE_CGROUP_SOCK: 3640 case 
BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3641 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3642 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3643 case BPF_PROG_TYPE_SOCK_OPS: 3644 case BPF_PROG_TYPE_LSM: 3645 if (ptype == BPF_PROG_TYPE_LSM && 3646 prog->expected_attach_type != BPF_LSM_CGROUP) 3647 ret = -EINVAL; 3648 else 3649 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 3650 break; 3651 default: 3652 ret = -EINVAL; 3653 } 3654 3655 if (ret) 3656 bpf_prog_put(prog); 3657 return ret; 3658 } 3659 3660 #define BPF_PROG_DETACH_LAST_FIELD attach_type 3661 3662 static int bpf_prog_detach(const union bpf_attr *attr) 3663 { 3664 enum bpf_prog_type ptype; 3665 3666 if (CHECK_ATTR(BPF_PROG_DETACH)) 3667 return -EINVAL; 3668 3669 ptype = attach_type_to_prog_type(attr->attach_type); 3670 3671 switch (ptype) { 3672 case BPF_PROG_TYPE_SK_MSG: 3673 case BPF_PROG_TYPE_SK_SKB: 3674 return sock_map_prog_detach(attr, ptype); 3675 case BPF_PROG_TYPE_LIRC_MODE2: 3676 return lirc_prog_detach(attr); 3677 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3678 return netns_bpf_prog_detach(attr, ptype); 3679 case BPF_PROG_TYPE_CGROUP_DEVICE: 3680 case BPF_PROG_TYPE_CGROUP_SKB: 3681 case BPF_PROG_TYPE_CGROUP_SOCK: 3682 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3683 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3684 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3685 case BPF_PROG_TYPE_SOCK_OPS: 3686 case BPF_PROG_TYPE_LSM: 3687 return cgroup_bpf_prog_detach(attr, ptype); 3688 default: 3689 return -EINVAL; 3690 } 3691 } 3692 3693 #define BPF_PROG_QUERY_LAST_FIELD query.prog_attach_flags 3694 3695 static int bpf_prog_query(const union bpf_attr *attr, 3696 union bpf_attr __user *uattr) 3697 { 3698 if (!capable(CAP_NET_ADMIN)) 3699 return -EPERM; 3700 if (CHECK_ATTR(BPF_PROG_QUERY)) 3701 return -EINVAL; 3702 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 3703 return -EINVAL; 3704 3705 switch (attr->query.attach_type) { 3706 case BPF_CGROUP_INET_INGRESS: 3707 case BPF_CGROUP_INET_EGRESS: 3708 case BPF_CGROUP_INET_SOCK_CREATE: 3709 case BPF_CGROUP_INET_SOCK_RELEASE: 3710 case BPF_CGROUP_INET4_BIND: 3711 case BPF_CGROUP_INET6_BIND: 3712 case BPF_CGROUP_INET4_POST_BIND: 3713 case BPF_CGROUP_INET6_POST_BIND: 3714 case BPF_CGROUP_INET4_CONNECT: 3715 case BPF_CGROUP_INET6_CONNECT: 3716 case BPF_CGROUP_INET4_GETPEERNAME: 3717 case BPF_CGROUP_INET6_GETPEERNAME: 3718 case BPF_CGROUP_INET4_GETSOCKNAME: 3719 case BPF_CGROUP_INET6_GETSOCKNAME: 3720 case BPF_CGROUP_UDP4_SENDMSG: 3721 case BPF_CGROUP_UDP6_SENDMSG: 3722 case BPF_CGROUP_UDP4_RECVMSG: 3723 case BPF_CGROUP_UDP6_RECVMSG: 3724 case BPF_CGROUP_SOCK_OPS: 3725 case BPF_CGROUP_DEVICE: 3726 case BPF_CGROUP_SYSCTL: 3727 case BPF_CGROUP_GETSOCKOPT: 3728 case BPF_CGROUP_SETSOCKOPT: 3729 case BPF_LSM_CGROUP: 3730 return cgroup_bpf_prog_query(attr, uattr); 3731 case BPF_LIRC_MODE2: 3732 return lirc_prog_query(attr, uattr); 3733 case BPF_FLOW_DISSECTOR: 3734 case BPF_SK_LOOKUP: 3735 return netns_bpf_prog_query(attr, uattr); 3736 case BPF_SK_SKB_STREAM_PARSER: 3737 case BPF_SK_SKB_STREAM_VERDICT: 3738 case BPF_SK_MSG_VERDICT: 3739 case BPF_SK_SKB_VERDICT: 3740 return sock_map_bpf_prog_query(attr, uattr); 3741 default: 3742 return -EINVAL; 3743 } 3744 } 3745 3746 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size 3747 3748 static int bpf_prog_test_run(const union bpf_attr *attr, 3749 union bpf_attr __user *uattr) 3750 { 3751 struct bpf_prog *prog; 3752 int ret = -ENOTSUPP; 3753 3754 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 3755 return -EINVAL; 3756 3757 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 3758 (!attr->test.ctx_size_in && 
attr->test.ctx_in)) 3759 return -EINVAL; 3760 3761 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 3762 (!attr->test.ctx_size_out && attr->test.ctx_out)) 3763 return -EINVAL; 3764 3765 prog = bpf_prog_get(attr->test.prog_fd); 3766 if (IS_ERR(prog)) 3767 return PTR_ERR(prog); 3768 3769 if (prog->aux->ops->test_run) 3770 ret = prog->aux->ops->test_run(prog, attr, uattr); 3771 3772 bpf_prog_put(prog); 3773 return ret; 3774 } 3775 3776 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 3777 3778 static int bpf_obj_get_next_id(const union bpf_attr *attr, 3779 union bpf_attr __user *uattr, 3780 struct idr *idr, 3781 spinlock_t *lock) 3782 { 3783 u32 next_id = attr->start_id; 3784 int err = 0; 3785 3786 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 3787 return -EINVAL; 3788 3789 if (!capable(CAP_SYS_ADMIN)) 3790 return -EPERM; 3791 3792 next_id++; 3793 spin_lock_bh(lock); 3794 if (!idr_get_next(idr, &next_id)) 3795 err = -ENOENT; 3796 spin_unlock_bh(lock); 3797 3798 if (!err) 3799 err = put_user(next_id, &uattr->next_id); 3800 3801 return err; 3802 } 3803 3804 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 3805 { 3806 struct bpf_map *map; 3807 3808 spin_lock_bh(&map_idr_lock); 3809 again: 3810 map = idr_get_next(&map_idr, id); 3811 if (map) { 3812 map = __bpf_map_inc_not_zero(map, false); 3813 if (IS_ERR(map)) { 3814 (*id)++; 3815 goto again; 3816 } 3817 } 3818 spin_unlock_bh(&map_idr_lock); 3819 3820 return map; 3821 } 3822 3823 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 3824 { 3825 struct bpf_prog *prog; 3826 3827 spin_lock_bh(&prog_idr_lock); 3828 again: 3829 prog = idr_get_next(&prog_idr, id); 3830 if (prog) { 3831 prog = bpf_prog_inc_not_zero(prog); 3832 if (IS_ERR(prog)) { 3833 (*id)++; 3834 goto again; 3835 } 3836 } 3837 spin_unlock_bh(&prog_idr_lock); 3838 3839 return prog; 3840 } 3841 3842 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 3843 3844 struct bpf_prog *bpf_prog_by_id(u32 id) 3845 { 3846 struct bpf_prog *prog; 3847 3848 if (!id) 3849 return ERR_PTR(-ENOENT); 3850 3851 spin_lock_bh(&prog_idr_lock); 3852 prog = idr_find(&prog_idr, id); 3853 if (prog) 3854 prog = bpf_prog_inc_not_zero(prog); 3855 else 3856 prog = ERR_PTR(-ENOENT); 3857 spin_unlock_bh(&prog_idr_lock); 3858 return prog; 3859 } 3860 3861 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 3862 { 3863 struct bpf_prog *prog; 3864 u32 id = attr->prog_id; 3865 int fd; 3866 3867 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 3868 return -EINVAL; 3869 3870 if (!capable(CAP_SYS_ADMIN)) 3871 return -EPERM; 3872 3873 prog = bpf_prog_by_id(id); 3874 if (IS_ERR(prog)) 3875 return PTR_ERR(prog); 3876 3877 fd = bpf_prog_new_fd(prog); 3878 if (fd < 0) 3879 bpf_prog_put(prog); 3880 3881 return fd; 3882 } 3883 3884 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 3885 3886 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 3887 { 3888 struct bpf_map *map; 3889 u32 id = attr->map_id; 3890 int f_flags; 3891 int fd; 3892 3893 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 3894 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 3895 return -EINVAL; 3896 3897 if (!capable(CAP_SYS_ADMIN)) 3898 return -EPERM; 3899 3900 f_flags = bpf_get_file_flag(attr->open_flags); 3901 if (f_flags < 0) 3902 return f_flags; 3903 3904 spin_lock_bh(&map_idr_lock); 3905 map = idr_find(&map_idr, id); 3906 if (map) 3907 map = __bpf_map_inc_not_zero(map, true); 3908 else 3909 map = ERR_PTR(-ENOENT); 3910 spin_unlock_bh(&map_idr_lock); 3911 3912 if (IS_ERR(map)) 3913 return PTR_ERR(map); 3914 3915 fd = bpf_map_new_fd(map, f_flags); 3916 if 
(fd < 0) 3917 bpf_map_put_with_uref(map); 3918 3919 return fd; 3920 } 3921 3922 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 3923 unsigned long addr, u32 *off, 3924 u32 *type) 3925 { 3926 const struct bpf_map *map; 3927 int i; 3928 3929 mutex_lock(&prog->aux->used_maps_mutex); 3930 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 3931 map = prog->aux->used_maps[i]; 3932 if (map == (void *)addr) { 3933 *type = BPF_PSEUDO_MAP_FD; 3934 goto out; 3935 } 3936 if (!map->ops->map_direct_value_meta) 3937 continue; 3938 if (!map->ops->map_direct_value_meta(map, addr, off)) { 3939 *type = BPF_PSEUDO_MAP_VALUE; 3940 goto out; 3941 } 3942 } 3943 map = NULL; 3944 3945 out: 3946 mutex_unlock(&prog->aux->used_maps_mutex); 3947 return map; 3948 } 3949 3950 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 3951 const struct cred *f_cred) 3952 { 3953 const struct bpf_map *map; 3954 struct bpf_insn *insns; 3955 u32 off, type; 3956 u64 imm; 3957 u8 code; 3958 int i; 3959 3960 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 3961 GFP_USER); 3962 if (!insns) 3963 return insns; 3964 3965 for (i = 0; i < prog->len; i++) { 3966 code = insns[i].code; 3967 3968 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 3969 insns[i].code = BPF_JMP | BPF_CALL; 3970 insns[i].imm = BPF_FUNC_tail_call; 3971 /* fall-through */ 3972 } 3973 if (code == (BPF_JMP | BPF_CALL) || 3974 code == (BPF_JMP | BPF_CALL_ARGS)) { 3975 if (code == (BPF_JMP | BPF_CALL_ARGS)) 3976 insns[i].code = BPF_JMP | BPF_CALL; 3977 if (!bpf_dump_raw_ok(f_cred)) 3978 insns[i].imm = 0; 3979 continue; 3980 } 3981 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 3982 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 3983 continue; 3984 } 3985 3986 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 3987 continue; 3988 3989 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 3990 map = bpf_map_from_imm(prog, imm, &off, &type); 3991 if (map) { 3992 insns[i].src_reg = type; 3993 insns[i].imm = map->id; 3994 insns[i + 1].imm = off; 3995 continue; 3996 } 3997 } 3998 3999 return insns; 4000 } 4001 4002 static int set_info_rec_size(struct bpf_prog_info *info) 4003 { 4004 /* 4005 * Ensure info.*_rec_size is the same as kernel expected size 4006 * 4007 * or 4008 * 4009 * Only allow zero *_rec_size if both _rec_size and _cnt are 4010 * zero. In this case, the kernel will set the expected 4011 * _rec_size back to the info. 
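 *
 * This supports the usual two-pass pattern in user space, sketched here
 * with libbpf's bpf_obj_get_info_by_fd() (error handling omitted):
 *
 *	__u32 len = sizeof(info);
 *	memset(&info, 0, sizeof(info));
 *	bpf_obj_get_info_by_fd(prog_fd, &info, &len);
 *	info.func_info = (__u64)(unsigned long)
 *		calloc(info.nr_func_info, info.func_info_rec_size);
 *	bpf_obj_get_info_by_fd(prog_fd, &info, &len);
 *
 * The first call leaves all counts and record sizes filled in; the second
 * call copies the actual records into the buffers the caller provided.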
4012 */ 4013 4014 if ((info->nr_func_info || info->func_info_rec_size) && 4015 info->func_info_rec_size != sizeof(struct bpf_func_info)) 4016 return -EINVAL; 4017 4018 if ((info->nr_line_info || info->line_info_rec_size) && 4019 info->line_info_rec_size != sizeof(struct bpf_line_info)) 4020 return -EINVAL; 4021 4022 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 4023 info->jited_line_info_rec_size != sizeof(__u64)) 4024 return -EINVAL; 4025 4026 info->func_info_rec_size = sizeof(struct bpf_func_info); 4027 info->line_info_rec_size = sizeof(struct bpf_line_info); 4028 info->jited_line_info_rec_size = sizeof(__u64); 4029 4030 return 0; 4031 } 4032 4033 static int bpf_prog_get_info_by_fd(struct file *file, 4034 struct bpf_prog *prog, 4035 const union bpf_attr *attr, 4036 union bpf_attr __user *uattr) 4037 { 4038 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4039 struct btf *attach_btf = bpf_prog_get_target_btf(prog); 4040 struct bpf_prog_info info; 4041 u32 info_len = attr->info.info_len; 4042 struct bpf_prog_kstats stats; 4043 char __user *uinsns; 4044 u32 ulen; 4045 int err; 4046 4047 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4048 if (err) 4049 return err; 4050 info_len = min_t(u32, sizeof(info), info_len); 4051 4052 memset(&info, 0, sizeof(info)); 4053 if (copy_from_user(&info, uinfo, info_len)) 4054 return -EFAULT; 4055 4056 info.type = prog->type; 4057 info.id = prog->aux->id; 4058 info.load_time = prog->aux->load_time; 4059 info.created_by_uid = from_kuid_munged(current_user_ns(), 4060 prog->aux->user->uid); 4061 info.gpl_compatible = prog->gpl_compatible; 4062 4063 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 4064 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 4065 4066 mutex_lock(&prog->aux->used_maps_mutex); 4067 ulen = info.nr_map_ids; 4068 info.nr_map_ids = prog->aux->used_map_cnt; 4069 ulen = min_t(u32, info.nr_map_ids, ulen); 4070 if (ulen) { 4071 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 4072 u32 i; 4073 4074 for (i = 0; i < ulen; i++) 4075 if (put_user(prog->aux->used_maps[i]->id, 4076 &user_map_ids[i])) { 4077 mutex_unlock(&prog->aux->used_maps_mutex); 4078 return -EFAULT; 4079 } 4080 } 4081 mutex_unlock(&prog->aux->used_maps_mutex); 4082 4083 err = set_info_rec_size(&info); 4084 if (err) 4085 return err; 4086 4087 bpf_prog_get_stats(prog, &stats); 4088 info.run_time_ns = stats.nsecs; 4089 info.run_cnt = stats.cnt; 4090 info.recursion_misses = stats.misses; 4091 4092 info.verified_insns = prog->aux->verified_insns; 4093 4094 if (!bpf_capable()) { 4095 info.jited_prog_len = 0; 4096 info.xlated_prog_len = 0; 4097 info.nr_jited_ksyms = 0; 4098 info.nr_jited_func_lens = 0; 4099 info.nr_func_info = 0; 4100 info.nr_line_info = 0; 4101 info.nr_jited_line_info = 0; 4102 goto done; 4103 } 4104 4105 ulen = info.xlated_prog_len; 4106 info.xlated_prog_len = bpf_prog_insn_size(prog); 4107 if (info.xlated_prog_len && ulen) { 4108 struct bpf_insn *insns_sanitized; 4109 bool fault; 4110 4111 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 4112 info.xlated_prog_insns = 0; 4113 goto done; 4114 } 4115 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 4116 if (!insns_sanitized) 4117 return -ENOMEM; 4118 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 4119 ulen = min_t(u32, info.xlated_prog_len, ulen); 4120 fault = copy_to_user(uinsns, insns_sanitized, ulen); 4121 kfree(insns_sanitized); 4122 if (fault) 4123 return -EFAULT; 4124 } 4125 4126 if 
	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}

	if (bpf_prog_is_offloaded(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is supposed to be skipped for offload.
	 * bpf_prog_offload_info_fill() is the place to fill similar fields
	 * for offload.
	 */
	ulen = info.jited_prog_len;
	if (prog->aux->func_cnt) {
		u32 i;

		info.jited_prog_len = 0;
		for (i = 0; i < prog->aux->func_cnt; i++)
			info.jited_prog_len += prog->aux->func[i]->jited_len;
	} else {
		info.jited_prog_len = prog->jited_len;
	}

	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);

			/* for multi-function programs, copy the JITed
			 * instructions for all the functions
			 */
			if (prog->aux->func_cnt) {
				u32 len, free, i;
				u8 *img;

				free = ulen;
				for (i = 0; i < prog->aux->func_cnt; i++) {
					len = prog->aux->func[i]->jited_len;
					len = min_t(u32, len, free);
					img = (u8 *) prog->aux->func[i]->bpf_func;
					if (copy_to_user(uinsns, img, len))
						return -EFAULT;
					uinsns += len;
					free -= len;
					if (!free)
						break;
				}
			} else {
				if (copy_to_user(uinsns, prog->bpf_func, ulen))
					return -EFAULT;
			}
		} else {
			info.jited_prog_insns = 0;
		}
	}

	ulen = info.nr_jited_ksyms;
	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			unsigned long ksym_addr;
			u64 __user *user_ksyms;
			u32 i;

			/* copy the address of the kernel symbol
			 * corresponding to each function
			 */
			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					ksym_addr = (unsigned long)
						prog->aux->func[i]->bpf_func;
					if (put_user((u64) ksym_addr,
						     &user_ksyms[i]))
						return -EFAULT;
				}
			} else {
				ksym_addr = (unsigned long) prog->bpf_func;
				if (put_user((u64) ksym_addr, &user_ksyms[0]))
					return -EFAULT;
			}
		} else {
			info.jited_ksyms = 0;
		}
	}
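
	/*
	 * As for nr_jited_ksyms above: func_cnt is 0 for a single-function
	 * program, so the GNU "?:" default reports exactly one entry in
	 * that case.
	 */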
	ulen = info.nr_jited_func_lens;
	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			u32 __user *user_lens;
			u32 func_len, i;

			/* copy the JITed image lengths for each function */
			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
			user_lens = u64_to_user_ptr(info.jited_func_lens);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					func_len =
						prog->aux->func[i]->jited_len;
					if (put_user(func_len, &user_lens[i]))
						return -EFAULT;
				}
			} else {
				func_len = prog->jited_len;
				if (put_user(func_len, &user_lens[0]))
					return -EFAULT;
			}
		} else {
			info.jited_func_lens = 0;
		}
	}

	if (prog->aux->btf)
		info.btf_id = btf_obj_id(prog->aux->btf);
	info.attach_btf_id = prog->aux->attach_btf_id;
	if (attach_btf)
		info.attach_btf_obj_id = btf_obj_id(attach_btf);

	ulen = info.nr_func_info;
	info.nr_func_info = prog->aux->func_info_cnt;
	if (info.nr_func_info && ulen) {
		char __user *user_finfo;

		user_finfo = u64_to_user_ptr(info.func_info);
		ulen = min_t(u32, info.nr_func_info, ulen);
		if (copy_to_user(user_finfo, prog->aux->func_info,
				 info.func_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_line_info;
	info.nr_line_info = prog->aux->nr_linfo;
	if (info.nr_line_info && ulen) {
		__u8 __user *user_linfo;

		user_linfo = u64_to_user_ptr(info.line_info);
		ulen = min_t(u32, info.nr_line_info, ulen);
		if (copy_to_user(user_linfo, prog->aux->linfo,
				 info.line_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_jited_line_info;
	if (prog->aux->jited_linfo)
		info.nr_jited_line_info = prog->aux->nr_linfo;
	else
		info.nr_jited_line_info = 0;
	if (info.nr_jited_line_info && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			unsigned long line_addr;
			__u64 __user *user_linfo;
			u32 i;

			user_linfo = u64_to_user_ptr(info.jited_line_info);
			ulen = min_t(u32, info.nr_jited_line_info, ulen);
			for (i = 0; i < ulen; i++) {
				line_addr = (unsigned long)prog->aux->jited_linfo[i];
				if (put_user((__u64)line_addr, &user_linfo[i]))
					return -EFAULT;
			}
		} else {
			info.jited_line_info = 0;
		}
	}
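
	/*
	 * Note the asymmetry above: func_info/line_info are copied out for
	 * any bpf_capable() caller, while anything exposing kernel addresses
	 * (JITed images, ksyms, jited line info) is additionally gated by
	 * bpf_dump_raw_ok() on the opener's credentials.
	 */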
	ulen = info.nr_prog_tags;
	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
	if (ulen) {
		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
		u32 i;

		user_prog_tags = u64_to_user_ptr(info.prog_tags);
		ulen = min_t(u32, info.nr_prog_tags, ulen);
		if (prog->aux->func_cnt) {
			for (i = 0; i < ulen; i++) {
				if (copy_to_user(user_prog_tags[i],
						 prog->aux->func[i]->tag,
						 BPF_TAG_SIZE))
					return -EFAULT;
			}
		} else {
			if (copy_to_user(user_prog_tags[0],
					 prog->tag, BPF_TAG_SIZE))
				return -EFAULT;
		}
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct file *file,
				  struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	info.map_extra = map->map_extra;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
		info.btf_id = btf_obj_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}
	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_btf_get_info_by_fd(struct file *file,
				  struct btf *btf,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
	if (err)
		return err;

	return btf_get_info_by_fd(btf, attr, uattr);
}

static int bpf_link_get_info_by_fd(struct file *file,
				   struct bpf_link *link,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_link_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = link->type;
	info.id = link->id;
	if (link->prog)
		info.prog_id = link->prog->aux->id;

	if (link->ops->fill_link_info) {
		err = link->ops->fill_link_info(link, &info);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}
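
/*
 * Minimal userspace sketch (not part of this file) of the info query the
 * handlers above serve; the fd may refer to a prog, map, BTF object or
 * link, and prog_fd here is a hypothetical program fd:
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *
 * On success attr.info.info_len holds the length actually written.
 */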
#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
					     uattr);
	else if (f.file->f_op == &btf_fops)
		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
	else if (f.file->f_op == &bpf_link_fops)
		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
					      attr, uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

#define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size

static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{
	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	return btf_new_fd(attr, uattr, uattr_size);
}

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}
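
/*
 * Copy the result of a task_fd_query back to userspace.  The buf string is
 * truncated to the user's buf_len (reported via -ENOSPC) but always NUL
 * terminated, and the full length is written back to buf_len so the caller
 * can retry with a bigger buffer.
 */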
static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NULL terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NULL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NULL terminator,
			 * do a partial copy with NULL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}

#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	rcu_read_lock();
	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return -ENOENT;

	err = 0;
	file = fget_task(task, fd);
	put_task_struct(task);
	if (!file)
		return -EBADF;

	if (file->f_op == &bpf_link_fops) {
		struct bpf_link *link = file->private_data;

		if (link->ops == &bpf_raw_tp_link_lops) {
			struct bpf_raw_tp_link *raw_tp =
				container_of(link, struct bpf_raw_tp_link, link);
			struct bpf_raw_event_map *btp = raw_tp->btp;

			err = bpf_task_fd_query_copy(attr, uattr,
						     raw_tp->link.prog->aux->id,
						     BPF_FD_TYPE_RAW_TRACEPOINT,
						     btp->tp->name, 0, 0);
			goto put_file;
		}
		goto out_not_supp;
	}

	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

out_not_supp:
	err = -ENOTSUPP;
put_file:
	fput(file);
	return err;
}
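
/*
 * A rough userspace sketch (not part of this file) of driving the batch
 * ops below; cursor, keys and vals are hypothetical caller buffers and
 * in_batch/out_batch are opaque cursors owned by the kernel:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.batch.map_fd = map_fd;
 *	attr.batch.out_batch = (__u64)(unsigned long)&cursor;
 *	attr.batch.keys = (__u64)(unsigned long)keys;
 *	attr.batch.values = (__u64)(unsigned long)vals;
 *	attr.batch.count = 128;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *
 * count is in/out: capacity on entry, elements returned on exit.
 */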
#define BPF_MAP_BATCH_LAST_FIELD batch.flags

#define BPF_DO_BATCH(fn, ...)			\
	do {					\
		if (!fn) {			\
			err = -ENOTSUPP;	\
			goto err_put;		\
		}				\
		err = fn(__VA_ARGS__);		\
	} while (0)

static int bpf_map_do_batch(const union bpf_attr *attr,
			    union bpf_attr __user *uattr,
			    int cmd)
{
	bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
			cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
	struct bpf_map *map;
	int err, ufd;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_BATCH))
		return -EINVAL;

	ufd = attr->batch.map_fd;
	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (has_write)
		bpf_map_write_active_inc(map);
	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}
	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (cmd == BPF_MAP_LOOKUP_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
	else if (cmd == BPF_MAP_UPDATE_BATCH)
		BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
	else
		BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
err_put:
	if (has_write)
		bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies
static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (CHECK_ATTR(BPF_LINK_CREATE))
		return -EINVAL;

	if (attr->link_create.attach_type == BPF_STRUCT_OPS)
		return bpf_struct_ops_link_create(attr);

	prog = bpf_prog_get(attr->link_create.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	ret = bpf_prog_attach_check_attach_type(prog,
						attr->link_create.attach_type);
	if (ret)
		goto out;

	switch (prog->type) {
	case BPF_PROG_TYPE_EXT:
		break;
	case BPF_PROG_TYPE_NETFILTER:
		if (attr->link_create.attach_type != BPF_NETFILTER) {
			ret = -EINVAL;
			goto out;
		}
		break;
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
		if (attr->link_create.attach_type != BPF_PERF_EVENT) {
			ret = -EINVAL;
			goto out;
		}
		break;
	case BPF_PROG_TYPE_KPROBE:
		if (attr->link_create.attach_type != BPF_PERF_EVENT &&
		    attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) {
			ret = -EINVAL;
			goto out;
		}
		break;
	default:
		ptype = attach_type_to_prog_type(attr->link_create.attach_type);
		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}
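
	/*
	 * attach_type has been validated against prog->type above; now pick
	 * the link flavour that actually implements the attachment.
	 */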
	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		ret = cgroup_bpf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_EXT:
		ret = bpf_tracing_prog_attach(prog,
					      attr->link_create.target_fd,
					      attr->link_create.target_btf_id,
					      attr->link_create.tracing.cookie);
		break;
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_TRACING:
		if (attr->link_create.attach_type != prog->expected_attach_type) {
			ret = -EINVAL;
			goto out;
		}
		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
			ret = bpf_raw_tp_link_attach(prog, NULL);
		else if (prog->expected_attach_type == BPF_TRACE_ITER)
			ret = bpf_iter_link_attach(attr, uattr, prog);
		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
			ret = cgroup_bpf_link_attach(attr, prog);
		else
			ret = bpf_tracing_prog_attach(prog,
						      attr->link_create.target_fd,
						      attr->link_create.target_btf_id,
						      attr->link_create.tracing.cookie);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_SK_LOOKUP:
		ret = netns_bpf_link_create(attr, prog);
		break;
#ifdef CONFIG_NET
	case BPF_PROG_TYPE_XDP:
		ret = bpf_xdp_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_NETFILTER:
		ret = bpf_nf_link_attach(attr, prog);
		break;
#endif
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
		ret = bpf_perf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_KPROBE:
		if (attr->link_create.attach_type == BPF_PERF_EVENT)
			ret = bpf_perf_link_attach(attr, prog);
		else
			ret = bpf_kprobe_multi_link_attach(attr, prog);
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret < 0)
		bpf_prog_put(prog);
	return ret;
}

static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
{
	struct bpf_map *new_map, *old_map = NULL;
	int ret;

	new_map = bpf_map_get(attr->link_update.new_map_fd);
	if (IS_ERR(new_map))
		return PTR_ERR(new_map);

	if (attr->link_update.flags & BPF_F_REPLACE) {
		old_map = bpf_map_get(attr->link_update.old_map_fd);
		if (IS_ERR(old_map)) {
			ret = PTR_ERR(old_map);
			goto out_put;
		}
	} else if (attr->link_update.old_map_fd) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = link->ops->update_map(link, new_map, old_map);

	if (old_map)
		bpf_map_put(old_map);
out_put:
	bpf_map_put(new_map);
	return ret;
}

#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd

static int link_update(union bpf_attr *attr)
{
	struct bpf_prog *old_prog = NULL, *new_prog;
	struct bpf_link *link;
	u32 flags;
	int ret;

	if (CHECK_ATTR(BPF_LINK_UPDATE))
		return -EINVAL;

	flags = attr->link_update.flags;
	if (flags & ~BPF_F_REPLACE)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_update.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->update_map) {
		ret = link_update_map(link, attr);
		goto out_put_link;
	}

	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
	if (IS_ERR(new_prog)) {
		ret = PTR_ERR(new_prog);
		goto out_put_link;
	}

	if (flags & BPF_F_REPLACE) {
		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
		if (IS_ERR(old_prog)) {
			ret = PTR_ERR(old_prog);
			old_prog = NULL;
			goto out_put_progs;
		}
	} else if (attr->link_update.old_prog_fd) {
		ret = -EINVAL;
		goto out_put_progs;
	}

	if (link->ops->update_prog)
		ret = link->ops->update_prog(link, new_prog, old_prog);
	else
		ret = -EINVAL;

out_put_progs:
	if (old_prog)
		bpf_prog_put(old_prog);
	if (ret)
		bpf_prog_put(new_prog);
out_put_link:
	bpf_link_put_direct(link);
	return ret;
}
#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd

static int link_detach(union bpf_attr *attr)
{
	struct bpf_link *link;
	int ret;

	if (CHECK_ATTR(BPF_LINK_DETACH))
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->detach)
		ret = link->ops->detach(link);
	else
		ret = -EOPNOTSUPP;

	bpf_link_put_direct(link);
	return ret;
}

static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
}

struct bpf_link *bpf_link_by_id(u32 id)
{
	struct bpf_link *link;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&link_idr_lock);
	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
	link = idr_find(&link_idr, id);
	if (link) {
		if (link->id)
			link = bpf_link_inc_not_zero(link);
		else
			link = ERR_PTR(-EAGAIN);
	} else {
		link = ERR_PTR(-ENOENT);
	}
	spin_unlock_bh(&link_idr_lock);
	return link;
}

struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
{
	struct bpf_link *link;

	spin_lock_bh(&link_idr_lock);
again:
	link = idr_get_next(&link_idr, id);
	if (link) {
		link = bpf_link_inc_not_zero(link);
		if (IS_ERR(link)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&link_idr_lock);

	return link;
}

#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_link *link;
	u32 id = attr->link_id;
	int fd;

	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	link = bpf_link_by_id(id);
	if (IS_ERR(link))
		return PTR_ERR(link);

	fd = bpf_link_new_fd(link);
	if (fd < 0)
		bpf_link_put_direct(link);

	return fd;
}

DEFINE_MUTEX(bpf_stats_enabled_mutex);

static int bpf_stats_release(struct inode *inode, struct file *file)
{
	mutex_lock(&bpf_stats_enabled_mutex);
	static_key_slow_dec(&bpf_stats_enabled_key.key);
	mutex_unlock(&bpf_stats_enabled_mutex);
	return 0;
}

static const struct file_operations bpf_stats_fops = {
	.release = bpf_stats_release,
};

static int bpf_enable_runtime_stats(void)
{
	int fd;

	mutex_lock(&bpf_stats_enabled_mutex);

	/* Set a very high limit to avoid overflow */
	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
		mutex_unlock(&bpf_stats_enabled_mutex);
		return -EBUSY;
	}

	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
	if (fd >= 0)
		static_key_slow_inc(&bpf_stats_enabled_key.key);

	mutex_unlock(&bpf_stats_enabled_mutex);
	return fd;
}

#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type

static int bpf_enable_stats(union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_ENABLE_STATS))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (attr->enable_stats.type) {
	case BPF_STATS_RUN_TIME:
		return bpf_enable_runtime_stats();
	default:
		break;
	}
	return -EINVAL;
}
#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags

static int bpf_iter_create(union bpf_attr *attr)
{
	struct bpf_link *link;
	int err;

	if (CHECK_ATTR(BPF_ITER_CREATE))
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	err = bpf_iter_new_fd(link);
	bpf_link_put_direct(link);

	return err;
}

#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags

static int bpf_prog_bind_map(union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct bpf_map **used_maps_old, **used_maps_new;
	int i, ret = 0;

	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
		return -EINVAL;

	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	map = bpf_map_get(attr->prog_bind_map.map_fd);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_prog_put;
	}

	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (used_maps_old[i] == map) {
			bpf_map_put(map);
			goto out_unlock;
		}

	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	if (!used_maps_new) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;

	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;

	kfree(used_maps_old);

out_unlock:
	mutex_unlock(&prog->aux->used_maps_mutex);

	if (ret)
		bpf_map_put(map);
out_prog_put:
	bpf_prog_put(prog);
	return ret;
}
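
/*
 * Single entry point for all BPF commands.  Note the compat handling
 * below: size is clamped to sizeof(attr) and attr is pre-zeroed, so a
 * binary built against an older, shorter union bpf_attr keeps working,
 * while non-zero tail bytes from a newer userspace are rejected up front.
 */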
static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
{
	union bpf_attr attr;
	int err;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_bpfptr(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr, uattr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr, uattr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr, size);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr.user);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr.user);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr, uattr, size);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr.user);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	case BPF_MAP_LOOKUP_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user,
				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
		break;
	case BPF_MAP_UPDATE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
		break;
	case BPF_MAP_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
		break;
	case BPF_LINK_CREATE:
		err = link_create(&attr, uattr);
		break;
	case BPF_LINK_UPDATE:
		err = link_update(&attr);
		break;
	case BPF_LINK_GET_FD_BY_ID:
		err = bpf_link_get_fd_by_id(&attr);
		break;
	case BPF_LINK_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &link_idr, &link_idr_lock);
		break;
	case BPF_ENABLE_STATS:
		err = bpf_enable_stats(&attr);
		break;
	case BPF_ITER_CREATE:
		err = bpf_iter_create(&attr);
		break;
	case BPF_LINK_DETACH:
		err = link_detach(&attr);
		break;
	case BPF_PROG_BIND_MAP:
		err = bpf_prog_bind_map(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
}

static bool syscall_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= U16_MAX)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{
	switch (cmd) {
	case BPF_MAP_CREATE:
	case BPF_MAP_DELETE_ELEM:
	case BPF_MAP_UPDATE_ELEM:
	case BPF_MAP_FREEZE:
	case BPF_MAP_GET_FD_BY_ID:
	case BPF_PROG_LOAD:
	case BPF_BTF_LOAD:
	case BPF_LINK_CREATE:
	case BPF_RAW_TRACEPOINT_OPEN:
		break;
	default:
		return -EINVAL;
	}
	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}
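
/*
 * Hypothetical BPF_PROG_TYPE_SYSCALL program (a sketch, not part of this
 * file; "struct args" is an invented context) using the helper above to
 * create a map from inside BPF, in the spirit of the light skeleton:
 *
 *	SEC("syscall")
 *	int create_map(struct args *ctx)
 *	{
 *		union bpf_attr attr = {
 *			.map_type = BPF_MAP_TYPE_ARRAY,
 *			.key_size = 4,
 *			.value_size = 8,
 *			.max_entries = 1,
 *		};
 *
 *		ctx->map_fd = bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *		return 0;
 *	}
 */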
/* To shut up -Wmissing-prototypes.
 * This function is used by the kernel light skeleton
 * to load bpf programs when modules are loaded or during kernel boot.
 * See tools/lib/bpf/skel_internal.h
 */
int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);

int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_prog * __maybe_unused prog;
	struct bpf_tramp_run_ctx __maybe_unused run_ctx;

	switch (cmd) {
#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
	case BPF_PROG_TEST_RUN:
		if (attr->test.data_in || attr->test.data_out ||
		    attr->test.ctx_out || attr->test.duration ||
		    attr->test.repeat || attr->test.flags)
			return -EINVAL;

		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
		    attr->test.ctx_size_in > U16_MAX) {
			bpf_prog_put(prog);
			return -EINVAL;
		}

		run_ctx.bpf_cookie = 0;
		run_ctx.saved_run_ctx = NULL;
		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
			/* recursion detected */
			bpf_prog_put(prog);
			return -EBUSY;
		}
		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
						&run_ctx);
		bpf_prog_put(prog);
		return 0;
#endif
	default:
		return ____bpf_sys_bpf(cmd, attr, size);
	}
}
EXPORT_SYMBOL(kern_sys_bpf);

static const struct bpf_func_proto bpf_sys_bpf_proto = {
	.func = bpf_sys_bpf,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
};

const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id);
}

BPF_CALL_1(bpf_sys_close, u32, fd)
{
	/* When bpf program calls this helper there should not be
	 * an fdget() without matching completed fdput().
	 * This helper is allowed in the following callchain only:
	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
	 */
	return close_fd(fd);
}

static const struct bpf_func_proto bpf_sys_close_proto = {
	.func = bpf_sys_close,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	if (!bpf_dump_raw_ok(current_cred()))
		return -EPERM;

	*res = kallsyms_lookup_name(name);
	return *res ? 0 : -ENOENT;
}

static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
	.func = bpf_kallsyms_lookup_name,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_LONG,
};
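
/*
 * Helpers available to BPF_PROG_TYPE_SYSCALL programs.  bpf_sys_bpf() is
 * only exposed to perfmon_capable() callers; unmatched func_ids fall back
 * to tracing_prog_func_proto().
 */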
static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sys_bpf:
		return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
	case BPF_FUNC_btf_find_by_name_kind:
		return &bpf_btf_find_by_name_kind_proto;
	case BPF_FUNC_sys_close:
		return &bpf_sys_close_proto;
	case BPF_FUNC_kallsyms_lookup_name:
		return &bpf_kallsyms_lookup_name_proto;
	default:
		return tracing_prog_func_proto(func_id, prog);
	}
}

const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
	.get_func_proto = syscall_prog_func_proto,
	.is_valid_access = syscall_prog_is_valid_access,
};

const struct bpf_prog_ops bpf_syscall_prog_ops = {
	.test_run = bpf_prog_test_run_syscall,
};

#ifdef CONFIG_SYSCTL
static int bpf_stats_handler(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct static_key *key = (struct static_key *)table->data;
	static int saved_val;
	int val, ret;
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(val),
		.mode = table->mode,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	};

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&bpf_stats_enabled_mutex);
	val = saved_val;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret && val != saved_val) {
		if (val)
			static_key_slow_inc(key);
		else
			static_key_slow_dec(key);
		saved_val = val;
	}
	mutex_unlock(&bpf_stats_enabled_mutex);
	return ret;
}

void __weak unpriv_ebpf_notify(int new_state)
{
}

static int bpf_unpriv_handler(struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, unpriv_enable = *(int *)table->data;
	bool locked_state = unpriv_enable == 1;
	struct ctl_table tmp = *table;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	tmp.data = &unpriv_enable;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret) {
		if (locked_state && unpriv_enable != 1)
			return -EPERM;
		*(int *)table->data = unpriv_enable;
	}

	if (write)
		unpriv_ebpf_notify(unpriv_enable);

	return ret;
}

static struct ctl_table bpf_syscall_table[] = {
	{
		.procname = "unprivileged_bpf_disabled",
		.data = &sysctl_unprivileged_bpf_disabled,
		.maxlen = sizeof(sysctl_unprivileged_bpf_disabled),
		.mode = 0644,
		.proc_handler = bpf_unpriv_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_TWO,
	},
	{
		.procname = "bpf_stats_enabled",
		.data = &bpf_stats_enabled_key.key,
		.mode = 0644,
		.proc_handler = bpf_stats_handler,
	},
	{ }
};

static int __init bpf_syscall_sysctl_init(void)
{
	register_sysctl_init("kernel", bpf_syscall_table);
	return 0;
}
late_initcall(bpf_syscall_sysctl_init);
#endif /* CONFIG_SYSCTL */