// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>

#include <net/netfilter/nf_bpf_link.h>
#include <net/netkit.h>
#include <net/tcx.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK	(BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
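
/* Illustrative caller pattern (a sketch, not code from this file): syscall
 * handlers typically pair the check above with the actual copy, e.g.
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	if (copy_from_bpfptr(&attr, uattr, size))
 *		return -EFAULT;
 *
 * so a newer, larger struct from userspace is accepted only if all the
 * bytes the kernel does not understand are zero.
 */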

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = bpf_map_offload_map_mem_usage,
};

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}
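
/* Worked example for bpf_map_value_size() (illustrative): a
 * BPF_MAP_TYPE_PERCPU_ARRAY with value_size == 12 on a system with 4
 * possible CPUs yields round_up(12, 8) * 4 == 64 bytes: userspace must
 * supply one 8-byte-aligned slot per possible CPU.
 */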

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running non-sleepable BPF programs to complete so that
	 * userspace, when we return to it, knows that all non-sleepable
	 * programs that could be running use the new map value. For sleepable
	 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait
	 * for the completions of these programs, but considering the waiting
	 * time can be very long and userspace may think it will hang forever,
	 * so don't handle sleepable BPF programs now.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_offloaded(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, map_file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
						   flags);
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
						  flags);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_offloaded(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();

	return err;
}
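
/* Illustrative BPF_F_LOCK value layout (a sketch, not from this file): a
 * map value declared in BPF C as
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		int data;
 *	};
 *
 * is copied above with the lock held and the lock word then masked out by
 * check_and_init_map_value(), since the lock is kernel-internal state that
 * must not leak to userspace.
 */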

/* Please, do not use this function outside from the map creation path
 * (e.g. in map update path) without taking care of setting the active
 * memory cgroup (see at bpf_map_kmalloc_node() for example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}
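
/* Sizing note (illustrative): with 4 KiB pages and PAGE_ALLOC_COSTLY_ORDER
 * of 3, requests up to 32 KiB are first attempted with kmalloc_node() and
 * __GFP_NORETRY; larger requests, or a failed kmalloc, fall through to the
 * vmalloc path with __GFP_RETRY_MAYFAIL as described above.
 */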

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, lets clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	spin_lock_irqsave(&map_idr_lock, flags);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	spin_unlock_irqrestore(&map_idr_lock, flags);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	if (memcg_bpf_enabled())
		map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
		return NULL;
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & field_mask))
		return NULL;
	return field;
}
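
/* Note: bsearch() in btf_record_find() is handed &offset as the search key.
 * This relies on offset being the first member of struct btf_field, so the
 * key pointer can be treated as a btf_field by btf_field_cmp().
 */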

void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			btf_put(rec->fields[i].kptr.btf);
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
			/* Nothing to release */
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}

struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = offsetof(struct btf_record, fields[rec->cnt]);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
			/* Nothing to acquire */
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = offsetof(struct btf_record, fields[rec_a->cnt]);
	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
	 * members are zeroed out. So memcmp is safe to do without worrying
	 * about padding/unused fields.
	 *
	 * While spin_lock, timer, and kptr have no relation to map BTF,
	 * list_head metadata is specific to map BTF, the btf and value_rec
	 * members in particular. btf is the map BTF, while value_rec points to
	 * btf_record in that map BTF.
	 *
	 * So while by default, we don't rely on the map BTF (which the records
	 * were parsed from) matching for both records, which is not backwards
	 * compatible, in case list_head is part of it, we implicitly rely on
	 * that by way of depending on memcmp succeeding for it.
	 */
	return !memcmp(rec_a, rec_b, size);
}

void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_struct_meta *pointee_struct_meta;
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;
		void *xchgd_field;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
			if (!xchgd_field)
				break;

			if (!btf_is_kernel(field->kptr.btf)) {
				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
									   field->kptr.btf_id);
				migrate_disable();
				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
								 pointee_struct_meta->record : NULL,
						    fields[i].type == BPF_KPTR_PERCPU);
				migrate_enable();
			} else {
				field->kptr.dtor(xchgd_field);
			}
			break;
		case BPF_LIST_HEAD:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_RB_ROOT:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_LIST_NODE:
		case BPF_RB_NODE:
		case BPF_REFCOUNT:
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct btf_record *rec = map->record;
	struct btf *btf = map->btf;

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
	/* Delay freeing of btf_record for maps, as map_free
	 * callback usually needs access to them. It is better to do it here
	 * than require each callback to do the free itself manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record was
	 * already freed using the map_free callback for map in map case which
	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
	 * template bpf_map struct used during verification.
	 */
	btf_record_free(rec);
	/* Delay freeing of btf for maps, as map_free callback may need
	 * struct_meta info which will be freed with btf_put().
	 */
	btf_put(btf);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

static void bpf_map_free_in_work(struct bpf_map *map)
{
	INIT_WORK(&map->work, bpf_map_free_deferred);
	/* Avoid spawning kworkers, since they all might contend
	 * for the same mutex like slab_mutex.
	 */
	queue_work(system_unbound_wq, &map->work);
}

static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{
	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}

static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_map_free_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_map_free_rcu_gp);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);

		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
		if (READ_ONCE(map->free_after_mult_rcu_gp))
			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
		else if (READ_ONCE(map->free_after_rcu_gp))
			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
		else
			bpf_map_free_in_work(map);
	}
}
EXPORT_SYMBOL_GPL(bpf_map_put);
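
/* Summary of the free path above (descriptive): sleepable BPF programs may
 * still reference the map under RCU tasks trace, so free_after_mult_rcu_gp
 * waits for both an RCU tasks trace and a regular RCU grace period (the
 * latter is skipped when rcu_trace_implies_rcu_gp()) before the actual
 * teardown runs from the workqueue.
 */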

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type  = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_usage(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif
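
/* Example /proc/<pid>/fdinfo/<fd> output for a map (illustrative values):
 *
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	1024
 *	map_flags:	0x0
 *	map_extra:	0x0
 *	memlock:	69632
 *	map_id:	42
 *	frozen:	0
 */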

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory region (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open = bpf_map_mmap_open,
	.close = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allows user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vm_flags_clear(vma, VM_MAYEXEC);
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vm_flags_clear(vma, VM_MAYWRITE);

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
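
/* Illustrative expansion of CHECK_ATTR(): given
 *
 *	#define BPF_MAP_FREEZE_LAST_FIELD map_fd
 *
 * CHECK_ATTR(BPF_MAP_FREEZE) is true iff any byte of 'attr' past map_fd is
 * non-zero, i.e. userspace filled in fields this command does not consume.
 */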

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
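
/* Examples (illustrative): bpf_obj_name_cpy(dst, "my_map.v2", 16) copies 9
 * chars and returns 9; "bad name" fails with -EINVAL on the space; a name
 * with no '\0' within 'size' bytes also fails with -EINVAL.
 */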

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type,
				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
				       BPF_RB_ROOT | BPF_REFCOUNT,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_token_capable(token, CAP_BPF)) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_KPTR_PERCPU:
			case BPF_REFCOUNT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_LIST_HEAD:
			case BPF_RB_ROOT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	ret = btf_check_and_fixup_fields(btf, map->record);
	if (ret < 0)
		goto free_map_tab;

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}
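
/* Illustrative BPF-side declaration that exercises map_check_btf() above
 * (a sketch using libbpf conventions; names are hypothetical):
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		long cnt;
 *	};
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__type(key, __u32);
 *		__type(value, struct val);
 *		__uint(max_entries, 128);
 *	} m SEC(".maps");
 *
 * btf_parse_fields() records the spin lock offset from the value BTF and
 * the switch above verifies BPF_SPIN_LOCK is allowed for a hash map.
 */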

static bool bpf_net_capable(void)
{
	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
}

#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	struct bpf_token *token = NULL;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 map_type = attr->map_type;
	struct bpf_map *map;
	bool token_flag;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
	 * to avoid per-map type checks tripping on unknown flag
	 */
	token_flag = attr->map_flags & BPF_F_TOKEN_FD;
	attr->map_flags &= ~BPF_F_TOKEN_FD;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map_type = attr->map_type;
	if (map_type >= ARRAY_SIZE(bpf_map_types))
		return -EINVAL;
	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[map_type];
	if (!ops)
		return -EINVAL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return err;
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	if (!ops->map_mem_usage)
		return -EINVAL;

	if (token_flag) {
		token = bpf_token_get_from_fd(attr->map_token_fd);
		if (IS_ERR(token))
			return PTR_ERR(token);

		/* if current token doesn't grant map creation permissions,
		 * then we can't use this token, so ignore it and rely on
		 * system-wide capabilities checks
		 */
		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
		    !bpf_token_allow_map_type(token, attr->map_type)) {
			bpf_token_put(token);
			token = NULL;
		}
	}

	err = -EPERM;

	/* Intent here is for unprivileged_bpf_disabled to block BPF map
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
		goto put_token;

	/* check privileged map type permissions */
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		/* unprivileged */
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
	case BPF_MAP_TYPE_BLOOM_FILTER:
	case BPF_MAP_TYPE_LPM_TRIE:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_STRUCT_OPS:
	case BPF_MAP_TYPE_CPUMAP:
		if (!bpf_token_capable(token, CAP_BPF))
			goto put_token;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_XSKMAP:
		if (!bpf_token_capable(token, CAP_NET_ADMIN))
			goto put_token;
		break;
	default:
		WARN(1, "unsupported map type %d", map_type);
		goto put_token;
	}

	map = ops->map_alloc(attr);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto put_token;
	}
	map->ops = ops;
	map->map_type = map_type;

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even the map's value is a kernel's struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel's
	     * counter part. Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_create(map, attr, token);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);
	bpf_token_put(token);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
put_token:
	bpf_token_put(token);
	return err;
}
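
/* Userspace view of map_create() (an illustrative sketch using the raw
 * syscall; libbpf's bpf_map_create() wraps the same attr):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 1024,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the returned fd holds one refcnt and one usercnt of the map.
 */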

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held or the map should have been
 * protected by rcu read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);
	value = kvmemdup_bpfptr(uvalue, value_size);
	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}

	err = bpf_map_update_value(map, f.file, key, value, attr->flags);
	if (!err)
		maybe_wait_bpf_programs(map);

	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	if (!err)
		maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_offloaded(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	return err;
}
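
/* Note: on both success and failure the number of elements actually
 * processed is written back through uattr->batch.count, so userspace can
 * resume a partially completed batch.
 */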

int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	void *key, *value;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, map_file, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);

	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}
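
/* Illustrative userspace iteration over the batch lookup above (a sketch
 * using the raw syscall): start with in_batch unset and feed out_batch back
 * in on each call; -ENOENT signals the whole map has been visited.
 *
 *	attr.batch.in_batch = 0;
 *	attr.batch.out_batch = (__u64)(unsigned long)&batch_key;
 *	do {
 *		attr.batch.count = 64;
 *		err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *		// ...consume attr.batch.count key/value pairs...
 *		attr.batch.in_batch = attr.batch.out_batch;
 *	} while (!err);
 */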

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (attr->flags &&
	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
	     map->map_type == BPF_MAP_TYPE_STACK)) {
		err = -EINVAL;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -ENOTSUPP;
	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		if (!bpf_map_is_offloaded(map)) {
			bpf_disable_instrumentation();
			rcu_read_lock();
			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
			rcu_read_unlock();
			bpf_enable_instrumentation();
		}
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
		fdput(f);
		return -ENOTSUPP;
	}

	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		fdput(f);
		return -EPERM;
	}

	mutex_lock(&map->freeze_mutex);
	if (bpf_map_write_active(map)) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}
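
/* Illustrative freeze lifecycle: once map_freeze() succeeds, syscall-side
 * writes are rejected because map_get_sys_perms() drops FMODE_CAN_WRITE,
 * while BPF programs can still write unless the map was also created with
 * BPF_F_RDONLY_PROG.
 */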
audit_log_end(ab); 2137 } 2138 2139 static int bpf_prog_alloc_id(struct bpf_prog *prog) 2140 { 2141 int id; 2142 2143 idr_preload(GFP_KERNEL); 2144 spin_lock_bh(&prog_idr_lock); 2145 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); 2146 if (id > 0) 2147 prog->aux->id = id; 2148 spin_unlock_bh(&prog_idr_lock); 2149 idr_preload_end(); 2150 2151 /* id is in [1, INT_MAX) */ 2152 if (WARN_ON_ONCE(!id)) 2153 return -ENOSPC; 2154 2155 return id > 0 ? 0 : id; 2156 } 2157 2158 void bpf_prog_free_id(struct bpf_prog *prog) 2159 { 2160 unsigned long flags; 2161 2162 /* cBPF to eBPF migrations are currently not in the idr store. 2163 * Offloaded programs are removed from the store when their device 2164 * disappears - even if someone grabs an fd to them they are unusable, 2165 * simply waiting for refcnt to drop to be freed. 2166 */ 2167 if (!prog->aux->id) 2168 return; 2169 2170 spin_lock_irqsave(&prog_idr_lock, flags); 2171 idr_remove(&prog_idr, prog->aux->id); 2172 prog->aux->id = 0; 2173 spin_unlock_irqrestore(&prog_idr_lock, flags); 2174 } 2175 2176 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 2177 { 2178 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 2179 2180 kvfree(aux->func_info); 2181 kfree(aux->func_info_aux); 2182 free_uid(aux->user); 2183 security_bpf_prog_free(aux->prog); 2184 bpf_prog_free(aux->prog); 2185 } 2186 2187 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 2188 { 2189 bpf_prog_kallsyms_del_all(prog); 2190 btf_put(prog->aux->btf); 2191 module_put(prog->aux->mod); 2192 kvfree(prog->aux->jited_linfo); 2193 kvfree(prog->aux->linfo); 2194 kfree(prog->aux->kfunc_tab); 2195 if (prog->aux->attach_btf) 2196 btf_put(prog->aux->attach_btf); 2197 2198 if (deferred) { 2199 if (prog->aux->sleepable) 2200 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); 2201 else 2202 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 2203 } else { 2204 __bpf_prog_put_rcu(&prog->aux->rcu); 2205 } 2206 } 2207 2208 static void bpf_prog_put_deferred(struct work_struct *work) 2209 { 2210 struct bpf_prog_aux *aux; 2211 struct bpf_prog *prog; 2212 2213 aux = container_of(work, struct bpf_prog_aux, work); 2214 prog = aux->prog; 2215 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2216 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2217 bpf_prog_free_id(prog); 2218 __bpf_prog_put_noref(prog, true); 2219 } 2220 2221 static void __bpf_prog_put(struct bpf_prog *prog) 2222 { 2223 struct bpf_prog_aux *aux = prog->aux; 2224 2225 if (atomic64_dec_and_test(&aux->refcnt)) { 2226 if (in_irq() || irqs_disabled()) { 2227 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2228 schedule_work(&aux->work); 2229 } else { 2230 bpf_prog_put_deferred(&aux->work); 2231 } 2232 } 2233 } 2234 2235 void bpf_prog_put(struct bpf_prog *prog) 2236 { 2237 __bpf_prog_put(prog); 2238 } 2239 EXPORT_SYMBOL_GPL(bpf_prog_put); 2240 2241 static int bpf_prog_release(struct inode *inode, struct file *filp) 2242 { 2243 struct bpf_prog *prog = filp->private_data; 2244 2245 bpf_prog_put(prog); 2246 return 0; 2247 } 2248 2249 struct bpf_prog_kstats { 2250 u64 nsecs; 2251 u64 cnt; 2252 u64 misses; 2253 }; 2254 2255 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2256 { 2257 struct bpf_prog_stats *stats; 2258 unsigned int flags; 2259 2260 stats = this_cpu_ptr(prog->stats); 2261 flags = u64_stats_update_begin_irqsave(&stats->syncp); 2262 u64_stats_inc(&stats->misses); 2263 u64_stats_update_end_irqrestore(&stats->syncp, flags); 2264 } 2265 2266 static void 
bpf_prog_get_stats(const struct bpf_prog *prog, 2267 struct bpf_prog_kstats *stats) 2268 { 2269 u64 nsecs = 0, cnt = 0, misses = 0; 2270 int cpu; 2271 2272 for_each_possible_cpu(cpu) { 2273 const struct bpf_prog_stats *st; 2274 unsigned int start; 2275 u64 tnsecs, tcnt, tmisses; 2276 2277 st = per_cpu_ptr(prog->stats, cpu); 2278 do { 2279 start = u64_stats_fetch_begin(&st->syncp); 2280 tnsecs = u64_stats_read(&st->nsecs); 2281 tcnt = u64_stats_read(&st->cnt); 2282 tmisses = u64_stats_read(&st->misses); 2283 } while (u64_stats_fetch_retry(&st->syncp, start)); 2284 nsecs += tnsecs; 2285 cnt += tcnt; 2286 misses += tmisses; 2287 } 2288 stats->nsecs = nsecs; 2289 stats->cnt = cnt; 2290 stats->misses = misses; 2291 } 2292 2293 #ifdef CONFIG_PROC_FS 2294 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 2295 { 2296 const struct bpf_prog *prog = filp->private_data; 2297 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2298 struct bpf_prog_kstats stats; 2299 2300 bpf_prog_get_stats(prog, &stats); 2301 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2302 seq_printf(m, 2303 "prog_type:\t%u\n" 2304 "prog_jited:\t%u\n" 2305 "prog_tag:\t%s\n" 2306 "memlock:\t%llu\n" 2307 "prog_id:\t%u\n" 2308 "run_time_ns:\t%llu\n" 2309 "run_cnt:\t%llu\n" 2310 "recursion_misses:\t%llu\n" 2311 "verified_insns:\t%u\n", 2312 prog->type, 2313 prog->jited, 2314 prog_tag, 2315 prog->pages * 1ULL << PAGE_SHIFT, 2316 prog->aux->id, 2317 stats.nsecs, 2318 stats.cnt, 2319 stats.misses, 2320 prog->aux->verified_insns); 2321 } 2322 #endif 2323 2324 const struct file_operations bpf_prog_fops = { 2325 #ifdef CONFIG_PROC_FS 2326 .show_fdinfo = bpf_prog_show_fdinfo, 2327 #endif 2328 .release = bpf_prog_release, 2329 .read = bpf_dummy_read, 2330 .write = bpf_dummy_write, 2331 }; 2332 2333 int bpf_prog_new_fd(struct bpf_prog *prog) 2334 { 2335 int ret; 2336 2337 ret = security_bpf_prog(prog); 2338 if (ret < 0) 2339 return ret; 2340 2341 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 2342 O_RDWR | O_CLOEXEC); 2343 } 2344 2345 static struct bpf_prog *____bpf_prog_get(struct fd f) 2346 { 2347 if (!f.file) 2348 return ERR_PTR(-EBADF); 2349 if (f.file->f_op != &bpf_prog_fops) { 2350 fdput(f); 2351 return ERR_PTR(-EINVAL); 2352 } 2353 2354 return f.file->private_data; 2355 } 2356 2357 void bpf_prog_add(struct bpf_prog *prog, int i) 2358 { 2359 atomic64_add(i, &prog->aux->refcnt); 2360 } 2361 EXPORT_SYMBOL_GPL(bpf_prog_add); 2362 2363 void bpf_prog_sub(struct bpf_prog *prog, int i) 2364 { 2365 /* Only to be used for undoing previous bpf_prog_add() in some 2366 * error path. We still know that another entity in our call 2367 * path holds a reference to the program, thus atomic_sub() can 2368 * be safely used in such cases! 
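 * The WARN_ON below catches the case where that assumption is violated
 * and the refcount actually drops to zero.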
2369 */ 2370 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 2371 } 2372 EXPORT_SYMBOL_GPL(bpf_prog_sub); 2373 2374 void bpf_prog_inc(struct bpf_prog *prog) 2375 { 2376 atomic64_inc(&prog->aux->refcnt); 2377 } 2378 EXPORT_SYMBOL_GPL(bpf_prog_inc); 2379 2380 /* prog_idr_lock should have been held */ 2381 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 2382 { 2383 int refold; 2384 2385 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 2386 2387 if (!refold) 2388 return ERR_PTR(-ENOENT); 2389 2390 return prog; 2391 } 2392 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 2393 2394 bool bpf_prog_get_ok(struct bpf_prog *prog, 2395 enum bpf_prog_type *attach_type, bool attach_drv) 2396 { 2397 /* not an attachment, just a refcount inc, always allow */ 2398 if (!attach_type) 2399 return true; 2400 2401 if (prog->type != *attach_type) 2402 return false; 2403 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) 2404 return false; 2405 2406 return true; 2407 } 2408 2409 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 2410 bool attach_drv) 2411 { 2412 struct fd f = fdget(ufd); 2413 struct bpf_prog *prog; 2414 2415 prog = ____bpf_prog_get(f); 2416 if (IS_ERR(prog)) 2417 return prog; 2418 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 2419 prog = ERR_PTR(-EINVAL); 2420 goto out; 2421 } 2422 2423 bpf_prog_inc(prog); 2424 out: 2425 fdput(f); 2426 return prog; 2427 } 2428 2429 struct bpf_prog *bpf_prog_get(u32 ufd) 2430 { 2431 return __bpf_prog_get(ufd, NULL, false); 2432 } 2433 2434 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2435 bool attach_drv) 2436 { 2437 return __bpf_prog_get(ufd, &type, attach_drv); 2438 } 2439 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 2440 2441 /* Initially all BPF programs could be loaded w/o specifying 2442 * expected_attach_type. Later for some of them specifying expected_attach_type 2443 * at load time became required so that program could be validated properly. 2444 * Programs of types that are allowed to be loaded both w/ and w/o (for 2445 * backward compatibility) expected_attach_type, should have the default attach 2446 * type assigned to expected_attach_type for the latter case, so that it can be 2447 * validated later at attach time. 2448 * 2449 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 2450 * prog type requires it but has some attach types that have to be backward 2451 * compatible. 2452 */ 2453 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 2454 { 2455 switch (attr->prog_type) { 2456 case BPF_PROG_TYPE_CGROUP_SOCK: 2457 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 2458 * exist so checking for non-zero is the way to go here. 
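		 * A zero expected_attach_type therefore means "unset" and
		 * is rewritten to the backward-compatible default below.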
2459 */ 2460 if (!attr->expected_attach_type) 2461 attr->expected_attach_type = 2462 BPF_CGROUP_INET_SOCK_CREATE; 2463 break; 2464 case BPF_PROG_TYPE_SK_REUSEPORT: 2465 if (!attr->expected_attach_type) 2466 attr->expected_attach_type = 2467 BPF_SK_REUSEPORT_SELECT; 2468 break; 2469 } 2470 } 2471 2472 static int 2473 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 2474 enum bpf_attach_type expected_attach_type, 2475 struct btf *attach_btf, u32 btf_id, 2476 struct bpf_prog *dst_prog) 2477 { 2478 if (btf_id) { 2479 if (btf_id > BTF_MAX_TYPE) 2480 return -EINVAL; 2481 2482 if (!attach_btf && !dst_prog) 2483 return -EINVAL; 2484 2485 switch (prog_type) { 2486 case BPF_PROG_TYPE_TRACING: 2487 case BPF_PROG_TYPE_LSM: 2488 case BPF_PROG_TYPE_STRUCT_OPS: 2489 case BPF_PROG_TYPE_EXT: 2490 break; 2491 default: 2492 return -EINVAL; 2493 } 2494 } 2495 2496 if (attach_btf && (!btf_id || dst_prog)) 2497 return -EINVAL; 2498 2499 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING && 2500 prog_type != BPF_PROG_TYPE_EXT) 2501 return -EINVAL; 2502 2503 switch (prog_type) { 2504 case BPF_PROG_TYPE_CGROUP_SOCK: 2505 switch (expected_attach_type) { 2506 case BPF_CGROUP_INET_SOCK_CREATE: 2507 case BPF_CGROUP_INET_SOCK_RELEASE: 2508 case BPF_CGROUP_INET4_POST_BIND: 2509 case BPF_CGROUP_INET6_POST_BIND: 2510 return 0; 2511 default: 2512 return -EINVAL; 2513 } 2514 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2515 switch (expected_attach_type) { 2516 case BPF_CGROUP_INET4_BIND: 2517 case BPF_CGROUP_INET6_BIND: 2518 case BPF_CGROUP_INET4_CONNECT: 2519 case BPF_CGROUP_INET6_CONNECT: 2520 case BPF_CGROUP_UNIX_CONNECT: 2521 case BPF_CGROUP_INET4_GETPEERNAME: 2522 case BPF_CGROUP_INET6_GETPEERNAME: 2523 case BPF_CGROUP_UNIX_GETPEERNAME: 2524 case BPF_CGROUP_INET4_GETSOCKNAME: 2525 case BPF_CGROUP_INET6_GETSOCKNAME: 2526 case BPF_CGROUP_UNIX_GETSOCKNAME: 2527 case BPF_CGROUP_UDP4_SENDMSG: 2528 case BPF_CGROUP_UDP6_SENDMSG: 2529 case BPF_CGROUP_UNIX_SENDMSG: 2530 case BPF_CGROUP_UDP4_RECVMSG: 2531 case BPF_CGROUP_UDP6_RECVMSG: 2532 case BPF_CGROUP_UNIX_RECVMSG: 2533 return 0; 2534 default: 2535 return -EINVAL; 2536 } 2537 case BPF_PROG_TYPE_CGROUP_SKB: 2538 switch (expected_attach_type) { 2539 case BPF_CGROUP_INET_INGRESS: 2540 case BPF_CGROUP_INET_EGRESS: 2541 return 0; 2542 default: 2543 return -EINVAL; 2544 } 2545 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2546 switch (expected_attach_type) { 2547 case BPF_CGROUP_SETSOCKOPT: 2548 case BPF_CGROUP_GETSOCKOPT: 2549 return 0; 2550 default: 2551 return -EINVAL; 2552 } 2553 case BPF_PROG_TYPE_SK_LOOKUP: 2554 if (expected_attach_type == BPF_SK_LOOKUP) 2555 return 0; 2556 return -EINVAL; 2557 case BPF_PROG_TYPE_SK_REUSEPORT: 2558 switch (expected_attach_type) { 2559 case BPF_SK_REUSEPORT_SELECT: 2560 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2561 return 0; 2562 default: 2563 return -EINVAL; 2564 } 2565 case BPF_PROG_TYPE_NETFILTER: 2566 if (expected_attach_type == BPF_NETFILTER) 2567 return 0; 2568 return -EINVAL; 2569 case BPF_PROG_TYPE_SYSCALL: 2570 case BPF_PROG_TYPE_EXT: 2571 if (expected_attach_type) 2572 return -EINVAL; 2573 fallthrough; 2574 default: 2575 return 0; 2576 } 2577 } 2578 2579 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2580 { 2581 switch (prog_type) { 2582 case BPF_PROG_TYPE_SCHED_CLS: 2583 case BPF_PROG_TYPE_SCHED_ACT: 2584 case BPF_PROG_TYPE_XDP: 2585 case BPF_PROG_TYPE_LWT_IN: 2586 case BPF_PROG_TYPE_LWT_OUT: 2587 case BPF_PROG_TYPE_LWT_XMIT: 2588 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2589 case BPF_PROG_TYPE_SK_SKB: 2590 case 
BPF_PROG_TYPE_SK_MSG: 2591 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2592 case BPF_PROG_TYPE_CGROUP_DEVICE: 2593 case BPF_PROG_TYPE_CGROUP_SOCK: 2594 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2595 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2596 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2597 case BPF_PROG_TYPE_SOCK_OPS: 2598 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2599 case BPF_PROG_TYPE_NETFILTER: 2600 return true; 2601 case BPF_PROG_TYPE_CGROUP_SKB: 2602 /* always unpriv */ 2603 case BPF_PROG_TYPE_SK_REUSEPORT: 2604 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2605 default: 2606 return false; 2607 } 2608 } 2609 2610 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2611 { 2612 switch (prog_type) { 2613 case BPF_PROG_TYPE_KPROBE: 2614 case BPF_PROG_TYPE_TRACEPOINT: 2615 case BPF_PROG_TYPE_PERF_EVENT: 2616 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2617 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2618 case BPF_PROG_TYPE_TRACING: 2619 case BPF_PROG_TYPE_LSM: 2620 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2621 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2622 return true; 2623 default: 2624 return false; 2625 } 2626 } 2627 2628 /* last field in 'union bpf_attr' used by this command */ 2629 #define BPF_PROG_LOAD_LAST_FIELD prog_token_fd 2630 2631 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) 2632 { 2633 enum bpf_prog_type type = attr->prog_type; 2634 struct bpf_prog *prog, *dst_prog = NULL; 2635 struct btf *attach_btf = NULL; 2636 struct bpf_token *token = NULL; 2637 bool bpf_cap; 2638 int err; 2639 char license[128]; 2640 2641 if (CHECK_ATTR(BPF_PROG_LOAD)) 2642 return -EINVAL; 2643 2644 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2645 BPF_F_ANY_ALIGNMENT | 2646 BPF_F_TEST_STATE_FREQ | 2647 BPF_F_SLEEPABLE | 2648 BPF_F_TEST_RND_HI32 | 2649 BPF_F_XDP_HAS_FRAGS | 2650 BPF_F_XDP_DEV_BOUND_ONLY | 2651 BPF_F_TEST_REG_INVARIANTS | 2652 BPF_F_TOKEN_FD)) 2653 return -EINVAL; 2654 2655 bpf_prog_load_fixup_attach_type(attr); 2656 2657 if (attr->prog_flags & BPF_F_TOKEN_FD) { 2658 token = bpf_token_get_from_fd(attr->prog_token_fd); 2659 if (IS_ERR(token)) 2660 return PTR_ERR(token); 2661 /* if current token doesn't grant prog loading permissions, 2662 * then we can't use this token, so ignore it and rely on 2663 * system-wide capabilities checks 2664 */ 2665 if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) || 2666 !bpf_token_allow_prog_type(token, attr->prog_type, 2667 attr->expected_attach_type)) { 2668 bpf_token_put(token); 2669 token = NULL; 2670 } 2671 } 2672 2673 bpf_cap = bpf_token_capable(token, CAP_BPF); 2674 err = -EPERM; 2675 2676 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2677 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2678 !bpf_cap) 2679 goto put_token; 2680 2681 /* Intent here is for unprivileged_bpf_disabled to block BPF program 2682 * creation for unprivileged users; other actions depend 2683 * on fd availability and access to bpffs, so are dependent on 2684 * object creation success. Even with unprivileged BPF disabled, 2685 * capability checks are still carried out for these 2686 * and other operations. 2687 */ 2688 if (sysctl_unprivileged_bpf_disabled && !bpf_cap) 2689 goto put_token; 2690 2691 if (attr->insn_cnt == 0 || 2692 attr->insn_cnt > (bpf_cap ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
		err = -E2BIG;
		goto put_token;
	}
	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !bpf_cap)
		goto put_token;

	if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
		goto put_token;
	if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
		goto put_token;

	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
	 * or btf, we need to check which one it is
	 */
	if (attr->attach_prog_fd) {
		dst_prog = bpf_prog_get(attr->attach_prog_fd);
		if (IS_ERR(dst_prog)) {
			dst_prog = NULL;
			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
			if (IS_ERR(attach_btf)) {
				err = -EINVAL;
				goto put_token;
			}
			if (!btf_is_kernel(attach_btf)) {
				/* attaching through specifying bpf_prog's BTF
				 * objects directly might be supported eventually
				 */
				btf_put(attach_btf);
				err = -ENOTSUPP;
				goto put_token;
			}
		}
	} else if (attr->attach_btf_id) {
		/* fall back to vmlinux BTF, if BTF type ID is specified */
		attach_btf = bpf_get_btf_vmlinux();
		if (IS_ERR(attach_btf)) {
			err = PTR_ERR(attach_btf);
			goto put_token;
		}
		if (!attach_btf) {
			err = -EINVAL;
			goto put_token;
		}
		btf_get(attach_btf);
	}

	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
				       attach_btf, attr->attach_btf_id,
				       dst_prog)) {
		if (dst_prog)
			bpf_prog_put(dst_prog);
		if (attach_btf)
			btf_put(attach_btf);
		err = -EINVAL;
		goto put_token;
	}

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog) {
		if (dst_prog)
			bpf_prog_put(dst_prog);
		if (attach_btf)
			btf_put(attach_btf);
		err = -ENOMEM;
		goto put_token;
	}

	prog->expected_attach_type = attr->expected_attach_type;
	prog->aux->attach_btf = attach_btf;
	prog->aux->attach_btf_id = attr->attach_btf_id;
	prog->aux->dst_prog = dst_prog;
	prog->aux->dev_bound = !!attr->prog_ifindex;
	prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
	prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;

	/* move token into prog->aux, reuse taken refcnt */
	prog->aux->token = token;
	token = NULL;

	prog->aux->user = get_current_user();
	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_bpfptr(prog->insns,
			     make_bpfptr(attr->insns, uattr.is_kernel),
			     bpf_prog_insn_size(prog)) != 0)
		goto free_prog;
	/* copy eBPF program license from user space */
	if (strncpy_from_bpfptr(license,
				make_bpfptr(attr->license, uattr.is_kernel),
				sizeof(license) - 1) < 0)
		goto free_prog;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic64_set(&prog->aux->refcnt, 1);

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_dev_bound_init(prog, attr);
		if (err)
			goto free_prog;
	}

	if (type == BPF_PROG_TYPE_EXT && dst_prog &&
	    bpf_prog_is_dev_bound(dst_prog->aux)) {
		err = bpf_prog_dev_bound_inherit(prog, dst_prog);
		if (err)
			goto free_prog;
	}

	/*
	 * Bookkeeping for managing the program attachment chain.
	 *
	 * It might be tempting to set the attach_tracing_prog flag at
	 * attachment time, but that would not prevent loading a bunch of
	 * tracing programs first and then attaching them to one another.
	 *
	 * The flag attach_tracing_prog is set for the whole program lifecycle, and
	 * doesn't have to be cleared in bpf_tracing_link_release, since tracing
	 * programs cannot change attachment target.
	 */
	if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
	    dst_prog->type == BPF_PROG_TYPE_TRACING) {
		prog->aux->attach_tracing_prog = true;
	}

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	prog->aux->load_time = ktime_get_boottime_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
			       sizeof(attr->prog_name));
	if (err < 0)
		goto free_prog;

	err = security_bpf_prog_load(prog, attr, token);
	if (err)
		goto free_prog_sec;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr, uattr, uattr_size);
	if (err < 0)
		goto free_used_maps;

	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
	 * effectively publicly exposed. However, retrieving via
	 * bpf_prog_get_fd_by_id() will take another reference,
	 * therefore it cannot be gone underneath us.
	 *
	 * Only for the time /after/ successful bpf_prog_new_fd()
	 * and before returning to userspace, we might just hold
	 * one reference and any parallel close on that fd could
	 * rip everything out. Hence, below notifications must
	 * happen before bpf_prog_new_fd().
	 *
	 * Also, any failure handling from this point onwards must
	 * be using bpf_prog_put() given the program is exposed.
	 */
	bpf_prog_kallsyms_add(prog);
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_LOAD);

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		bpf_prog_put(prog);
	return err;

free_used_maps:
	/* In case we have subprogs, we need to wait for a grace
	 * period before we can tear down JIT memory since symbols
	 * are already exposed under kallsyms.
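	 * Passing real_func_cnt as the "deferred" argument makes
	 * __bpf_prog_put_noref() take the RCU-deferred teardown path
	 * whenever the program has subprogs.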
2882 */ 2883 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt); 2884 return err; 2885 2886 free_prog_sec: 2887 security_bpf_prog_free(prog); 2888 free_prog: 2889 free_uid(prog->aux->user); 2890 if (prog->aux->attach_btf) 2891 btf_put(prog->aux->attach_btf); 2892 bpf_prog_free(prog); 2893 put_token: 2894 bpf_token_put(token); 2895 return err; 2896 } 2897 2898 #define BPF_OBJ_LAST_FIELD path_fd 2899 2900 static int bpf_obj_pin(const union bpf_attr *attr) 2901 { 2902 int path_fd; 2903 2904 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) 2905 return -EINVAL; 2906 2907 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 2908 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 2909 return -EINVAL; 2910 2911 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 2912 return bpf_obj_pin_user(attr->bpf_fd, path_fd, 2913 u64_to_user_ptr(attr->pathname)); 2914 } 2915 2916 static int bpf_obj_get(const union bpf_attr *attr) 2917 { 2918 int path_fd; 2919 2920 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 2921 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) 2922 return -EINVAL; 2923 2924 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 2925 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 2926 return -EINVAL; 2927 2928 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 2929 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), 2930 attr->file_flags); 2931 } 2932 2933 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2934 const struct bpf_link_ops *ops, struct bpf_prog *prog) 2935 { 2936 atomic64_set(&link->refcnt, 1); 2937 link->type = type; 2938 link->id = 0; 2939 link->ops = ops; 2940 link->prog = prog; 2941 } 2942 2943 static void bpf_link_free_id(int id) 2944 { 2945 if (!id) 2946 return; 2947 2948 spin_lock_bh(&link_idr_lock); 2949 idr_remove(&link_idr, id); 2950 spin_unlock_bh(&link_idr_lock); 2951 } 2952 2953 /* Clean up bpf_link and corresponding anon_inode file and FD. After 2954 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 2955 * anon_inode's release() call. This helper marks bpf_link as 2956 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt 2957 * is not decremented, it's the responsibility of a calling code that failed 2958 * to complete bpf_link initialization. 2959 * This helper eventually calls link's dealloc callback, but does not call 2960 * link's release callback. 2961 */ 2962 void bpf_link_cleanup(struct bpf_link_primer *primer) 2963 { 2964 primer->link->prog = NULL; 2965 bpf_link_free_id(primer->id); 2966 fput(primer->file); 2967 put_unused_fd(primer->fd); 2968 } 2969 2970 void bpf_link_inc(struct bpf_link *link) 2971 { 2972 atomic64_inc(&link->refcnt); 2973 } 2974 2975 /* bpf_link_free is guaranteed to be called from process context */ 2976 static void bpf_link_free(struct bpf_link *link) 2977 { 2978 bpf_link_free_id(link->id); 2979 if (link->prog) { 2980 /* detach BPF program, clean up used resources */ 2981 link->ops->release(link); 2982 bpf_prog_put(link->prog); 2983 } 2984 /* free bpf_link and its containing memory */ 2985 link->ops->dealloc(link); 2986 } 2987 2988 static void bpf_link_put_deferred(struct work_struct *work) 2989 { 2990 struct bpf_link *link = container_of(work, struct bpf_link, work); 2991 2992 bpf_link_free(link); 2993 } 2994 2995 /* bpf_link_put might be called from atomic context. It needs to be called 2996 * from sleepable context in order to acquire sleeping locks during the process. 
2997 */ 2998 void bpf_link_put(struct bpf_link *link) 2999 { 3000 if (!atomic64_dec_and_test(&link->refcnt)) 3001 return; 3002 3003 INIT_WORK(&link->work, bpf_link_put_deferred); 3004 schedule_work(&link->work); 3005 } 3006 EXPORT_SYMBOL(bpf_link_put); 3007 3008 static void bpf_link_put_direct(struct bpf_link *link) 3009 { 3010 if (!atomic64_dec_and_test(&link->refcnt)) 3011 return; 3012 bpf_link_free(link); 3013 } 3014 3015 static int bpf_link_release(struct inode *inode, struct file *filp) 3016 { 3017 struct bpf_link *link = filp->private_data; 3018 3019 bpf_link_put_direct(link); 3020 return 0; 3021 } 3022 3023 #ifdef CONFIG_PROC_FS 3024 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 3025 #define BPF_MAP_TYPE(_id, _ops) 3026 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 3027 static const char *bpf_link_type_strs[] = { 3028 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 3029 #include <linux/bpf_types.h> 3030 }; 3031 #undef BPF_PROG_TYPE 3032 #undef BPF_MAP_TYPE 3033 #undef BPF_LINK_TYPE 3034 3035 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 3036 { 3037 const struct bpf_link *link = filp->private_data; 3038 const struct bpf_prog *prog = link->prog; 3039 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 3040 3041 seq_printf(m, 3042 "link_type:\t%s\n" 3043 "link_id:\t%u\n", 3044 bpf_link_type_strs[link->type], 3045 link->id); 3046 if (prog) { 3047 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 3048 seq_printf(m, 3049 "prog_tag:\t%s\n" 3050 "prog_id:\t%u\n", 3051 prog_tag, 3052 prog->aux->id); 3053 } 3054 if (link->ops->show_fdinfo) 3055 link->ops->show_fdinfo(link, m); 3056 } 3057 #endif 3058 3059 static const struct file_operations bpf_link_fops = { 3060 #ifdef CONFIG_PROC_FS 3061 .show_fdinfo = bpf_link_show_fdinfo, 3062 #endif 3063 .release = bpf_link_release, 3064 .read = bpf_dummy_read, 3065 .write = bpf_dummy_write, 3066 }; 3067 3068 static int bpf_link_alloc_id(struct bpf_link *link) 3069 { 3070 int id; 3071 3072 idr_preload(GFP_KERNEL); 3073 spin_lock_bh(&link_idr_lock); 3074 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 3075 spin_unlock_bh(&link_idr_lock); 3076 idr_preload_end(); 3077 3078 return id; 3079 } 3080 3081 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, 3082 * reserving unused FD and allocating ID from link_idr. This is to be paired 3083 * with bpf_link_settle() to install FD and ID and expose bpf_link to 3084 * user-space, if bpf_link is successfully attached. If not, bpf_link and 3085 * pre-allocated resources are to be freed with bpf_cleanup() call. All the 3086 * transient state is passed around in struct bpf_link_primer. 3087 * This is preferred way to create and initialize bpf_link, especially when 3088 * there are complicated and expensive operations in between creating bpf_link 3089 * itself and attaching it to BPF hook. By using bpf_link_prime() and 3090 * bpf_link_settle() kernel code using bpf_link doesn't have to perform 3091 * expensive (and potentially failing) roll back operations in a rare case 3092 * that file, FD, or ID can't be allocated. 
3093 */ 3094 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 3095 { 3096 struct file *file; 3097 int fd, id; 3098 3099 fd = get_unused_fd_flags(O_CLOEXEC); 3100 if (fd < 0) 3101 return fd; 3102 3103 3104 id = bpf_link_alloc_id(link); 3105 if (id < 0) { 3106 put_unused_fd(fd); 3107 return id; 3108 } 3109 3110 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC); 3111 if (IS_ERR(file)) { 3112 bpf_link_free_id(id); 3113 put_unused_fd(fd); 3114 return PTR_ERR(file); 3115 } 3116 3117 primer->link = link; 3118 primer->file = file; 3119 primer->fd = fd; 3120 primer->id = id; 3121 return 0; 3122 } 3123 3124 int bpf_link_settle(struct bpf_link_primer *primer) 3125 { 3126 /* make bpf_link fetchable by ID */ 3127 spin_lock_bh(&link_idr_lock); 3128 primer->link->id = primer->id; 3129 spin_unlock_bh(&link_idr_lock); 3130 /* make bpf_link fetchable by FD */ 3131 fd_install(primer->fd, primer->file); 3132 /* pass through installed FD */ 3133 return primer->fd; 3134 } 3135 3136 int bpf_link_new_fd(struct bpf_link *link) 3137 { 3138 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); 3139 } 3140 3141 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 3142 { 3143 struct fd f = fdget(ufd); 3144 struct bpf_link *link; 3145 3146 if (!f.file) 3147 return ERR_PTR(-EBADF); 3148 if (f.file->f_op != &bpf_link_fops) { 3149 fdput(f); 3150 return ERR_PTR(-EINVAL); 3151 } 3152 3153 link = f.file->private_data; 3154 bpf_link_inc(link); 3155 fdput(f); 3156 3157 return link; 3158 } 3159 EXPORT_SYMBOL(bpf_link_get_from_fd); 3160 3161 static void bpf_tracing_link_release(struct bpf_link *link) 3162 { 3163 struct bpf_tracing_link *tr_link = 3164 container_of(link, struct bpf_tracing_link, link.link); 3165 3166 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, 3167 tr_link->trampoline)); 3168 3169 bpf_trampoline_put(tr_link->trampoline); 3170 3171 /* tgt_prog is NULL if target is a kernel function */ 3172 if (tr_link->tgt_prog) 3173 bpf_prog_put(tr_link->tgt_prog); 3174 } 3175 3176 static void bpf_tracing_link_dealloc(struct bpf_link *link) 3177 { 3178 struct bpf_tracing_link *tr_link = 3179 container_of(link, struct bpf_tracing_link, link.link); 3180 3181 kfree(tr_link); 3182 } 3183 3184 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 3185 struct seq_file *seq) 3186 { 3187 struct bpf_tracing_link *tr_link = 3188 container_of(link, struct bpf_tracing_link, link.link); 3189 u32 target_btf_id, target_obj_id; 3190 3191 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3192 &target_obj_id, &target_btf_id); 3193 seq_printf(seq, 3194 "attach_type:\t%d\n" 3195 "target_obj_id:\t%u\n" 3196 "target_btf_id:\t%u\n", 3197 tr_link->attach_type, 3198 target_obj_id, 3199 target_btf_id); 3200 } 3201 3202 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 3203 struct bpf_link_info *info) 3204 { 3205 struct bpf_tracing_link *tr_link = 3206 container_of(link, struct bpf_tracing_link, link.link); 3207 3208 info->tracing.attach_type = tr_link->attach_type; 3209 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3210 &info->tracing.target_obj_id, 3211 &info->tracing.target_btf_id); 3212 3213 return 0; 3214 } 3215 3216 static const struct bpf_link_ops bpf_tracing_link_lops = { 3217 .release = bpf_tracing_link_release, 3218 .dealloc = bpf_tracing_link_dealloc, 3219 .show_fdinfo = bpf_tracing_link_show_fdinfo, 3220 .fill_link_info = bpf_tracing_link_fill_link_info, 3221 }; 3222 3223 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 
3224 int tgt_prog_fd, 3225 u32 btf_id, 3226 u64 bpf_cookie) 3227 { 3228 struct bpf_link_primer link_primer; 3229 struct bpf_prog *tgt_prog = NULL; 3230 struct bpf_trampoline *tr = NULL; 3231 struct bpf_tracing_link *link; 3232 u64 key = 0; 3233 int err; 3234 3235 switch (prog->type) { 3236 case BPF_PROG_TYPE_TRACING: 3237 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 3238 prog->expected_attach_type != BPF_TRACE_FEXIT && 3239 prog->expected_attach_type != BPF_MODIFY_RETURN) { 3240 err = -EINVAL; 3241 goto out_put_prog; 3242 } 3243 break; 3244 case BPF_PROG_TYPE_EXT: 3245 if (prog->expected_attach_type != 0) { 3246 err = -EINVAL; 3247 goto out_put_prog; 3248 } 3249 break; 3250 case BPF_PROG_TYPE_LSM: 3251 if (prog->expected_attach_type != BPF_LSM_MAC) { 3252 err = -EINVAL; 3253 goto out_put_prog; 3254 } 3255 break; 3256 default: 3257 err = -EINVAL; 3258 goto out_put_prog; 3259 } 3260 3261 if (!!tgt_prog_fd != !!btf_id) { 3262 err = -EINVAL; 3263 goto out_put_prog; 3264 } 3265 3266 if (tgt_prog_fd) { 3267 /* 3268 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this 3269 * part would be changed to implement the same for 3270 * BPF_PROG_TYPE_TRACING, do not forget to update the way how 3271 * attach_tracing_prog flag is set. 3272 */ 3273 if (prog->type != BPF_PROG_TYPE_EXT) { 3274 err = -EINVAL; 3275 goto out_put_prog; 3276 } 3277 3278 tgt_prog = bpf_prog_get(tgt_prog_fd); 3279 if (IS_ERR(tgt_prog)) { 3280 err = PTR_ERR(tgt_prog); 3281 tgt_prog = NULL; 3282 goto out_put_prog; 3283 } 3284 3285 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 3286 } 3287 3288 link = kzalloc(sizeof(*link), GFP_USER); 3289 if (!link) { 3290 err = -ENOMEM; 3291 goto out_put_prog; 3292 } 3293 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, 3294 &bpf_tracing_link_lops, prog); 3295 link->attach_type = prog->expected_attach_type; 3296 link->link.cookie = bpf_cookie; 3297 3298 mutex_lock(&prog->aux->dst_mutex); 3299 3300 /* There are a few possible cases here: 3301 * 3302 * - if prog->aux->dst_trampoline is set, the program was just loaded 3303 * and not yet attached to anything, so we can use the values stored 3304 * in prog->aux 3305 * 3306 * - if prog->aux->dst_trampoline is NULL, the program has already been 3307 * attached to a target and its initial target was cleared (below) 3308 * 3309 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 3310 * target_btf_id using the link_create API. 3311 * 3312 * - if tgt_prog == NULL when this function was called using the old 3313 * raw_tracepoint_open API, and we need a target from prog->aux 3314 * 3315 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 3316 * was detached and is going for re-attachment. 3317 * 3318 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf 3319 * are NULL, then program was already attached and user did not provide 3320 * tgt_prog_fd so we have no way to find out or create trampoline 3321 */ 3322 if (!prog->aux->dst_trampoline && !tgt_prog) { 3323 /* 3324 * Allow re-attach for TRACING and LSM programs. If it's 3325 * currently linked, bpf_trampoline_link_prog will fail. 3326 * EXT programs need to specify tgt_prog_fd, so they 3327 * re-attach in separate code path. 3328 */ 3329 if (prog->type != BPF_PROG_TYPE_TRACING && 3330 prog->type != BPF_PROG_TYPE_LSM) { 3331 err = -EINVAL; 3332 goto out_unlock; 3333 } 3334 /* We can allow re-attach only if we have valid attach_btf. 
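		 * attach_btf was recorded at load time and is what allows
		 * the trampoline key for the original target to be
		 * recomputed here.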
*/ 3335 if (!prog->aux->attach_btf) { 3336 err = -EINVAL; 3337 goto out_unlock; 3338 } 3339 btf_id = prog->aux->attach_btf_id; 3340 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 3341 } 3342 3343 if (!prog->aux->dst_trampoline || 3344 (key && key != prog->aux->dst_trampoline->key)) { 3345 /* If there is no saved target, or the specified target is 3346 * different from the destination specified at load time, we 3347 * need a new trampoline and a check for compatibility 3348 */ 3349 struct bpf_attach_target_info tgt_info = {}; 3350 3351 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 3352 &tgt_info); 3353 if (err) 3354 goto out_unlock; 3355 3356 if (tgt_info.tgt_mod) { 3357 module_put(prog->aux->mod); 3358 prog->aux->mod = tgt_info.tgt_mod; 3359 } 3360 3361 tr = bpf_trampoline_get(key, &tgt_info); 3362 if (!tr) { 3363 err = -ENOMEM; 3364 goto out_unlock; 3365 } 3366 } else { 3367 /* The caller didn't specify a target, or the target was the 3368 * same as the destination supplied during program load. This 3369 * means we can reuse the trampoline and reference from program 3370 * load time, and there is no need to allocate a new one. This 3371 * can only happen once for any program, as the saved values in 3372 * prog->aux are cleared below. 3373 */ 3374 tr = prog->aux->dst_trampoline; 3375 tgt_prog = prog->aux->dst_prog; 3376 } 3377 3378 err = bpf_link_prime(&link->link.link, &link_primer); 3379 if (err) 3380 goto out_unlock; 3381 3382 err = bpf_trampoline_link_prog(&link->link, tr); 3383 if (err) { 3384 bpf_link_cleanup(&link_primer); 3385 link = NULL; 3386 goto out_unlock; 3387 } 3388 3389 link->tgt_prog = tgt_prog; 3390 link->trampoline = tr; 3391 3392 /* Always clear the trampoline and target prog from prog->aux to make 3393 * sure the original attach destination is not kept alive after a 3394 * program is (re-)attached to another target. 
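	 * References that are not reused for this attachment are dropped
	 * right below.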
3395 */ 3396 if (prog->aux->dst_prog && 3397 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 3398 /* got extra prog ref from syscall, or attaching to different prog */ 3399 bpf_prog_put(prog->aux->dst_prog); 3400 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 3401 /* we allocated a new trampoline, so free the old one */ 3402 bpf_trampoline_put(prog->aux->dst_trampoline); 3403 3404 prog->aux->dst_prog = NULL; 3405 prog->aux->dst_trampoline = NULL; 3406 mutex_unlock(&prog->aux->dst_mutex); 3407 3408 return bpf_link_settle(&link_primer); 3409 out_unlock: 3410 if (tr && tr != prog->aux->dst_trampoline) 3411 bpf_trampoline_put(tr); 3412 mutex_unlock(&prog->aux->dst_mutex); 3413 kfree(link); 3414 out_put_prog: 3415 if (tgt_prog_fd && tgt_prog) 3416 bpf_prog_put(tgt_prog); 3417 return err; 3418 } 3419 3420 struct bpf_raw_tp_link { 3421 struct bpf_link link; 3422 struct bpf_raw_event_map *btp; 3423 }; 3424 3425 static void bpf_raw_tp_link_release(struct bpf_link *link) 3426 { 3427 struct bpf_raw_tp_link *raw_tp = 3428 container_of(link, struct bpf_raw_tp_link, link); 3429 3430 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); 3431 bpf_put_raw_tracepoint(raw_tp->btp); 3432 } 3433 3434 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 3435 { 3436 struct bpf_raw_tp_link *raw_tp = 3437 container_of(link, struct bpf_raw_tp_link, link); 3438 3439 kfree(raw_tp); 3440 } 3441 3442 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 3443 struct seq_file *seq) 3444 { 3445 struct bpf_raw_tp_link *raw_tp_link = 3446 container_of(link, struct bpf_raw_tp_link, link); 3447 3448 seq_printf(seq, 3449 "tp_name:\t%s\n", 3450 raw_tp_link->btp->tp->name); 3451 } 3452 3453 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen, 3454 u32 len) 3455 { 3456 if (ulen >= len + 1) { 3457 if (copy_to_user(ubuf, buf, len + 1)) 3458 return -EFAULT; 3459 } else { 3460 char zero = '\0'; 3461 3462 if (copy_to_user(ubuf, buf, ulen - 1)) 3463 return -EFAULT; 3464 if (put_user(zero, ubuf + ulen - 1)) 3465 return -EFAULT; 3466 return -ENOSPC; 3467 } 3468 3469 return 0; 3470 } 3471 3472 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 3473 struct bpf_link_info *info) 3474 { 3475 struct bpf_raw_tp_link *raw_tp_link = 3476 container_of(link, struct bpf_raw_tp_link, link); 3477 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 3478 const char *tp_name = raw_tp_link->btp->tp->name; 3479 u32 ulen = info->raw_tracepoint.tp_name_len; 3480 size_t tp_len = strlen(tp_name); 3481 3482 if (!ulen ^ !ubuf) 3483 return -EINVAL; 3484 3485 info->raw_tracepoint.tp_name_len = tp_len + 1; 3486 3487 if (!ubuf) 3488 return 0; 3489 3490 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len); 3491 } 3492 3493 static const struct bpf_link_ops bpf_raw_tp_link_lops = { 3494 .release = bpf_raw_tp_link_release, 3495 .dealloc = bpf_raw_tp_link_dealloc, 3496 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 3497 .fill_link_info = bpf_raw_tp_link_fill_link_info, 3498 }; 3499 3500 #ifdef CONFIG_PERF_EVENTS 3501 struct bpf_perf_link { 3502 struct bpf_link link; 3503 struct file *perf_file; 3504 }; 3505 3506 static void bpf_perf_link_release(struct bpf_link *link) 3507 { 3508 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3509 struct perf_event *event = perf_link->perf_file->private_data; 3510 3511 perf_event_free_bpf_prog(event); 3512 fput(perf_link->perf_file); 3513 } 3514 3515 static void bpf_perf_link_dealloc(struct bpf_link *link) 
3516 { 3517 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3518 3519 kfree(perf_link); 3520 } 3521 3522 static int bpf_perf_link_fill_common(const struct perf_event *event, 3523 char __user *uname, u32 ulen, 3524 u64 *probe_offset, u64 *probe_addr, 3525 u32 *fd_type, unsigned long *missed) 3526 { 3527 const char *buf; 3528 u32 prog_id; 3529 size_t len; 3530 int err; 3531 3532 if (!ulen ^ !uname) 3533 return -EINVAL; 3534 3535 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf, 3536 probe_offset, probe_addr, missed); 3537 if (err) 3538 return err; 3539 if (!uname) 3540 return 0; 3541 if (buf) { 3542 len = strlen(buf); 3543 err = bpf_copy_to_user(uname, buf, ulen, len); 3544 if (err) 3545 return err; 3546 } else { 3547 char zero = '\0'; 3548 3549 if (put_user(zero, uname)) 3550 return -EFAULT; 3551 } 3552 return 0; 3553 } 3554 3555 #ifdef CONFIG_KPROBE_EVENTS 3556 static int bpf_perf_link_fill_kprobe(const struct perf_event *event, 3557 struct bpf_link_info *info) 3558 { 3559 unsigned long missed; 3560 char __user *uname; 3561 u64 addr, offset; 3562 u32 ulen, type; 3563 int err; 3564 3565 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); 3566 ulen = info->perf_event.kprobe.name_len; 3567 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr, 3568 &type, &missed); 3569 if (err) 3570 return err; 3571 if (type == BPF_FD_TYPE_KRETPROBE) 3572 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; 3573 else 3574 info->perf_event.type = BPF_PERF_EVENT_KPROBE; 3575 3576 info->perf_event.kprobe.offset = offset; 3577 info->perf_event.kprobe.missed = missed; 3578 if (!kallsyms_show_value(current_cred())) 3579 addr = 0; 3580 info->perf_event.kprobe.addr = addr; 3581 info->perf_event.kprobe.cookie = event->bpf_cookie; 3582 return 0; 3583 } 3584 #endif 3585 3586 #ifdef CONFIG_UPROBE_EVENTS 3587 static int bpf_perf_link_fill_uprobe(const struct perf_event *event, 3588 struct bpf_link_info *info) 3589 { 3590 char __user *uname; 3591 u64 addr, offset; 3592 u32 ulen, type; 3593 int err; 3594 3595 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); 3596 ulen = info->perf_event.uprobe.name_len; 3597 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr, 3598 &type, NULL); 3599 if (err) 3600 return err; 3601 3602 if (type == BPF_FD_TYPE_URETPROBE) 3603 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; 3604 else 3605 info->perf_event.type = BPF_PERF_EVENT_UPROBE; 3606 info->perf_event.uprobe.offset = offset; 3607 info->perf_event.uprobe.cookie = event->bpf_cookie; 3608 return 0; 3609 } 3610 #endif 3611 3612 static int bpf_perf_link_fill_probe(const struct perf_event *event, 3613 struct bpf_link_info *info) 3614 { 3615 #ifdef CONFIG_KPROBE_EVENTS 3616 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) 3617 return bpf_perf_link_fill_kprobe(event, info); 3618 #endif 3619 #ifdef CONFIG_UPROBE_EVENTS 3620 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) 3621 return bpf_perf_link_fill_uprobe(event, info); 3622 #endif 3623 return -EOPNOTSUPP; 3624 } 3625 3626 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event, 3627 struct bpf_link_info *info) 3628 { 3629 char __user *uname; 3630 u32 ulen; 3631 3632 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); 3633 ulen = info->perf_event.tracepoint.name_len; 3634 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; 3635 info->perf_event.tracepoint.cookie = event->bpf_cookie; 3636 return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL); 3637 } 
3638 3639 static int bpf_perf_link_fill_perf_event(const struct perf_event *event, 3640 struct bpf_link_info *info) 3641 { 3642 info->perf_event.event.type = event->attr.type; 3643 info->perf_event.event.config = event->attr.config; 3644 info->perf_event.event.cookie = event->bpf_cookie; 3645 info->perf_event.type = BPF_PERF_EVENT_EVENT; 3646 return 0; 3647 } 3648 3649 static int bpf_perf_link_fill_link_info(const struct bpf_link *link, 3650 struct bpf_link_info *info) 3651 { 3652 struct bpf_perf_link *perf_link; 3653 const struct perf_event *event; 3654 3655 perf_link = container_of(link, struct bpf_perf_link, link); 3656 event = perf_get_event(perf_link->perf_file); 3657 if (IS_ERR(event)) 3658 return PTR_ERR(event); 3659 3660 switch (event->prog->type) { 3661 case BPF_PROG_TYPE_PERF_EVENT: 3662 return bpf_perf_link_fill_perf_event(event, info); 3663 case BPF_PROG_TYPE_TRACEPOINT: 3664 return bpf_perf_link_fill_tracepoint(event, info); 3665 case BPF_PROG_TYPE_KPROBE: 3666 return bpf_perf_link_fill_probe(event, info); 3667 default: 3668 return -EOPNOTSUPP; 3669 } 3670 } 3671 3672 static const struct bpf_link_ops bpf_perf_link_lops = { 3673 .release = bpf_perf_link_release, 3674 .dealloc = bpf_perf_link_dealloc, 3675 .fill_link_info = bpf_perf_link_fill_link_info, 3676 }; 3677 3678 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3679 { 3680 struct bpf_link_primer link_primer; 3681 struct bpf_perf_link *link; 3682 struct perf_event *event; 3683 struct file *perf_file; 3684 int err; 3685 3686 if (attr->link_create.flags) 3687 return -EINVAL; 3688 3689 perf_file = perf_event_get(attr->link_create.target_fd); 3690 if (IS_ERR(perf_file)) 3691 return PTR_ERR(perf_file); 3692 3693 link = kzalloc(sizeof(*link), GFP_USER); 3694 if (!link) { 3695 err = -ENOMEM; 3696 goto out_put_file; 3697 } 3698 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 3699 link->perf_file = perf_file; 3700 3701 err = bpf_link_prime(&link->link, &link_primer); 3702 if (err) { 3703 kfree(link); 3704 goto out_put_file; 3705 } 3706 3707 event = perf_file->private_data; 3708 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 3709 if (err) { 3710 bpf_link_cleanup(&link_primer); 3711 goto out_put_file; 3712 } 3713 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 3714 bpf_prog_inc(prog); 3715 3716 return bpf_link_settle(&link_primer); 3717 3718 out_put_file: 3719 fput(perf_file); 3720 return err; 3721 } 3722 #else 3723 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3724 { 3725 return -EOPNOTSUPP; 3726 } 3727 #endif /* CONFIG_PERF_EVENTS */ 3728 3729 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 3730 const char __user *user_tp_name) 3731 { 3732 struct bpf_link_primer link_primer; 3733 struct bpf_raw_tp_link *link; 3734 struct bpf_raw_event_map *btp; 3735 const char *tp_name; 3736 char buf[128]; 3737 int err; 3738 3739 switch (prog->type) { 3740 case BPF_PROG_TYPE_TRACING: 3741 case BPF_PROG_TYPE_EXT: 3742 case BPF_PROG_TYPE_LSM: 3743 if (user_tp_name) 3744 /* The attach point for this category of programs 3745 * should be specified via btf_id during program load. 
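			 * A tracepoint name passed in here can therefore
			 * only be a user error and is rejected.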
3746 */ 3747 return -EINVAL; 3748 if (prog->type == BPF_PROG_TYPE_TRACING && 3749 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 3750 tp_name = prog->aux->attach_func_name; 3751 break; 3752 } 3753 return bpf_tracing_prog_attach(prog, 0, 0, 0); 3754 case BPF_PROG_TYPE_RAW_TRACEPOINT: 3755 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 3756 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) 3757 return -EFAULT; 3758 buf[sizeof(buf) - 1] = 0; 3759 tp_name = buf; 3760 break; 3761 default: 3762 return -EINVAL; 3763 } 3764 3765 btp = bpf_get_raw_tracepoint(tp_name); 3766 if (!btp) 3767 return -ENOENT; 3768 3769 link = kzalloc(sizeof(*link), GFP_USER); 3770 if (!link) { 3771 err = -ENOMEM; 3772 goto out_put_btp; 3773 } 3774 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 3775 &bpf_raw_tp_link_lops, prog); 3776 link->btp = btp; 3777 3778 err = bpf_link_prime(&link->link, &link_primer); 3779 if (err) { 3780 kfree(link); 3781 goto out_put_btp; 3782 } 3783 3784 err = bpf_probe_register(link->btp, prog); 3785 if (err) { 3786 bpf_link_cleanup(&link_primer); 3787 goto out_put_btp; 3788 } 3789 3790 return bpf_link_settle(&link_primer); 3791 3792 out_put_btp: 3793 bpf_put_raw_tracepoint(btp); 3794 return err; 3795 } 3796 3797 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 3798 3799 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 3800 { 3801 struct bpf_prog *prog; 3802 int fd; 3803 3804 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 3805 return -EINVAL; 3806 3807 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 3808 if (IS_ERR(prog)) 3809 return PTR_ERR(prog); 3810 3811 fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name)); 3812 if (fd < 0) 3813 bpf_prog_put(prog); 3814 return fd; 3815 } 3816 3817 static enum bpf_prog_type 3818 attach_type_to_prog_type(enum bpf_attach_type attach_type) 3819 { 3820 switch (attach_type) { 3821 case BPF_CGROUP_INET_INGRESS: 3822 case BPF_CGROUP_INET_EGRESS: 3823 return BPF_PROG_TYPE_CGROUP_SKB; 3824 case BPF_CGROUP_INET_SOCK_CREATE: 3825 case BPF_CGROUP_INET_SOCK_RELEASE: 3826 case BPF_CGROUP_INET4_POST_BIND: 3827 case BPF_CGROUP_INET6_POST_BIND: 3828 return BPF_PROG_TYPE_CGROUP_SOCK; 3829 case BPF_CGROUP_INET4_BIND: 3830 case BPF_CGROUP_INET6_BIND: 3831 case BPF_CGROUP_INET4_CONNECT: 3832 case BPF_CGROUP_INET6_CONNECT: 3833 case BPF_CGROUP_UNIX_CONNECT: 3834 case BPF_CGROUP_INET4_GETPEERNAME: 3835 case BPF_CGROUP_INET6_GETPEERNAME: 3836 case BPF_CGROUP_UNIX_GETPEERNAME: 3837 case BPF_CGROUP_INET4_GETSOCKNAME: 3838 case BPF_CGROUP_INET6_GETSOCKNAME: 3839 case BPF_CGROUP_UNIX_GETSOCKNAME: 3840 case BPF_CGROUP_UDP4_SENDMSG: 3841 case BPF_CGROUP_UDP6_SENDMSG: 3842 case BPF_CGROUP_UNIX_SENDMSG: 3843 case BPF_CGROUP_UDP4_RECVMSG: 3844 case BPF_CGROUP_UDP6_RECVMSG: 3845 case BPF_CGROUP_UNIX_RECVMSG: 3846 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 3847 case BPF_CGROUP_SOCK_OPS: 3848 return BPF_PROG_TYPE_SOCK_OPS; 3849 case BPF_CGROUP_DEVICE: 3850 return BPF_PROG_TYPE_CGROUP_DEVICE; 3851 case BPF_SK_MSG_VERDICT: 3852 return BPF_PROG_TYPE_SK_MSG; 3853 case BPF_SK_SKB_STREAM_PARSER: 3854 case BPF_SK_SKB_STREAM_VERDICT: 3855 case BPF_SK_SKB_VERDICT: 3856 return BPF_PROG_TYPE_SK_SKB; 3857 case BPF_LIRC_MODE2: 3858 return BPF_PROG_TYPE_LIRC_MODE2; 3859 case BPF_FLOW_DISSECTOR: 3860 return BPF_PROG_TYPE_FLOW_DISSECTOR; 3861 case BPF_CGROUP_SYSCTL: 3862 return BPF_PROG_TYPE_CGROUP_SYSCTL; 3863 case BPF_CGROUP_GETSOCKOPT: 3864 case BPF_CGROUP_SETSOCKOPT: 3865 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 3866 case BPF_TRACE_ITER: 
3867 case BPF_TRACE_RAW_TP: 3868 case BPF_TRACE_FENTRY: 3869 case BPF_TRACE_FEXIT: 3870 case BPF_MODIFY_RETURN: 3871 return BPF_PROG_TYPE_TRACING; 3872 case BPF_LSM_MAC: 3873 return BPF_PROG_TYPE_LSM; 3874 case BPF_SK_LOOKUP: 3875 return BPF_PROG_TYPE_SK_LOOKUP; 3876 case BPF_XDP: 3877 return BPF_PROG_TYPE_XDP; 3878 case BPF_LSM_CGROUP: 3879 return BPF_PROG_TYPE_LSM; 3880 case BPF_TCX_INGRESS: 3881 case BPF_TCX_EGRESS: 3882 case BPF_NETKIT_PRIMARY: 3883 case BPF_NETKIT_PEER: 3884 return BPF_PROG_TYPE_SCHED_CLS; 3885 default: 3886 return BPF_PROG_TYPE_UNSPEC; 3887 } 3888 } 3889 3890 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 3891 enum bpf_attach_type attach_type) 3892 { 3893 enum bpf_prog_type ptype; 3894 3895 switch (prog->type) { 3896 case BPF_PROG_TYPE_CGROUP_SOCK: 3897 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3898 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3899 case BPF_PROG_TYPE_SK_LOOKUP: 3900 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 3901 case BPF_PROG_TYPE_CGROUP_SKB: 3902 if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN)) 3903 /* cg-skb progs can be loaded by unpriv user. 3904 * check permissions at attach time. 3905 */ 3906 return -EPERM; 3907 return prog->enforce_expected_attach_type && 3908 prog->expected_attach_type != attach_type ? 3909 -EINVAL : 0; 3910 case BPF_PROG_TYPE_EXT: 3911 return 0; 3912 case BPF_PROG_TYPE_NETFILTER: 3913 if (attach_type != BPF_NETFILTER) 3914 return -EINVAL; 3915 return 0; 3916 case BPF_PROG_TYPE_PERF_EVENT: 3917 case BPF_PROG_TYPE_TRACEPOINT: 3918 if (attach_type != BPF_PERF_EVENT) 3919 return -EINVAL; 3920 return 0; 3921 case BPF_PROG_TYPE_KPROBE: 3922 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && 3923 attach_type != BPF_TRACE_KPROBE_MULTI) 3924 return -EINVAL; 3925 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && 3926 attach_type != BPF_TRACE_UPROBE_MULTI) 3927 return -EINVAL; 3928 if (attach_type != BPF_PERF_EVENT && 3929 attach_type != BPF_TRACE_KPROBE_MULTI && 3930 attach_type != BPF_TRACE_UPROBE_MULTI) 3931 return -EINVAL; 3932 return 0; 3933 case BPF_PROG_TYPE_SCHED_CLS: 3934 if (attach_type != BPF_TCX_INGRESS && 3935 attach_type != BPF_TCX_EGRESS && 3936 attach_type != BPF_NETKIT_PRIMARY && 3937 attach_type != BPF_NETKIT_PEER) 3938 return -EINVAL; 3939 return 0; 3940 default: 3941 ptype = attach_type_to_prog_type(attach_type); 3942 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) 3943 return -EINVAL; 3944 return 0; 3945 } 3946 } 3947 3948 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision 3949 3950 #define BPF_F_ATTACH_MASK_BASE \ 3951 (BPF_F_ALLOW_OVERRIDE | \ 3952 BPF_F_ALLOW_MULTI | \ 3953 BPF_F_REPLACE) 3954 3955 #define BPF_F_ATTACH_MASK_MPROG \ 3956 (BPF_F_REPLACE | \ 3957 BPF_F_BEFORE | \ 3958 BPF_F_AFTER | \ 3959 BPF_F_ID | \ 3960 BPF_F_LINK) 3961 3962 static int bpf_prog_attach(const union bpf_attr *attr) 3963 { 3964 enum bpf_prog_type ptype; 3965 struct bpf_prog *prog; 3966 int ret; 3967 3968 if (CHECK_ATTR(BPF_PROG_ATTACH)) 3969 return -EINVAL; 3970 3971 ptype = attach_type_to_prog_type(attr->attach_type); 3972 if (ptype == BPF_PROG_TYPE_UNSPEC) 3973 return -EINVAL; 3974 if (bpf_mprog_supported(ptype)) { 3975 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 3976 return -EINVAL; 3977 } else { 3978 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE) 3979 return -EINVAL; 3980 if (attr->relative_fd || 3981 attr->expected_revision) 3982 return -EINVAL; 3983 } 3984 3985 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 3986 if (IS_ERR(prog)) 3987 
return PTR_ERR(prog); 3988 3989 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 3990 bpf_prog_put(prog); 3991 return -EINVAL; 3992 } 3993 3994 switch (ptype) { 3995 case BPF_PROG_TYPE_SK_SKB: 3996 case BPF_PROG_TYPE_SK_MSG: 3997 ret = sock_map_get_from_fd(attr, prog); 3998 break; 3999 case BPF_PROG_TYPE_LIRC_MODE2: 4000 ret = lirc_prog_attach(attr, prog); 4001 break; 4002 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4003 ret = netns_bpf_prog_attach(attr, prog); 4004 break; 4005 case BPF_PROG_TYPE_CGROUP_DEVICE: 4006 case BPF_PROG_TYPE_CGROUP_SKB: 4007 case BPF_PROG_TYPE_CGROUP_SOCK: 4008 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4009 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4010 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4011 case BPF_PROG_TYPE_SOCK_OPS: 4012 case BPF_PROG_TYPE_LSM: 4013 if (ptype == BPF_PROG_TYPE_LSM && 4014 prog->expected_attach_type != BPF_LSM_CGROUP) 4015 ret = -EINVAL; 4016 else 4017 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 4018 break; 4019 case BPF_PROG_TYPE_SCHED_CLS: 4020 if (attr->attach_type == BPF_TCX_INGRESS || 4021 attr->attach_type == BPF_TCX_EGRESS) 4022 ret = tcx_prog_attach(attr, prog); 4023 else 4024 ret = netkit_prog_attach(attr, prog); 4025 break; 4026 default: 4027 ret = -EINVAL; 4028 } 4029 4030 if (ret) 4031 bpf_prog_put(prog); 4032 return ret; 4033 } 4034 4035 #define BPF_PROG_DETACH_LAST_FIELD expected_revision 4036 4037 static int bpf_prog_detach(const union bpf_attr *attr) 4038 { 4039 struct bpf_prog *prog = NULL; 4040 enum bpf_prog_type ptype; 4041 int ret; 4042 4043 if (CHECK_ATTR(BPF_PROG_DETACH)) 4044 return -EINVAL; 4045 4046 ptype = attach_type_to_prog_type(attr->attach_type); 4047 if (bpf_mprog_supported(ptype)) { 4048 if (ptype == BPF_PROG_TYPE_UNSPEC) 4049 return -EINVAL; 4050 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4051 return -EINVAL; 4052 if (attr->attach_bpf_fd) { 4053 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4054 if (IS_ERR(prog)) 4055 return PTR_ERR(prog); 4056 } 4057 } else if (attr->attach_flags || 4058 attr->relative_fd || 4059 attr->expected_revision) { 4060 return -EINVAL; 4061 } 4062 4063 switch (ptype) { 4064 case BPF_PROG_TYPE_SK_MSG: 4065 case BPF_PROG_TYPE_SK_SKB: 4066 ret = sock_map_prog_detach(attr, ptype); 4067 break; 4068 case BPF_PROG_TYPE_LIRC_MODE2: 4069 ret = lirc_prog_detach(attr); 4070 break; 4071 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4072 ret = netns_bpf_prog_detach(attr, ptype); 4073 break; 4074 case BPF_PROG_TYPE_CGROUP_DEVICE: 4075 case BPF_PROG_TYPE_CGROUP_SKB: 4076 case BPF_PROG_TYPE_CGROUP_SOCK: 4077 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4078 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4079 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4080 case BPF_PROG_TYPE_SOCK_OPS: 4081 case BPF_PROG_TYPE_LSM: 4082 ret = cgroup_bpf_prog_detach(attr, ptype); 4083 break; 4084 case BPF_PROG_TYPE_SCHED_CLS: 4085 if (attr->attach_type == BPF_TCX_INGRESS || 4086 attr->attach_type == BPF_TCX_EGRESS) 4087 ret = tcx_prog_detach(attr, prog); 4088 else 4089 ret = netkit_prog_detach(attr, prog); 4090 break; 4091 default: 4092 ret = -EINVAL; 4093 } 4094 4095 if (prog) 4096 bpf_prog_put(prog); 4097 return ret; 4098 } 4099 4100 #define BPF_PROG_QUERY_LAST_FIELD query.revision 4101 4102 static int bpf_prog_query(const union bpf_attr *attr, 4103 union bpf_attr __user *uattr) 4104 { 4105 if (!bpf_net_capable()) 4106 return -EPERM; 4107 if (CHECK_ATTR(BPF_PROG_QUERY)) 4108 return -EINVAL; 4109 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 4110 return -EINVAL; 4111 4112 switch (attr->query.attach_type) { 4113 case 
BPF_CGROUP_INET_INGRESS: 4114 case BPF_CGROUP_INET_EGRESS: 4115 case BPF_CGROUP_INET_SOCK_CREATE: 4116 case BPF_CGROUP_INET_SOCK_RELEASE: 4117 case BPF_CGROUP_INET4_BIND: 4118 case BPF_CGROUP_INET6_BIND: 4119 case BPF_CGROUP_INET4_POST_BIND: 4120 case BPF_CGROUP_INET6_POST_BIND: 4121 case BPF_CGROUP_INET4_CONNECT: 4122 case BPF_CGROUP_INET6_CONNECT: 4123 case BPF_CGROUP_UNIX_CONNECT: 4124 case BPF_CGROUP_INET4_GETPEERNAME: 4125 case BPF_CGROUP_INET6_GETPEERNAME: 4126 case BPF_CGROUP_UNIX_GETPEERNAME: 4127 case BPF_CGROUP_INET4_GETSOCKNAME: 4128 case BPF_CGROUP_INET6_GETSOCKNAME: 4129 case BPF_CGROUP_UNIX_GETSOCKNAME: 4130 case BPF_CGROUP_UDP4_SENDMSG: 4131 case BPF_CGROUP_UDP6_SENDMSG: 4132 case BPF_CGROUP_UNIX_SENDMSG: 4133 case BPF_CGROUP_UDP4_RECVMSG: 4134 case BPF_CGROUP_UDP6_RECVMSG: 4135 case BPF_CGROUP_UNIX_RECVMSG: 4136 case BPF_CGROUP_SOCK_OPS: 4137 case BPF_CGROUP_DEVICE: 4138 case BPF_CGROUP_SYSCTL: 4139 case BPF_CGROUP_GETSOCKOPT: 4140 case BPF_CGROUP_SETSOCKOPT: 4141 case BPF_LSM_CGROUP: 4142 return cgroup_bpf_prog_query(attr, uattr); 4143 case BPF_LIRC_MODE2: 4144 return lirc_prog_query(attr, uattr); 4145 case BPF_FLOW_DISSECTOR: 4146 case BPF_SK_LOOKUP: 4147 return netns_bpf_prog_query(attr, uattr); 4148 case BPF_SK_SKB_STREAM_PARSER: 4149 case BPF_SK_SKB_STREAM_VERDICT: 4150 case BPF_SK_MSG_VERDICT: 4151 case BPF_SK_SKB_VERDICT: 4152 return sock_map_bpf_prog_query(attr, uattr); 4153 case BPF_TCX_INGRESS: 4154 case BPF_TCX_EGRESS: 4155 return tcx_prog_query(attr, uattr); 4156 case BPF_NETKIT_PRIMARY: 4157 case BPF_NETKIT_PEER: 4158 return netkit_prog_query(attr, uattr); 4159 default: 4160 return -EINVAL; 4161 } 4162 } 4163 4164 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size 4165 4166 static int bpf_prog_test_run(const union bpf_attr *attr, 4167 union bpf_attr __user *uattr) 4168 { 4169 struct bpf_prog *prog; 4170 int ret = -ENOTSUPP; 4171 4172 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 4173 return -EINVAL; 4174 4175 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 4176 (!attr->test.ctx_size_in && attr->test.ctx_in)) 4177 return -EINVAL; 4178 4179 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 4180 (!attr->test.ctx_size_out && attr->test.ctx_out)) 4181 return -EINVAL; 4182 4183 prog = bpf_prog_get(attr->test.prog_fd); 4184 if (IS_ERR(prog)) 4185 return PTR_ERR(prog); 4186 4187 if (prog->aux->ops->test_run) 4188 ret = prog->aux->ops->test_run(prog, attr, uattr); 4189 4190 bpf_prog_put(prog); 4191 return ret; 4192 } 4193 4194 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 4195 4196 static int bpf_obj_get_next_id(const union bpf_attr *attr, 4197 union bpf_attr __user *uattr, 4198 struct idr *idr, 4199 spinlock_t *lock) 4200 { 4201 u32 next_id = attr->start_id; 4202 int err = 0; 4203 4204 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 4205 return -EINVAL; 4206 4207 if (!capable(CAP_SYS_ADMIN)) 4208 return -EPERM; 4209 4210 next_id++; 4211 spin_lock_bh(lock); 4212 if (!idr_get_next(idr, &next_id)) 4213 err = -ENOENT; 4214 spin_unlock_bh(lock); 4215 4216 if (!err) 4217 err = put_user(next_id, &uattr->next_id); 4218 4219 return err; 4220 } 4221 4222 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 4223 { 4224 struct bpf_map *map; 4225 4226 spin_lock_bh(&map_idr_lock); 4227 again: 4228 map = idr_get_next(&map_idr, id); 4229 if (map) { 4230 map = __bpf_map_inc_not_zero(map, false); 4231 if (IS_ERR(map)) { 4232 (*id)++; 4233 goto again; 4234 } 4235 } 4236 spin_unlock_bh(&map_idr_lock); 4237 4238 return map; 4239 } 4240 4241 struct bpf_prog 
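/* Same iteration pattern as bpf_map_get_curr_or_next() above: return the program with the smallest ID >= *id that still has a nonzero refcount, taking a reference and skipping entries that are concurrently being freed: */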
*bpf_prog_get_curr_or_next(u32 *id) 4242 { 4243 struct bpf_prog *prog; 4244 4245 spin_lock_bh(&prog_idr_lock); 4246 again: 4247 prog = idr_get_next(&prog_idr, id); 4248 if (prog) { 4249 prog = bpf_prog_inc_not_zero(prog); 4250 if (IS_ERR(prog)) { 4251 (*id)++; 4252 goto again; 4253 } 4254 } 4255 spin_unlock_bh(&prog_idr_lock); 4256 4257 return prog; 4258 } 4259 4260 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 4261 4262 struct bpf_prog *bpf_prog_by_id(u32 id) 4263 { 4264 struct bpf_prog *prog; 4265 4266 if (!id) 4267 return ERR_PTR(-ENOENT); 4268 4269 spin_lock_bh(&prog_idr_lock); 4270 prog = idr_find(&prog_idr, id); 4271 if (prog) 4272 prog = bpf_prog_inc_not_zero(prog); 4273 else 4274 prog = ERR_PTR(-ENOENT); 4275 spin_unlock_bh(&prog_idr_lock); 4276 return prog; 4277 } 4278 4279 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 4280 { 4281 struct bpf_prog *prog; 4282 u32 id = attr->prog_id; 4283 int fd; 4284 4285 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 4286 return -EINVAL; 4287 4288 if (!capable(CAP_SYS_ADMIN)) 4289 return -EPERM; 4290 4291 prog = bpf_prog_by_id(id); 4292 if (IS_ERR(prog)) 4293 return PTR_ERR(prog); 4294 4295 fd = bpf_prog_new_fd(prog); 4296 if (fd < 0) 4297 bpf_prog_put(prog); 4298 4299 return fd; 4300 } 4301 4302 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 4303 4304 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 4305 { 4306 struct bpf_map *map; 4307 u32 id = attr->map_id; 4308 int f_flags; 4309 int fd; 4310 4311 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 4312 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 4313 return -EINVAL; 4314 4315 if (!capable(CAP_SYS_ADMIN)) 4316 return -EPERM; 4317 4318 f_flags = bpf_get_file_flag(attr->open_flags); 4319 if (f_flags < 0) 4320 return f_flags; 4321 4322 spin_lock_bh(&map_idr_lock); 4323 map = idr_find(&map_idr, id); 4324 if (map) 4325 map = __bpf_map_inc_not_zero(map, true); 4326 else 4327 map = ERR_PTR(-ENOENT); 4328 spin_unlock_bh(&map_idr_lock); 4329 4330 if (IS_ERR(map)) 4331 return PTR_ERR(map); 4332 4333 fd = bpf_map_new_fd(map, f_flags); 4334 if (fd < 0) 4335 bpf_map_put_with_uref(map); 4336 4337 return fd; 4338 } 4339 4340 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 4341 unsigned long addr, u32 *off, 4342 u32 *type) 4343 { 4344 const struct bpf_map *map; 4345 int i; 4346 4347 mutex_lock(&prog->aux->used_maps_mutex); 4348 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 4349 map = prog->aux->used_maps[i]; 4350 if (map == (void *)addr) { 4351 *type = BPF_PSEUDO_MAP_FD; 4352 goto out; 4353 } 4354 if (!map->ops->map_direct_value_meta) 4355 continue; 4356 if (!map->ops->map_direct_value_meta(map, addr, off)) { 4357 *type = BPF_PSEUDO_MAP_VALUE; 4358 goto out; 4359 } 4360 } 4361 map = NULL; 4362 4363 out: 4364 mutex_unlock(&prog->aux->used_maps_mutex); 4365 return map; 4366 } 4367 4368 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 4369 const struct cred *f_cred) 4370 { 4371 const struct bpf_map *map; 4372 struct bpf_insn *insns; 4373 u32 off, type; 4374 u64 imm; 4375 u8 code; 4376 int i; 4377 4378 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 4379 GFP_USER); 4380 if (!insns) 4381 return insns; 4382 4383 for (i = 0; i < prog->len; i++) { 4384 code = insns[i].code; 4385 4386 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 4387 insns[i].code = BPF_JMP | BPF_CALL; 4388 insns[i].imm = BPF_FUNC_tail_call; 4389 /* fall-through */ 4390 } 4391 if (code == (BPF_JMP | BPF_CALL) || 4392 code == (BPF_JMP | BPF_CALL_ARGS)) { 4393 if (code == 
(BPF_JMP | BPF_CALL_ARGS)) 4394 insns[i].code = BPF_JMP | BPF_CALL; 4395 if (!bpf_dump_raw_ok(f_cred)) 4396 insns[i].imm = 0; 4397 continue; 4398 } 4399 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 4400 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 4401 continue; 4402 } 4403 4404 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 4405 continue; 4406 4407 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 4408 map = bpf_map_from_imm(prog, imm, &off, &type); 4409 if (map) { 4410 insns[i].src_reg = type; 4411 insns[i].imm = map->id; 4412 insns[i + 1].imm = off; 4413 continue; 4414 } 4415 } 4416 4417 return insns; 4418 } 4419 4420 static int set_info_rec_size(struct bpf_prog_info *info) 4421 { 4422 /* 4423 * Ensure info.*_rec_size is the same as kernel expected size 4424 * 4425 * or 4426 * 4427 * Only allow zero *_rec_size if both _rec_size and _cnt are 4428 * zero. In this case, the kernel will set the expected 4429 * _rec_size back to the info. 4430 */ 4431 4432 if ((info->nr_func_info || info->func_info_rec_size) && 4433 info->func_info_rec_size != sizeof(struct bpf_func_info)) 4434 return -EINVAL; 4435 4436 if ((info->nr_line_info || info->line_info_rec_size) && 4437 info->line_info_rec_size != sizeof(struct bpf_line_info)) 4438 return -EINVAL; 4439 4440 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 4441 info->jited_line_info_rec_size != sizeof(__u64)) 4442 return -EINVAL; 4443 4444 info->func_info_rec_size = sizeof(struct bpf_func_info); 4445 info->line_info_rec_size = sizeof(struct bpf_line_info); 4446 info->jited_line_info_rec_size = sizeof(__u64); 4447 4448 return 0; 4449 } 4450 4451 static int bpf_prog_get_info_by_fd(struct file *file, 4452 struct bpf_prog *prog, 4453 const union bpf_attr *attr, 4454 union bpf_attr __user *uattr) 4455 { 4456 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4457 struct btf *attach_btf = bpf_prog_get_target_btf(prog); 4458 struct bpf_prog_info info; 4459 u32 info_len = attr->info.info_len; 4460 struct bpf_prog_kstats stats; 4461 char __user *uinsns; 4462 u32 ulen; 4463 int err; 4464 4465 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4466 if (err) 4467 return err; 4468 info_len = min_t(u32, sizeof(info), info_len); 4469 4470 memset(&info, 0, sizeof(info)); 4471 if (copy_from_user(&info, uinfo, info_len)) 4472 return -EFAULT; 4473 4474 info.type = prog->type; 4475 info.id = prog->aux->id; 4476 info.load_time = prog->aux->load_time; 4477 info.created_by_uid = from_kuid_munged(current_user_ns(), 4478 prog->aux->user->uid); 4479 info.gpl_compatible = prog->gpl_compatible; 4480 4481 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 4482 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 4483 4484 mutex_lock(&prog->aux->used_maps_mutex); 4485 ulen = info.nr_map_ids; 4486 info.nr_map_ids = prog->aux->used_map_cnt; 4487 ulen = min_t(u32, info.nr_map_ids, ulen); 4488 if (ulen) { 4489 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 4490 u32 i; 4491 4492 for (i = 0; i < ulen; i++) 4493 if (put_user(prog->aux->used_maps[i]->id, 4494 &user_map_ids[i])) { 4495 mutex_unlock(&prog->aux->used_maps_mutex); 4496 return -EFAULT; 4497 } 4498 } 4499 mutex_unlock(&prog->aux->used_maps_mutex); 4500 4501 err = set_info_rec_size(&info); 4502 if (err) 4503 return err; 4504 4505 bpf_prog_get_stats(prog, &stats); 4506 info.run_time_ns = stats.nsecs; 4507 info.run_cnt = stats.cnt; 4508 info.recursion_misses = stats.misses; 4509 4510 info.verified_insns = 
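/* load-time verifier statistic (number of instructions processed); unlike the dump-related fields below, it is not cleared for callers lacking bpf_capable(): */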
prog->aux->verified_insns; 4511 4512 if (!bpf_capable()) { 4513 info.jited_prog_len = 0; 4514 info.xlated_prog_len = 0; 4515 info.nr_jited_ksyms = 0; 4516 info.nr_jited_func_lens = 0; 4517 info.nr_func_info = 0; 4518 info.nr_line_info = 0; 4519 info.nr_jited_line_info = 0; 4520 goto done; 4521 } 4522 4523 ulen = info.xlated_prog_len; 4524 info.xlated_prog_len = bpf_prog_insn_size(prog); 4525 if (info.xlated_prog_len && ulen) { 4526 struct bpf_insn *insns_sanitized; 4527 bool fault; 4528 4529 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 4530 info.xlated_prog_insns = 0; 4531 goto done; 4532 } 4533 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 4534 if (!insns_sanitized) 4535 return -ENOMEM; 4536 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 4537 ulen = min_t(u32, info.xlated_prog_len, ulen); 4538 fault = copy_to_user(uinsns, insns_sanitized, ulen); 4539 kfree(insns_sanitized); 4540 if (fault) 4541 return -EFAULT; 4542 } 4543 4544 if (bpf_prog_is_offloaded(prog->aux)) { 4545 err = bpf_prog_offload_info_fill(&info, prog); 4546 if (err) 4547 return err; 4548 goto done; 4549 } 4550 4551 /* NOTE: the following code is supposed to be skipped for offload. 4552 * bpf_prog_offload_info_fill() is the place to fill similar fields 4553 * for offload. 4554 */ 4555 ulen = info.jited_prog_len; 4556 if (prog->aux->func_cnt) { 4557 u32 i; 4558 4559 info.jited_prog_len = 0; 4560 for (i = 0; i < prog->aux->func_cnt; i++) 4561 info.jited_prog_len += prog->aux->func[i]->jited_len; 4562 } else { 4563 info.jited_prog_len = prog->jited_len; 4564 } 4565 4566 if (info.jited_prog_len && ulen) { 4567 if (bpf_dump_raw_ok(file->f_cred)) { 4568 uinsns = u64_to_user_ptr(info.jited_prog_insns); 4569 ulen = min_t(u32, info.jited_prog_len, ulen); 4570 4571 /* for multi-function programs, copy the JITed 4572 * instructions for all the functions 4573 */ 4574 if (prog->aux->func_cnt) { 4575 u32 len, free, i; 4576 u8 *img; 4577 4578 free = ulen; 4579 for (i = 0; i < prog->aux->func_cnt; i++) { 4580 len = prog->aux->func[i]->jited_len; 4581 len = min_t(u32, len, free); 4582 img = (u8 *) prog->aux->func[i]->bpf_func; 4583 if (copy_to_user(uinsns, img, len)) 4584 return -EFAULT; 4585 uinsns += len; 4586 free -= len; 4587 if (!free) 4588 break; 4589 } 4590 } else { 4591 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 4592 return -EFAULT; 4593 } 4594 } else { 4595 info.jited_prog_insns = 0; 4596 } 4597 } 4598 4599 ulen = info.nr_jited_ksyms; 4600 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 4601 if (ulen) { 4602 if (bpf_dump_raw_ok(file->f_cred)) { 4603 unsigned long ksym_addr; 4604 u64 __user *user_ksyms; 4605 u32 i; 4606 4607 /* copy the address of the kernel symbol 4608 * corresponding to each function 4609 */ 4610 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 4611 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 4612 if (prog->aux->func_cnt) { 4613 for (i = 0; i < ulen; i++) { 4614 ksym_addr = (unsigned long) 4615 prog->aux->func[i]->bpf_func; 4616 if (put_user((u64) ksym_addr, 4617 &user_ksyms[i])) 4618 return -EFAULT; 4619 } 4620 } else { 4621 ksym_addr = (unsigned long) prog->bpf_func; 4622 if (put_user((u64) ksym_addr, &user_ksyms[0])) 4623 return -EFAULT; 4624 } 4625 } else { 4626 info.jited_ksyms = 0; 4627 } 4628 } 4629 4630 ulen = info.nr_jited_func_lens; 4631 info.nr_jited_func_lens = prog->aux->func_cnt ? 
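/* GNU ?: shorthand: report func_cnt entries when the program was split into subprograms, else one entry for the single JITed image */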
: 1; 4632 if (ulen) { 4633 if (bpf_dump_raw_ok(file->f_cred)) { 4634 u32 __user *user_lens; 4635 u32 func_len, i; 4636 4637 /* copy the JITed image lengths for each function */ 4638 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 4639 user_lens = u64_to_user_ptr(info.jited_func_lens); 4640 if (prog->aux->func_cnt) { 4641 for (i = 0; i < ulen; i++) { 4642 func_len = 4643 prog->aux->func[i]->jited_len; 4644 if (put_user(func_len, &user_lens[i])) 4645 return -EFAULT; 4646 } 4647 } else { 4648 func_len = prog->jited_len; 4649 if (put_user(func_len, &user_lens[0])) 4650 return -EFAULT; 4651 } 4652 } else { 4653 info.jited_func_lens = 0; 4654 } 4655 } 4656 4657 if (prog->aux->btf) 4658 info.btf_id = btf_obj_id(prog->aux->btf); 4659 info.attach_btf_id = prog->aux->attach_btf_id; 4660 if (attach_btf) 4661 info.attach_btf_obj_id = btf_obj_id(attach_btf); 4662 4663 ulen = info.nr_func_info; 4664 info.nr_func_info = prog->aux->func_info_cnt; 4665 if (info.nr_func_info && ulen) { 4666 char __user *user_finfo; 4667 4668 user_finfo = u64_to_user_ptr(info.func_info); 4669 ulen = min_t(u32, info.nr_func_info, ulen); 4670 if (copy_to_user(user_finfo, prog->aux->func_info, 4671 info.func_info_rec_size * ulen)) 4672 return -EFAULT; 4673 } 4674 4675 ulen = info.nr_line_info; 4676 info.nr_line_info = prog->aux->nr_linfo; 4677 if (info.nr_line_info && ulen) { 4678 __u8 __user *user_linfo; 4679 4680 user_linfo = u64_to_user_ptr(info.line_info); 4681 ulen = min_t(u32, info.nr_line_info, ulen); 4682 if (copy_to_user(user_linfo, prog->aux->linfo, 4683 info.line_info_rec_size * ulen)) 4684 return -EFAULT; 4685 } 4686 4687 ulen = info.nr_jited_line_info; 4688 if (prog->aux->jited_linfo) 4689 info.nr_jited_line_info = prog->aux->nr_linfo; 4690 else 4691 info.nr_jited_line_info = 0; 4692 if (info.nr_jited_line_info && ulen) { 4693 if (bpf_dump_raw_ok(file->f_cred)) { 4694 unsigned long line_addr; 4695 __u64 __user *user_linfo; 4696 u32 i; 4697 4698 user_linfo = u64_to_user_ptr(info.jited_line_info); 4699 ulen = min_t(u32, info.nr_jited_line_info, ulen); 4700 for (i = 0; i < ulen; i++) { 4701 line_addr = (unsigned long)prog->aux->jited_linfo[i]; 4702 if (put_user((__u64)line_addr, &user_linfo[i])) 4703 return -EFAULT; 4704 } 4705 } else { 4706 info.jited_line_info = 0; 4707 } 4708 } 4709 4710 ulen = info.nr_prog_tags; 4711 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 4712 if (ulen) { 4713 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 4714 u32 i; 4715 4716 user_prog_tags = u64_to_user_ptr(info.prog_tags); 4717 ulen = min_t(u32, info.nr_prog_tags, ulen); 4718 if (prog->aux->func_cnt) { 4719 for (i = 0; i < ulen; i++) { 4720 if (copy_to_user(user_prog_tags[i], 4721 prog->aux->func[i]->tag, 4722 BPF_TAG_SIZE)) 4723 return -EFAULT; 4724 } 4725 } else { 4726 if (copy_to_user(user_prog_tags[0], 4727 prog->tag, BPF_TAG_SIZE)) 4728 return -EFAULT; 4729 } 4730 } 4731 4732 done: 4733 if (copy_to_user(uinfo, &info, info_len) || 4734 put_user(info_len, &uattr->info.info_len)) 4735 return -EFAULT; 4736 4737 return 0; 4738 } 4739 4740 static int bpf_map_get_info_by_fd(struct file *file, 4741 struct bpf_map *map, 4742 const union bpf_attr *attr, 4743 union bpf_attr __user *uattr) 4744 { 4745 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4746 struct bpf_map_info info; 4747 u32 info_len = attr->info.info_len; 4748 int err; 4749 4750 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4751 if (err) 4752 return err; 4753 info_len = min_t(u32, sizeof(info), info_len); 4754 4755 memset(&info, 0, sizeof(info)); 4756 info.type = map->map_type; 4757 info.id = map->id; 4758 info.key_size = map->key_size; 4759 info.value_size = map->value_size; 4760 info.max_entries = map->max_entries; 4761 info.map_flags = map->map_flags; 4762 info.map_extra = map->map_extra; 4763 memcpy(info.name, map->name, sizeof(map->name)); 4764 4765 if (map->btf) { 4766 info.btf_id = btf_obj_id(map->btf); 4767 info.btf_key_type_id = map->btf_key_type_id; 4768 info.btf_value_type_id = map->btf_value_type_id; 4769 } 4770 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 4771 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) 4772 bpf_map_struct_ops_info_fill(&info, map); 4773 4774 if (bpf_map_is_offloaded(map)) { 4775 err = bpf_map_offload_info_fill(&info, map); 4776 if (err) 4777 return err; 4778 } 4779 4780 if (copy_to_user(uinfo, &info, info_len) || 4781 put_user(info_len, &uattr->info.info_len)) 4782 return -EFAULT; 4783 4784 return 0; 4785 } 4786 4787 static int bpf_btf_get_info_by_fd(struct file *file, 4788 struct btf *btf, 4789 const union bpf_attr *attr, 4790 union bpf_attr __user *uattr) 4791 { 4792 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4793 u32 info_len = attr->info.info_len; 4794 int err; 4795 4796 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); 4797 if (err) 4798 return err; 4799 4800 return btf_get_info_by_fd(btf, attr, uattr); 4801 } 4802 4803 static int bpf_link_get_info_by_fd(struct file *file, 4804 struct bpf_link *link, 4805 const union bpf_attr *attr, 4806 union bpf_attr __user *uattr) 4807 { 4808 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4809 struct bpf_link_info info; 4810 u32 info_len = attr->info.info_len; 4811 int err; 4812 4813 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4814 if (err) 4815 return err; 4816 info_len = min_t(u32, sizeof(info), info_len); 4817 4818 memset(&info, 0, sizeof(info)); 4819 if (copy_from_user(&info, uinfo, info_len)) 4820 return -EFAULT; 4821 4822 info.type = link->type; 4823 info.id = link->id; 4824 if (link->prog) 4825 info.prog_id = link->prog->aux->id; 4826 4827 if (link->ops->fill_link_info) { 4828 err = link->ops->fill_link_info(link, &info); 4829 if (err) 4830 return err; 4831 } 4832 4833 if (copy_to_user(uinfo, &info, info_len) || 4834 put_user(info_len, 
&uattr->info.info_len)) 4835 return -EFAULT; 4836 4837 return 0; 4838 } 4839 4840 4841 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 4842 4843 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 4844 union bpf_attr __user *uattr) 4845 { 4846 int ufd = attr->info.bpf_fd; 4847 struct fd f; 4848 int err; 4849 4850 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 4851 return -EINVAL; 4852 4853 f = fdget(ufd); 4854 if (!f.file) 4855 return -EBADFD; 4856 4857 if (f.file->f_op == &bpf_prog_fops) 4858 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, 4859 uattr); 4860 else if (f.file->f_op == &bpf_map_fops) 4861 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, 4862 uattr); 4863 else if (f.file->f_op == &btf_fops) 4864 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); 4865 else if (f.file->f_op == &bpf_link_fops) 4866 err = bpf_link_get_info_by_fd(f.file, f.file->private_data, 4867 attr, uattr); 4868 else 4869 err = -EINVAL; 4870 4871 fdput(f); 4872 return err; 4873 } 4874 4875 #define BPF_BTF_LOAD_LAST_FIELD btf_token_fd 4876 4877 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) 4878 { 4879 struct bpf_token *token = NULL; 4880 4881 if (CHECK_ATTR(BPF_BTF_LOAD)) 4882 return -EINVAL; 4883 4884 if (attr->btf_flags & ~BPF_F_TOKEN_FD) 4885 return -EINVAL; 4886 4887 if (attr->btf_flags & BPF_F_TOKEN_FD) { 4888 token = bpf_token_get_from_fd(attr->btf_token_fd); 4889 if (IS_ERR(token)) 4890 return PTR_ERR(token); 4891 if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) { 4892 bpf_token_put(token); 4893 token = NULL; 4894 } 4895 } 4896 4897 if (!bpf_token_capable(token, CAP_BPF)) { 4898 bpf_token_put(token); 4899 return -EPERM; 4900 } 4901 4902 bpf_token_put(token); 4903 4904 return btf_new_fd(attr, uattr, uattr_size); 4905 } 4906 4907 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id 4908 4909 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) 4910 { 4911 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 4912 return -EINVAL; 4913 4914 if (!capable(CAP_SYS_ADMIN)) 4915 return -EPERM; 4916 4917 return btf_get_fd_by_id(attr->btf_id); 4918 } 4919 4920 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 4921 union bpf_attr __user *uattr, 4922 u32 prog_id, u32 fd_type, 4923 const char *buf, u64 probe_offset, 4924 u64 probe_addr) 4925 { 4926 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 4927 u32 len = buf ? strlen(buf) : 0, input_len; 4928 int err = 0; 4929 4930 if (put_user(len, &uattr->task_fd_query.buf_len)) 4931 return -EFAULT; 4932 input_len = attr->task_fd_query.buf_len; 4933 if (input_len && ubuf) { 4934 if (!len) { 4935 /* nothing to copy, just make ubuf NULL terminated */ 4936 char zero = '\0'; 4937 4938 if (put_user(zero, ubuf)) 4939 return -EFAULT; 4940 } else if (input_len >= len + 1) { 4941 /* ubuf can hold the string with NULL terminator */ 4942 if (copy_to_user(ubuf, buf, len + 1)) 4943 return -EFAULT; 4944 } else { 4945 /* ubuf cannot hold the string with NULL terminator, 4946 * do a partial copy with NULL terminator. 
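 * E.g. (illustrative): with buf "kprobe_test" (len 11) and an 8-byte user buffer, userspace receives "kprobe_" plus the NUL terminator and -ENOSPC, while buf_len is still set to 11 so the caller can retry with a larger buffer.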
4947 */ 4948 char zero = '\0'; 4949 4950 err = -ENOSPC; 4951 if (copy_to_user(ubuf, buf, input_len - 1)) 4952 return -EFAULT; 4953 if (put_user(zero, ubuf + input_len - 1)) 4954 return -EFAULT; 4955 } 4956 } 4957 4958 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 4959 put_user(fd_type, &uattr->task_fd_query.fd_type) || 4960 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 4961 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 4962 return -EFAULT; 4963 4964 return err; 4965 } 4966 4967 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 4968 4969 static int bpf_task_fd_query(const union bpf_attr *attr, 4970 union bpf_attr __user *uattr) 4971 { 4972 pid_t pid = attr->task_fd_query.pid; 4973 u32 fd = attr->task_fd_query.fd; 4974 const struct perf_event *event; 4975 struct task_struct *task; 4976 struct file *file; 4977 int err; 4978 4979 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 4980 return -EINVAL; 4981 4982 if (!capable(CAP_SYS_ADMIN)) 4983 return -EPERM; 4984 4985 if (attr->task_fd_query.flags != 0) 4986 return -EINVAL; 4987 4988 rcu_read_lock(); 4989 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 4990 rcu_read_unlock(); 4991 if (!task) 4992 return -ENOENT; 4993 4994 err = 0; 4995 file = fget_task(task, fd); 4996 put_task_struct(task); 4997 if (!file) 4998 return -EBADF; 4999 5000 if (file->f_op == &bpf_link_fops) { 5001 struct bpf_link *link = file->private_data; 5002 5003 if (link->ops == &bpf_raw_tp_link_lops) { 5004 struct bpf_raw_tp_link *raw_tp = 5005 container_of(link, struct bpf_raw_tp_link, link); 5006 struct bpf_raw_event_map *btp = raw_tp->btp; 5007 5008 err = bpf_task_fd_query_copy(attr, uattr, 5009 raw_tp->link.prog->aux->id, 5010 BPF_FD_TYPE_RAW_TRACEPOINT, 5011 btp->tp->name, 0, 0); 5012 goto put_file; 5013 } 5014 goto out_not_supp; 5015 } 5016 5017 event = perf_get_event(file); 5018 if (!IS_ERR(event)) { 5019 u64 probe_offset, probe_addr; 5020 u32 prog_id, fd_type; 5021 const char *buf; 5022 5023 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 5024 &buf, &probe_offset, 5025 &probe_addr, NULL); 5026 if (!err) 5027 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 5028 fd_type, buf, 5029 probe_offset, 5030 probe_addr); 5031 goto put_file; 5032 } 5033 5034 out_not_supp: 5035 err = -ENOTSUPP; 5036 put_file: 5037 fput(file); 5038 return err; 5039 } 5040 5041 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 5042 5043 #define BPF_DO_BATCH(fn, ...) 
\ 5044 do { \ 5045 if (!fn) { \ 5046 err = -ENOTSUPP; \ 5047 goto err_put; \ 5048 } \ 5049 err = fn(__VA_ARGS__); \ 5050 } while (0) 5051 5052 static int bpf_map_do_batch(const union bpf_attr *attr, 5053 union bpf_attr __user *uattr, 5054 int cmd) 5055 { 5056 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH || 5057 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; 5058 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; 5059 struct bpf_map *map; 5060 int err, ufd; 5061 struct fd f; 5062 5063 if (CHECK_ATTR(BPF_MAP_BATCH)) 5064 return -EINVAL; 5065 5066 ufd = attr->batch.map_fd; 5067 f = fdget(ufd); 5068 map = __bpf_map_get(f); 5069 if (IS_ERR(map)) 5070 return PTR_ERR(map); 5071 if (has_write) 5072 bpf_map_write_active_inc(map); 5073 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 5074 err = -EPERM; 5075 goto err_put; 5076 } 5077 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 5078 err = -EPERM; 5079 goto err_put; 5080 } 5081 5082 if (cmd == BPF_MAP_LOOKUP_BATCH) 5083 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); 5084 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 5085 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); 5086 else if (cmd == BPF_MAP_UPDATE_BATCH) 5087 BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr); 5088 else 5089 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); 5090 err_put: 5091 if (has_write) { 5092 maybe_wait_bpf_programs(map); 5093 bpf_map_write_active_dec(map); 5094 } 5095 fdput(f); 5096 return err; 5097 } 5098 5099 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid 5100 static int link_create(union bpf_attr *attr, bpfptr_t uattr) 5101 { 5102 struct bpf_prog *prog; 5103 int ret; 5104 5105 if (CHECK_ATTR(BPF_LINK_CREATE)) 5106 return -EINVAL; 5107 5108 if (attr->link_create.attach_type == BPF_STRUCT_OPS) 5109 return bpf_struct_ops_link_create(attr); 5110 5111 prog = bpf_prog_get(attr->link_create.prog_fd); 5112 if (IS_ERR(prog)) 5113 return PTR_ERR(prog); 5114 5115 ret = bpf_prog_attach_check_attach_type(prog, 5116 attr->link_create.attach_type); 5117 if (ret) 5118 goto out; 5119 5120 switch (prog->type) { 5121 case BPF_PROG_TYPE_CGROUP_SKB: 5122 case BPF_PROG_TYPE_CGROUP_SOCK: 5123 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 5124 case BPF_PROG_TYPE_SOCK_OPS: 5125 case BPF_PROG_TYPE_CGROUP_DEVICE: 5126 case BPF_PROG_TYPE_CGROUP_SYSCTL: 5127 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 5128 ret = cgroup_bpf_link_attach(attr, prog); 5129 break; 5130 case BPF_PROG_TYPE_EXT: 5131 ret = bpf_tracing_prog_attach(prog, 5132 attr->link_create.target_fd, 5133 attr->link_create.target_btf_id, 5134 attr->link_create.tracing.cookie); 5135 break; 5136 case BPF_PROG_TYPE_LSM: 5137 case BPF_PROG_TYPE_TRACING: 5138 if (attr->link_create.attach_type != prog->expected_attach_type) { 5139 ret = -EINVAL; 5140 goto out; 5141 } 5142 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 5143 ret = bpf_raw_tp_link_attach(prog, NULL); 5144 else if (prog->expected_attach_type == BPF_TRACE_ITER) 5145 ret = bpf_iter_link_attach(attr, uattr, prog); 5146 else if (prog->expected_attach_type == BPF_LSM_CGROUP) 5147 ret = cgroup_bpf_link_attach(attr, prog); 5148 else 5149 ret = bpf_tracing_prog_attach(prog, 5150 attr->link_create.target_fd, 5151 attr->link_create.target_btf_id, 5152 attr->link_create.tracing.cookie); 5153 break; 5154 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5155 case BPF_PROG_TYPE_SK_LOOKUP: 5156 ret = netns_bpf_link_create(attr, prog); 5157 break; 5158 #ifdef CONFIG_NET 5159 case BPF_PROG_TYPE_XDP: 5160 ret = 
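/* XDP link: attaches to the net device selected by link_create.target_ifindex */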
bpf_xdp_link_attach(attr, prog); 5161 break; 5162 case BPF_PROG_TYPE_SCHED_CLS: 5163 if (attr->link_create.attach_type == BPF_TCX_INGRESS || 5164 attr->link_create.attach_type == BPF_TCX_EGRESS) 5165 ret = tcx_link_attach(attr, prog); 5166 else 5167 ret = netkit_link_attach(attr, prog); 5168 break; 5169 case BPF_PROG_TYPE_NETFILTER: 5170 ret = bpf_nf_link_attach(attr, prog); 5171 break; 5172 #endif 5173 case BPF_PROG_TYPE_PERF_EVENT: 5174 case BPF_PROG_TYPE_TRACEPOINT: 5175 ret = bpf_perf_link_attach(attr, prog); 5176 break; 5177 case BPF_PROG_TYPE_KPROBE: 5178 if (attr->link_create.attach_type == BPF_PERF_EVENT) 5179 ret = bpf_perf_link_attach(attr, prog); 5180 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI) 5181 ret = bpf_kprobe_multi_link_attach(attr, prog); 5182 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI) 5183 ret = bpf_uprobe_multi_link_attach(attr, prog); 5184 break; 5185 default: 5186 ret = -EINVAL; 5187 } 5188 5189 out: 5190 if (ret < 0) 5191 bpf_prog_put(prog); 5192 return ret; 5193 } 5194 5195 static int link_update_map(struct bpf_link *link, union bpf_attr *attr) 5196 { 5197 struct bpf_map *new_map, *old_map = NULL; 5198 int ret; 5199 5200 new_map = bpf_map_get(attr->link_update.new_map_fd); 5201 if (IS_ERR(new_map)) 5202 return PTR_ERR(new_map); 5203 5204 if (attr->link_update.flags & BPF_F_REPLACE) { 5205 old_map = bpf_map_get(attr->link_update.old_map_fd); 5206 if (IS_ERR(old_map)) { 5207 ret = PTR_ERR(old_map); 5208 goto out_put; 5209 } 5210 } else if (attr->link_update.old_map_fd) { 5211 ret = -EINVAL; 5212 goto out_put; 5213 } 5214 5215 ret = link->ops->update_map(link, new_map, old_map); 5216 5217 if (old_map) 5218 bpf_map_put(old_map); 5219 out_put: 5220 bpf_map_put(new_map); 5221 return ret; 5222 } 5223 5224 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd 5225 5226 static int link_update(union bpf_attr *attr) 5227 { 5228 struct bpf_prog *old_prog = NULL, *new_prog; 5229 struct bpf_link *link; 5230 u32 flags; 5231 int ret; 5232 5233 if (CHECK_ATTR(BPF_LINK_UPDATE)) 5234 return -EINVAL; 5235 5236 flags = attr->link_update.flags; 5237 if (flags & ~BPF_F_REPLACE) 5238 return -EINVAL; 5239 5240 link = bpf_link_get_from_fd(attr->link_update.link_fd); 5241 if (IS_ERR(link)) 5242 return PTR_ERR(link); 5243 5244 if (link->ops->update_map) { 5245 ret = link_update_map(link, attr); 5246 goto out_put_link; 5247 } 5248 5249 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); 5250 if (IS_ERR(new_prog)) { 5251 ret = PTR_ERR(new_prog); 5252 goto out_put_link; 5253 } 5254 5255 if (flags & BPF_F_REPLACE) { 5256 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); 5257 if (IS_ERR(old_prog)) { 5258 ret = PTR_ERR(old_prog); 5259 old_prog = NULL; 5260 goto out_put_progs; 5261 } 5262 } else if (attr->link_update.old_prog_fd) { 5263 ret = -EINVAL; 5264 goto out_put_progs; 5265 } 5266 5267 if (link->ops->update_prog) 5268 ret = link->ops->update_prog(link, new_prog, old_prog); 5269 else 5270 ret = -EINVAL; 5271 5272 out_put_progs: 5273 if (old_prog) 5274 bpf_prog_put(old_prog); 5275 if (ret) 5276 bpf_prog_put(new_prog); 5277 out_put_link: 5278 bpf_link_put_direct(link); 5279 return ret; 5280 } 5281 5282 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd 5283 5284 static int link_detach(union bpf_attr *attr) 5285 { 5286 struct bpf_link *link; 5287 int ret; 5288 5289 if (CHECK_ATTR(BPF_LINK_DETACH)) 5290 return -EINVAL; 5291 5292 link = bpf_link_get_from_fd(attr->link_detach.link_fd); 5293 if (IS_ERR(link)) 5294 return PTR_ERR(link); 
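/* Forced detach: sever the link from its underlying hook (cgroup, netdev, etc.) while the link fd stays open; the link lives on in a defunct state until its last reference is dropped. Link types without a ->detach callback do not support this: */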
5295 5296 if (link->ops->detach) 5297 ret = link->ops->detach(link); 5298 else 5299 ret = -EOPNOTSUPP; 5300 5301 bpf_link_put_direct(link); 5302 return ret; 5303 } 5304 5305 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) 5306 { 5307 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); 5308 } 5309 5310 struct bpf_link *bpf_link_by_id(u32 id) 5311 { 5312 struct bpf_link *link; 5313 5314 if (!id) 5315 return ERR_PTR(-ENOENT); 5316 5317 spin_lock_bh(&link_idr_lock); 5318 /* before link is "settled", ID is 0, pretend it doesn't exist yet */ 5319 link = idr_find(&link_idr, id); 5320 if (link) { 5321 if (link->id) 5322 link = bpf_link_inc_not_zero(link); 5323 else 5324 link = ERR_PTR(-EAGAIN); 5325 } else { 5326 link = ERR_PTR(-ENOENT); 5327 } 5328 spin_unlock_bh(&link_idr_lock); 5329 return link; 5330 } 5331 5332 struct bpf_link *bpf_link_get_curr_or_next(u32 *id) 5333 { 5334 struct bpf_link *link; 5335 5336 spin_lock_bh(&link_idr_lock); 5337 again: 5338 link = idr_get_next(&link_idr, id); 5339 if (link) { 5340 link = bpf_link_inc_not_zero(link); 5341 if (IS_ERR(link)) { 5342 (*id)++; 5343 goto again; 5344 } 5345 } 5346 spin_unlock_bh(&link_idr_lock); 5347 5348 return link; 5349 } 5350 5351 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id 5352 5353 static int bpf_link_get_fd_by_id(const union bpf_attr *attr) 5354 { 5355 struct bpf_link *link; 5356 u32 id = attr->link_id; 5357 int fd; 5358 5359 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) 5360 return -EINVAL; 5361 5362 if (!capable(CAP_SYS_ADMIN)) 5363 return -EPERM; 5364 5365 link = bpf_link_by_id(id); 5366 if (IS_ERR(link)) 5367 return PTR_ERR(link); 5368 5369 fd = bpf_link_new_fd(link); 5370 if (fd < 0) 5371 bpf_link_put_direct(link); 5372 5373 return fd; 5374 } 5375 5376 DEFINE_MUTEX(bpf_stats_enabled_mutex); 5377 5378 static int bpf_stats_release(struct inode *inode, struct file *file) 5379 { 5380 mutex_lock(&bpf_stats_enabled_mutex); 5381 static_key_slow_dec(&bpf_stats_enabled_key.key); 5382 mutex_unlock(&bpf_stats_enabled_mutex); 5383 return 0; 5384 } 5385 5386 static const struct file_operations bpf_stats_fops = { 5387 .release = bpf_stats_release, 5388 }; 5389 5390 static int bpf_enable_runtime_stats(void) 5391 { 5392 int fd; 5393 5394 mutex_lock(&bpf_stats_enabled_mutex); 5395 5396 /* Set a very high limit to avoid overflow */ 5397 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { 5398 mutex_unlock(&bpf_stats_enabled_mutex); 5399 return -EBUSY; 5400 } 5401 5402 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); 5403 if (fd >= 0) 5404 static_key_slow_inc(&bpf_stats_enabled_key.key); 5405 5406 mutex_unlock(&bpf_stats_enabled_mutex); 5407 return fd; 5408 } 5409 5410 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type 5411 5412 static int bpf_enable_stats(union bpf_attr *attr) 5413 { 5414 5415 if (CHECK_ATTR(BPF_ENABLE_STATS)) 5416 return -EINVAL; 5417 5418 if (!capable(CAP_SYS_ADMIN)) 5419 return -EPERM; 5420 5421 switch (attr->enable_stats.type) { 5422 case BPF_STATS_RUN_TIME: 5423 return bpf_enable_runtime_stats(); 5424 default: 5425 break; 5426 } 5427 return -EINVAL; 5428 } 5429 5430 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags 5431 5432 static int bpf_iter_create(union bpf_attr *attr) 5433 { 5434 struct bpf_link *link; 5435 int err; 5436 5437 if (CHECK_ATTR(BPF_ITER_CREATE)) 5438 return -EINVAL; 5439 5440 if (attr->iter_create.flags) 5441 return -EINVAL; 5442 5443 link = bpf_link_get_from_fd(attr->iter_create.link_fd); 5444 if (IS_ERR(link)) 5445 
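/* not a BPF link fd, or the fd is stale: propagate the errno */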
return PTR_ERR(link); 5446 5447 err = bpf_iter_new_fd(link); 5448 bpf_link_put_direct(link); 5449 5450 return err; 5451 } 5452 5453 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags 5454 5455 static int bpf_prog_bind_map(union bpf_attr *attr) 5456 { 5457 struct bpf_prog *prog; 5458 struct bpf_map *map; 5459 struct bpf_map **used_maps_old, **used_maps_new; 5460 int i, ret = 0; 5461 5462 if (CHECK_ATTR(BPF_PROG_BIND_MAP)) 5463 return -EINVAL; 5464 5465 if (attr->prog_bind_map.flags) 5466 return -EINVAL; 5467 5468 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); 5469 if (IS_ERR(prog)) 5470 return PTR_ERR(prog); 5471 5472 map = bpf_map_get(attr->prog_bind_map.map_fd); 5473 if (IS_ERR(map)) { 5474 ret = PTR_ERR(map); 5475 goto out_prog_put; 5476 } 5477 5478 mutex_lock(&prog->aux->used_maps_mutex); 5479 5480 used_maps_old = prog->aux->used_maps; 5481 5482 for (i = 0; i < prog->aux->used_map_cnt; i++) 5483 if (used_maps_old[i] == map) { 5484 bpf_map_put(map); 5485 goto out_unlock; 5486 } 5487 5488 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, 5489 sizeof(used_maps_new[0]), 5490 GFP_KERNEL); 5491 if (!used_maps_new) { 5492 ret = -ENOMEM; 5493 goto out_unlock; 5494 } 5495 5496 /* The bpf program will not access the bpf map, but for the sake of 5497 * simplicity, increase sleepable_refcnt for sleepable program as well. 5498 */ 5499 if (prog->aux->sleepable) 5500 atomic64_inc(&map->sleepable_refcnt); 5501 memcpy(used_maps_new, used_maps_old, 5502 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); 5503 used_maps_new[prog->aux->used_map_cnt] = map; 5504 5505 prog->aux->used_map_cnt++; 5506 prog->aux->used_maps = used_maps_new; 5507 5508 kfree(used_maps_old); 5509 5510 out_unlock: 5511 mutex_unlock(&prog->aux->used_maps_mutex); 5512 5513 if (ret) 5514 bpf_map_put(map); 5515 out_prog_put: 5516 bpf_prog_put(prog); 5517 return ret; 5518 } 5519 5520 #define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd 5521 5522 static int token_create(union bpf_attr *attr) 5523 { 5524 if (CHECK_ATTR(BPF_TOKEN_CREATE)) 5525 return -EINVAL; 5526 5527 /* no flags are supported yet */ 5528 if (attr->token_create.flags) 5529 return -EINVAL; 5530 5531 return bpf_token_create(attr); 5532 } 5533 5534 static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size) 5535 { 5536 union bpf_attr attr; 5537 int err; 5538 5539 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 5540 if (err) 5541 return err; 5542 size = min_t(u32, size, sizeof(attr)); 5543 5544 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 5545 memset(&attr, 0, sizeof(attr)); 5546 if (copy_from_bpfptr(&attr, uattr, size) != 0) 5547 return -EFAULT; 5548 5549 err = security_bpf(cmd, &attr, size); 5550 if (err < 0) 5551 return err; 5552 5553 switch (cmd) { 5554 case BPF_MAP_CREATE: 5555 err = map_create(&attr); 5556 break; 5557 case BPF_MAP_LOOKUP_ELEM: 5558 err = map_lookup_elem(&attr); 5559 break; 5560 case BPF_MAP_UPDATE_ELEM: 5561 err = map_update_elem(&attr, uattr); 5562 break; 5563 case BPF_MAP_DELETE_ELEM: 5564 err = map_delete_elem(&attr, uattr); 5565 break; 5566 case BPF_MAP_GET_NEXT_KEY: 5567 err = map_get_next_key(&attr); 5568 break; 5569 case BPF_MAP_FREEZE: 5570 err = map_freeze(&attr); 5571 break; 5572 case BPF_PROG_LOAD: 5573 err = bpf_prog_load(&attr, uattr, size); 5574 break; 5575 case BPF_OBJ_PIN: 5576 err = bpf_obj_pin(&attr); 5577 break; 5578 case BPF_OBJ_GET: 5579 err = bpf_obj_get(&attr); 5580 break; 5581 case BPF_PROG_ATTACH: 5582 err = bpf_prog_attach(&attr); 5583 break; 5584 case 
BPF_PROG_DETACH: 5585 err = bpf_prog_detach(&attr); 5586 break; 5587 case BPF_PROG_QUERY: 5588 err = bpf_prog_query(&attr, uattr.user); 5589 break; 5590 case BPF_PROG_TEST_RUN: 5591 err = bpf_prog_test_run(&attr, uattr.user); 5592 break; 5593 case BPF_PROG_GET_NEXT_ID: 5594 err = bpf_obj_get_next_id(&attr, uattr.user, 5595 &prog_idr, &prog_idr_lock); 5596 break; 5597 case BPF_MAP_GET_NEXT_ID: 5598 err = bpf_obj_get_next_id(&attr, uattr.user, 5599 &map_idr, &map_idr_lock); 5600 break; 5601 case BPF_BTF_GET_NEXT_ID: 5602 err = bpf_obj_get_next_id(&attr, uattr.user, 5603 &btf_idr, &btf_idr_lock); 5604 break; 5605 case BPF_PROG_GET_FD_BY_ID: 5606 err = bpf_prog_get_fd_by_id(&attr); 5607 break; 5608 case BPF_MAP_GET_FD_BY_ID: 5609 err = bpf_map_get_fd_by_id(&attr); 5610 break; 5611 case BPF_OBJ_GET_INFO_BY_FD: 5612 err = bpf_obj_get_info_by_fd(&attr, uattr.user); 5613 break; 5614 case BPF_RAW_TRACEPOINT_OPEN: 5615 err = bpf_raw_tracepoint_open(&attr); 5616 break; 5617 case BPF_BTF_LOAD: 5618 err = bpf_btf_load(&attr, uattr, size); 5619 break; 5620 case BPF_BTF_GET_FD_BY_ID: 5621 err = bpf_btf_get_fd_by_id(&attr); 5622 break; 5623 case BPF_TASK_FD_QUERY: 5624 err = bpf_task_fd_query(&attr, uattr.user); 5625 break; 5626 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 5627 err = map_lookup_and_delete_elem(&attr); 5628 break; 5629 case BPF_MAP_LOOKUP_BATCH: 5630 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH); 5631 break; 5632 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 5633 err = bpf_map_do_batch(&attr, uattr.user, 5634 BPF_MAP_LOOKUP_AND_DELETE_BATCH); 5635 break; 5636 case BPF_MAP_UPDATE_BATCH: 5637 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH); 5638 break; 5639 case BPF_MAP_DELETE_BATCH: 5640 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH); 5641 break; 5642 case BPF_LINK_CREATE: 5643 err = link_create(&attr, uattr); 5644 break; 5645 case BPF_LINK_UPDATE: 5646 err = link_update(&attr); 5647 break; 5648 case BPF_LINK_GET_FD_BY_ID: 5649 err = bpf_link_get_fd_by_id(&attr); 5650 break; 5651 case BPF_LINK_GET_NEXT_ID: 5652 err = bpf_obj_get_next_id(&attr, uattr.user, 5653 &link_idr, &link_idr_lock); 5654 break; 5655 case BPF_ENABLE_STATS: 5656 err = bpf_enable_stats(&attr); 5657 break; 5658 case BPF_ITER_CREATE: 5659 err = bpf_iter_create(&attr); 5660 break; 5661 case BPF_LINK_DETACH: 5662 err = link_detach(&attr); 5663 break; 5664 case BPF_PROG_BIND_MAP: 5665 err = bpf_prog_bind_map(&attr); 5666 break; 5667 case BPF_TOKEN_CREATE: 5668 err = token_create(&attr); 5669 break; 5670 default: 5671 err = -EINVAL; 5672 break; 5673 } 5674 5675 return err; 5676 } 5677 5678 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 5679 { 5680 return __sys_bpf(cmd, USER_BPFPTR(uattr), size); 5681 } 5682 5683 static bool syscall_prog_is_valid_access(int off, int size, 5684 enum bpf_access_type type, 5685 const struct bpf_prog *prog, 5686 struct bpf_insn_access_aux *info) 5687 { 5688 if (off < 0 || off >= U16_MAX) 5689 return false; 5690 if (off % size != 0) 5691 return false; 5692 return true; 5693 } 5694 5695 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size) 5696 { 5697 switch (cmd) { 5698 case BPF_MAP_CREATE: 5699 case BPF_MAP_DELETE_ELEM: 5700 case BPF_MAP_UPDATE_ELEM: 5701 case BPF_MAP_FREEZE: 5702 case BPF_MAP_GET_FD_BY_ID: 5703 case BPF_PROG_LOAD: 5704 case BPF_BTF_LOAD: 5705 case BPF_LINK_CREATE: 5706 case BPF_RAW_TRACEPOINT_OPEN: 5707 break; 5708 default: 5709 return -EINVAL; 5710 } 5711 return __sys_bpf(cmd, 
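/* attr already sits in kernel memory here (it is the calling BPF_PROG_TYPE_SYSCALL program's buffer), so tag it as a kernel bpfptr; __sys_bpf() then uses memcpy-based copies instead of copy_from_user(): */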
KERNEL_BPFPTR(attr), attr_size); 5712 } 5713 5714 5715 /* Prototype to silence -Wmissing-prototypes. 5716 * This function is used by the kernel light skeleton 5717 * to load bpf programs during kernel boot or when modules are loaded. 5718 * See tools/lib/bpf/skel_internal.h 5719 */ 5720 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); 5721 5722 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size) 5723 { 5724 struct bpf_prog * __maybe_unused prog; 5725 struct bpf_tramp_run_ctx __maybe_unused run_ctx; 5726 5727 switch (cmd) { 5728 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */ 5729 case BPF_PROG_TEST_RUN: 5730 if (attr->test.data_in || attr->test.data_out || 5731 attr->test.ctx_out || attr->test.duration || 5732 attr->test.repeat || attr->test.flags) 5733 return -EINVAL; 5734 5735 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL); 5736 if (IS_ERR(prog)) 5737 return PTR_ERR(prog); 5738 5739 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset || 5740 attr->test.ctx_size_in > U16_MAX) { 5741 bpf_prog_put(prog); 5742 return -EINVAL; 5743 } 5744 5745 run_ctx.bpf_cookie = 0; 5746 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) { 5747 /* recursion detected */ 5748 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx); 5749 bpf_prog_put(prog); 5750 return -EBUSY; 5751 } 5752 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in); 5753 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */, 5754 &run_ctx); 5755 bpf_prog_put(prog); 5756 return 0; 5757 #endif 5758 default: 5759 return ____bpf_sys_bpf(cmd, attr, size); 5760 } 5761 } 5762 EXPORT_SYMBOL(kern_sys_bpf); 5763 5764 static const struct bpf_func_proto bpf_sys_bpf_proto = { 5765 .func = bpf_sys_bpf, 5766 .gpl_only = false, 5767 .ret_type = RET_INTEGER, 5768 .arg1_type = ARG_ANYTHING, 5769 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5770 .arg3_type = ARG_CONST_SIZE, 5771 }; 5772 5773 const struct bpf_func_proto * __weak 5774 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5775 { 5776 return bpf_base_func_proto(func_id, prog); 5777 } 5778 5779 BPF_CALL_1(bpf_sys_close, u32, fd) 5780 { 5781 /* When a bpf program calls this helper, there must be no 5782 * outstanding fdget() without a matching, completed fdput(). 5783 * This helper is allowed only in the following callchain: 5784 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close 5785 */ 5786 return close_fd(fd); 5787 } 5788 5789 static const struct bpf_func_proto bpf_sys_close_proto = { 5790 .func = bpf_sys_close, 5791 .gpl_only = false, 5792 .ret_type = RET_INTEGER, 5793 .arg1_type = ARG_ANYTHING, 5794 }; 5795 5796 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) 5797 { 5798 if (flags) 5799 return -EINVAL; 5800 5801 if (name_sz <= 1 || name[name_sz - 1]) 5802 return -EINVAL; 5803 5804 if (!bpf_dump_raw_ok(current_cred())) 5805 return -EPERM; 5806 5807 *res = kallsyms_lookup_name(name); 5808 return *res ?
0 : -ENOENT; 5809 } 5810 5811 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { 5812 .func = bpf_kallsyms_lookup_name, 5813 .gpl_only = false, 5814 .ret_type = RET_INTEGER, 5815 .arg1_type = ARG_PTR_TO_MEM, 5816 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 5817 .arg3_type = ARG_ANYTHING, 5818 .arg4_type = ARG_PTR_TO_LONG, 5819 }; 5820 5821 static const struct bpf_func_proto * 5822 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5823 { 5824 switch (func_id) { 5825 case BPF_FUNC_sys_bpf: 5826 return !bpf_token_capable(prog->aux->token, CAP_PERFMON) 5827 ? NULL : &bpf_sys_bpf_proto; 5828 case BPF_FUNC_btf_find_by_name_kind: 5829 return &bpf_btf_find_by_name_kind_proto; 5830 case BPF_FUNC_sys_close: 5831 return &bpf_sys_close_proto; 5832 case BPF_FUNC_kallsyms_lookup_name: 5833 return &bpf_kallsyms_lookup_name_proto; 5834 default: 5835 return tracing_prog_func_proto(func_id, prog); 5836 } 5837 } 5838 5839 const struct bpf_verifier_ops bpf_syscall_verifier_ops = { 5840 .get_func_proto = syscall_prog_func_proto, 5841 .is_valid_access = syscall_prog_is_valid_access, 5842 }; 5843 5844 const struct bpf_prog_ops bpf_syscall_prog_ops = { 5845 .test_run = bpf_prog_test_run_syscall, 5846 }; 5847 5848 #ifdef CONFIG_SYSCTL 5849 static int bpf_stats_handler(struct ctl_table *table, int write, 5850 void *buffer, size_t *lenp, loff_t *ppos) 5851 { 5852 struct static_key *key = (struct static_key *)table->data; 5853 static int saved_val; 5854 int val, ret; 5855 struct ctl_table tmp = { 5856 .data = &val, 5857 .maxlen = sizeof(val), 5858 .mode = table->mode, 5859 .extra1 = SYSCTL_ZERO, 5860 .extra2 = SYSCTL_ONE, 5861 }; 5862 5863 if (write && !capable(CAP_SYS_ADMIN)) 5864 return -EPERM; 5865 5866 mutex_lock(&bpf_stats_enabled_mutex); 5867 val = saved_val; 5868 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 5869 if (write && !ret && val != saved_val) { 5870 if (val) 5871 static_key_slow_inc(key); 5872 else 5873 static_key_slow_dec(key); 5874 saved_val = val; 5875 } 5876 mutex_unlock(&bpf_stats_enabled_mutex); 5877 return ret; 5878 } 5879 5880 void __weak unpriv_ebpf_notify(int new_state) 5881 { 5882 } 5883 5884 static int bpf_unpriv_handler(struct ctl_table *table, int write, 5885 void *buffer, size_t *lenp, loff_t *ppos) 5886 { 5887 int ret, unpriv_enable = *(int *)table->data; 5888 bool locked_state = unpriv_enable == 1; 5889 struct ctl_table tmp = *table; 5890 5891 if (write && !capable(CAP_SYS_ADMIN)) 5892 return -EPERM; 5893 5894 tmp.data = &unpriv_enable; 5895 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 5896 if (write && !ret) { 5897 if (locked_state && unpriv_enable != 1) 5898 return -EPERM; 5899 *(int *)table->data = unpriv_enable; 5900 } 5901 5902 if (write) 5903 unpriv_ebpf_notify(unpriv_enable); 5904 5905 return ret; 5906 } 5907 5908 static struct ctl_table bpf_syscall_table[] = { 5909 { 5910 .procname = "unprivileged_bpf_disabled", 5911 .data = &sysctl_unprivileged_bpf_disabled, 5912 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), 5913 .mode = 0644, 5914 .proc_handler = bpf_unpriv_handler, 5915 .extra1 = SYSCTL_ZERO, 5916 .extra2 = SYSCTL_TWO, 5917 }, 5918 { 5919 .procname = "bpf_stats_enabled", 5920 .data = &bpf_stats_enabled_key.key, 5921 .mode = 0644, 5922 .proc_handler = bpf_stats_handler, 5923 }, 5924 { } 5925 }; 5926 5927 static int __init bpf_syscall_sysctl_init(void) 5928 { 5929 register_sysctl_init("kernel", bpf_syscall_table); 5930 return 0; 5931 } 5932 late_initcall(bpf_syscall_sysctl_init); 5933 #endif /* 
CONFIG_SYSCTL */ 5934