1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #include <linux/bpf.h> 5 #include <linux/bpf_trace.h> 6 #include <linux/bpf_lirc.h> 7 #include <linux/btf.h> 8 #include <linux/syscalls.h> 9 #include <linux/slab.h> 10 #include <linux/sched/signal.h> 11 #include <linux/vmalloc.h> 12 #include <linux/mmzone.h> 13 #include <linux/anon_inodes.h> 14 #include <linux/fdtable.h> 15 #include <linux/file.h> 16 #include <linux/fs.h> 17 #include <linux/license.h> 18 #include <linux/filter.h> 19 #include <linux/version.h> 20 #include <linux/kernel.h> 21 #include <linux/idr.h> 22 #include <linux/cred.h> 23 #include <linux/timekeeping.h> 24 #include <linux/ctype.h> 25 #include <linux/nospec.h> 26 27 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \ 28 (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ 29 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ 30 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 31 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) 32 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map)) 33 34 #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY) 35 36 DEFINE_PER_CPU(int, bpf_prog_active); 37 static DEFINE_IDR(prog_idr); 38 static DEFINE_SPINLOCK(prog_idr_lock); 39 static DEFINE_IDR(map_idr); 40 static DEFINE_SPINLOCK(map_idr_lock); 41 42 int sysctl_unprivileged_bpf_disabled __read_mostly; 43 44 static const struct bpf_map_ops * const bpf_map_types[] = { 45 #define BPF_PROG_TYPE(_id, _ops) 46 #define BPF_MAP_TYPE(_id, _ops) \ 47 [_id] = &_ops, 48 #include <linux/bpf_types.h> 49 #undef BPF_PROG_TYPE 50 #undef BPF_MAP_TYPE 51 }; 52 53 /* 54 * If we're handed a bigger struct than we know of, ensure all the unknown bits 55 * are 0 - i.e. new user-space does not rely on any kernel feature extensions 56 * we don't know about yet. 57 * 58 * There is a ToCToU between this function call and the following 59 * copy_from_user() call. However, this is not a concern since this function is 60 * meant to be a future-proofing of bits. 
61 */ 62 int bpf_check_uarg_tail_zero(void __user *uaddr, 63 size_t expected_size, 64 size_t actual_size) 65 { 66 unsigned char __user *addr; 67 unsigned char __user *end; 68 unsigned char val; 69 int err; 70 71 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ 72 return -E2BIG; 73 74 if (unlikely(!access_ok(uaddr, actual_size))) 75 return -EFAULT; 76 77 if (actual_size <= expected_size) 78 return 0; 79 80 addr = uaddr + expected_size; 81 end = uaddr + actual_size; 82 83 for (; addr < end; addr++) { 84 err = get_user(val, addr); 85 if (err) 86 return err; 87 if (val) 88 return -E2BIG; 89 } 90 91 return 0; 92 } 93 94 const struct bpf_map_ops bpf_map_offload_ops = { 95 .map_alloc = bpf_map_offload_map_alloc, 96 .map_free = bpf_map_offload_map_free, 97 .map_check_btf = map_check_no_btf, 98 }; 99 100 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) 101 { 102 const struct bpf_map_ops *ops; 103 u32 type = attr->map_type; 104 struct bpf_map *map; 105 int err; 106 107 if (type >= ARRAY_SIZE(bpf_map_types)) 108 return ERR_PTR(-EINVAL); 109 type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types)); 110 ops = bpf_map_types[type]; 111 if (!ops) 112 return ERR_PTR(-EINVAL); 113 114 if (ops->map_alloc_check) { 115 err = ops->map_alloc_check(attr); 116 if (err) 117 return ERR_PTR(err); 118 } 119 if (attr->map_ifindex) 120 ops = &bpf_map_offload_ops; 121 map = ops->map_alloc(attr); 122 if (IS_ERR(map)) 123 return map; 124 map->ops = ops; 125 map->map_type = type; 126 return map; 127 } 128 129 void *bpf_map_area_alloc(size_t size, int numa_node) 130 { 131 /* We really just want to fail instead of triggering OOM killer 132 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, 133 * which is used for lower order allocation requests. 134 * 135 * It has been observed that higher order allocation requests done by 136 * vmalloc with __GFP_NORETRY being set might fail due to not trying 137 * to reclaim memory from the page cache, thus we set 138 * __GFP_RETRY_MAYFAIL to avoid such situations. 139 */ 140 141 const gfp_t flags = __GFP_NOWARN | __GFP_ZERO; 142 void *area; 143 144 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 145 area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, 146 numa_node); 147 if (area != NULL) 148 return area; 149 } 150 151 return __vmalloc_node_flags_caller(size, numa_node, 152 GFP_KERNEL | __GFP_RETRY_MAYFAIL | 153 flags, __builtin_return_address(0)); 154 } 155 156 void bpf_map_area_free(void *area) 157 { 158 kvfree(area); 159 } 160 161 static u32 bpf_map_flags_retain_permanent(u32 flags) 162 { 163 /* Some map creation flags are not tied to the map object but 164 * rather to the map fd instead, so they have no meaning upon 165 * map object inspection since multiple file descriptors with 166 * different (access) properties can exist here. Thus, given 167 * this has zero meaning for the map itself, lets clear these 168 * from here. 
169 */ 170 return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY); 171 } 172 173 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) 174 { 175 map->map_type = attr->map_type; 176 map->key_size = attr->key_size; 177 map->value_size = attr->value_size; 178 map->max_entries = attr->max_entries; 179 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); 180 map->numa_node = bpf_map_attr_numa_node(attr); 181 } 182 183 int bpf_map_precharge_memlock(u32 pages) 184 { 185 struct user_struct *user = get_current_user(); 186 unsigned long memlock_limit, cur; 187 188 memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 189 cur = atomic_long_read(&user->locked_vm); 190 free_uid(user); 191 if (cur + pages > memlock_limit) 192 return -EPERM; 193 return 0; 194 } 195 196 static int bpf_charge_memlock(struct user_struct *user, u32 pages) 197 { 198 unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 199 200 if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) { 201 atomic_long_sub(pages, &user->locked_vm); 202 return -EPERM; 203 } 204 return 0; 205 } 206 207 static void bpf_uncharge_memlock(struct user_struct *user, u32 pages) 208 { 209 atomic_long_sub(pages, &user->locked_vm); 210 } 211 212 static int bpf_map_init_memlock(struct bpf_map *map) 213 { 214 struct user_struct *user = get_current_user(); 215 int ret; 216 217 ret = bpf_charge_memlock(user, map->pages); 218 if (ret) { 219 free_uid(user); 220 return ret; 221 } 222 map->user = user; 223 return ret; 224 } 225 226 static void bpf_map_release_memlock(struct bpf_map *map) 227 { 228 struct user_struct *user = map->user; 229 bpf_uncharge_memlock(user, map->pages); 230 free_uid(user); 231 } 232 233 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages) 234 { 235 int ret; 236 237 ret = bpf_charge_memlock(map->user, pages); 238 if (ret) 239 return ret; 240 map->pages += pages; 241 return ret; 242 } 243 244 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages) 245 { 246 bpf_uncharge_memlock(map->user, pages); 247 map->pages -= pages; 248 } 249 250 static int bpf_map_alloc_id(struct bpf_map *map) 251 { 252 int id; 253 254 idr_preload(GFP_KERNEL); 255 spin_lock_bh(&map_idr_lock); 256 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); 257 if (id > 0) 258 map->id = id; 259 spin_unlock_bh(&map_idr_lock); 260 idr_preload_end(); 261 262 if (WARN_ON_ONCE(!id)) 263 return -ENOSPC; 264 265 return id > 0 ? 0 : id; 266 } 267 268 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) 269 { 270 unsigned long flags; 271 272 /* Offloaded maps are removed from the IDR store when their device 273 * disappears - even if someone holds an fd to them they are unusable, 274 * the memory is gone, all ops will fail; they are simply waiting for 275 * refcnt to drop to be freed. 
276 */ 277 if (!map->id) 278 return; 279 280 if (do_idr_lock) 281 spin_lock_irqsave(&map_idr_lock, flags); 282 else 283 __acquire(&map_idr_lock); 284 285 idr_remove(&map_idr, map->id); 286 map->id = 0; 287 288 if (do_idr_lock) 289 spin_unlock_irqrestore(&map_idr_lock, flags); 290 else 291 __release(&map_idr_lock); 292 } 293 294 /* called from workqueue */ 295 static void bpf_map_free_deferred(struct work_struct *work) 296 { 297 struct bpf_map *map = container_of(work, struct bpf_map, work); 298 299 bpf_map_release_memlock(map); 300 security_bpf_map_free(map); 301 /* implementation dependent freeing */ 302 map->ops->map_free(map); 303 } 304 305 static void bpf_map_put_uref(struct bpf_map *map) 306 { 307 if (atomic_dec_and_test(&map->usercnt)) { 308 if (map->ops->map_release_uref) 309 map->ops->map_release_uref(map); 310 } 311 } 312 313 /* decrement map refcnt and schedule it for freeing via workqueue 314 * (unrelying map implementation ops->map_free() might sleep) 315 */ 316 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) 317 { 318 if (atomic_dec_and_test(&map->refcnt)) { 319 /* bpf_map_free_id() must be called first */ 320 bpf_map_free_id(map, do_idr_lock); 321 btf_put(map->btf); 322 INIT_WORK(&map->work, bpf_map_free_deferred); 323 schedule_work(&map->work); 324 } 325 } 326 327 void bpf_map_put(struct bpf_map *map) 328 { 329 __bpf_map_put(map, true); 330 } 331 EXPORT_SYMBOL_GPL(bpf_map_put); 332 333 void bpf_map_put_with_uref(struct bpf_map *map) 334 { 335 bpf_map_put_uref(map); 336 bpf_map_put(map); 337 } 338 339 static int bpf_map_release(struct inode *inode, struct file *filp) 340 { 341 struct bpf_map *map = filp->private_data; 342 343 if (map->ops->map_release) 344 map->ops->map_release(map, filp); 345 346 bpf_map_put_with_uref(map); 347 return 0; 348 } 349 350 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) 351 { 352 fmode_t mode = f.file->f_mode; 353 354 /* Our file permissions may have been overridden by global 355 * map permissions facing syscall side. 356 */ 357 if (READ_ONCE(map->frozen)) 358 mode &= ~FMODE_CAN_WRITE; 359 return mode; 360 } 361 362 #ifdef CONFIG_PROC_FS 363 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) 364 { 365 const struct bpf_map *map = filp->private_data; 366 const struct bpf_array *array; 367 u32 owner_prog_type = 0; 368 u32 owner_jited = 0; 369 370 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { 371 array = container_of(map, struct bpf_array, map); 372 owner_prog_type = array->owner_prog_type; 373 owner_jited = array->owner_jited; 374 } 375 376 seq_printf(m, 377 "map_type:\t%u\n" 378 "key_size:\t%u\n" 379 "value_size:\t%u\n" 380 "max_entries:\t%u\n" 381 "map_flags:\t%#x\n" 382 "memlock:\t%llu\n" 383 "map_id:\t%u\n" 384 "frozen:\t%u\n", 385 map->map_type, 386 map->key_size, 387 map->value_size, 388 map->max_entries, 389 map->map_flags, 390 map->pages * 1ULL << PAGE_SHIFT, 391 map->id, 392 READ_ONCE(map->frozen)); 393 394 if (owner_prog_type) { 395 seq_printf(m, "owner_prog_type:\t%u\n", 396 owner_prog_type); 397 seq_printf(m, "owner_jited:\t%u\n", 398 owner_jited); 399 } 400 } 401 #endif 402 403 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz, 404 loff_t *ppos) 405 { 406 /* We need this handler such that alloc_file() enables 407 * f_mode with FMODE_CAN_READ. 
408 */ 409 return -EINVAL; 410 } 411 412 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf, 413 size_t siz, loff_t *ppos) 414 { 415 /* We need this handler such that alloc_file() enables 416 * f_mode with FMODE_CAN_WRITE. 417 */ 418 return -EINVAL; 419 } 420 421 const struct file_operations bpf_map_fops = { 422 #ifdef CONFIG_PROC_FS 423 .show_fdinfo = bpf_map_show_fdinfo, 424 #endif 425 .release = bpf_map_release, 426 .read = bpf_dummy_read, 427 .write = bpf_dummy_write, 428 }; 429 430 int bpf_map_new_fd(struct bpf_map *map, int flags) 431 { 432 int ret; 433 434 ret = security_bpf_map(map, OPEN_FMODE(flags)); 435 if (ret < 0) 436 return ret; 437 438 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, 439 flags | O_CLOEXEC); 440 } 441 442 int bpf_get_file_flag(int flags) 443 { 444 if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY)) 445 return -EINVAL; 446 if (flags & BPF_F_RDONLY) 447 return O_RDONLY; 448 if (flags & BPF_F_WRONLY) 449 return O_WRONLY; 450 return O_RDWR; 451 } 452 453 /* helper macro to check that unused fields 'union bpf_attr' are zero */ 454 #define CHECK_ATTR(CMD) \ 455 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \ 456 sizeof(attr->CMD##_LAST_FIELD), 0, \ 457 sizeof(*attr) - \ 458 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ 459 sizeof(attr->CMD##_LAST_FIELD)) != NULL 460 461 /* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes. 462 * Return 0 on success and < 0 on error. 463 */ 464 static int bpf_obj_name_cpy(char *dst, const char *src) 465 { 466 const char *end = src + BPF_OBJ_NAME_LEN; 467 468 memset(dst, 0, BPF_OBJ_NAME_LEN); 469 /* Copy all isalnum(), '_' and '.' chars. */ 470 while (src < end && *src) { 471 if (!isalnum(*src) && 472 *src != '_' && *src != '.') 473 return -EINVAL; 474 *dst++ = *src++; 475 } 476 477 /* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */ 478 if (src == end) 479 return -EINVAL; 480 481 return 0; 482 } 483 484 int map_check_no_btf(const struct bpf_map *map, 485 const struct btf *btf, 486 const struct btf_type *key_type, 487 const struct btf_type *value_type) 488 { 489 return -ENOTSUPP; 490 } 491 492 static int map_check_btf(struct bpf_map *map, const struct btf *btf, 493 u32 btf_key_id, u32 btf_value_id) 494 { 495 const struct btf_type *key_type, *value_type; 496 u32 key_size, value_size; 497 int ret = 0; 498 499 /* Some maps allow key to be unspecified. 
*/ 500 if (btf_key_id) { 501 key_type = btf_type_id_size(btf, &btf_key_id, &key_size); 502 if (!key_type || key_size != map->key_size) 503 return -EINVAL; 504 } else { 505 key_type = btf_type_by_id(btf, 0); 506 if (!map->ops->map_check_btf) 507 return -EINVAL; 508 } 509 510 value_type = btf_type_id_size(btf, &btf_value_id, &value_size); 511 if (!value_type || value_size != map->value_size) 512 return -EINVAL; 513 514 map->spin_lock_off = btf_find_spin_lock(btf, value_type); 515 516 if (map_value_has_spin_lock(map)) { 517 if (map->map_flags & BPF_F_RDONLY_PROG) 518 return -EACCES; 519 if (map->map_type != BPF_MAP_TYPE_HASH && 520 map->map_type != BPF_MAP_TYPE_ARRAY && 521 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 522 map->map_type != BPF_MAP_TYPE_SK_STORAGE) 523 return -ENOTSUPP; 524 if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > 525 map->value_size) { 526 WARN_ONCE(1, 527 "verifier bug spin_lock_off %d value_size %d\n", 528 map->spin_lock_off, map->value_size); 529 return -EFAULT; 530 } 531 } 532 533 if (map->ops->map_check_btf) 534 ret = map->ops->map_check_btf(map, btf, key_type, value_type); 535 536 return ret; 537 } 538 539 #define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id 540 /* called via syscall */ 541 static int map_create(union bpf_attr *attr) 542 { 543 int numa_node = bpf_map_attr_numa_node(attr); 544 struct bpf_map *map; 545 int f_flags; 546 int err; 547 548 err = CHECK_ATTR(BPF_MAP_CREATE); 549 if (err) 550 return -EINVAL; 551 552 f_flags = bpf_get_file_flag(attr->map_flags); 553 if (f_flags < 0) 554 return f_flags; 555 556 if (numa_node != NUMA_NO_NODE && 557 ((unsigned int)numa_node >= nr_node_ids || 558 !node_online(numa_node))) 559 return -EINVAL; 560 561 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */ 562 map = find_and_alloc_map(attr); 563 if (IS_ERR(map)) 564 return PTR_ERR(map); 565 566 err = bpf_obj_name_cpy(map->name, attr->map_name); 567 if (err) 568 goto free_map_nouncharge; 569 570 atomic_set(&map->refcnt, 1); 571 atomic_set(&map->usercnt, 1); 572 573 if (attr->btf_key_type_id || attr->btf_value_type_id) { 574 struct btf *btf; 575 576 if (!attr->btf_value_type_id) { 577 err = -EINVAL; 578 goto free_map_nouncharge; 579 } 580 581 btf = btf_get_by_fd(attr->btf_fd); 582 if (IS_ERR(btf)) { 583 err = PTR_ERR(btf); 584 goto free_map_nouncharge; 585 } 586 587 err = map_check_btf(map, btf, attr->btf_key_type_id, 588 attr->btf_value_type_id); 589 if (err) { 590 btf_put(btf); 591 goto free_map_nouncharge; 592 } 593 594 map->btf = btf; 595 map->btf_key_type_id = attr->btf_key_type_id; 596 map->btf_value_type_id = attr->btf_value_type_id; 597 } else { 598 map->spin_lock_off = -EINVAL; 599 } 600 601 err = security_bpf_map_alloc(map); 602 if (err) 603 goto free_map_nouncharge; 604 605 err = bpf_map_init_memlock(map); 606 if (err) 607 goto free_map_sec; 608 609 err = bpf_map_alloc_id(map); 610 if (err) 611 goto free_map; 612 613 err = bpf_map_new_fd(map, f_flags); 614 if (err < 0) { 615 /* failed to allocate fd. 616 * bpf_map_put_with_uref() is needed because the above 617 * bpf_map_alloc_id() has published the map 618 * to the userspace and the userspace may 619 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. 620 */ 621 bpf_map_put_with_uref(map); 622 return err; 623 } 624 625 return err; 626 627 free_map: 628 bpf_map_release_memlock(map); 629 free_map_sec: 630 security_bpf_map_free(map); 631 free_map_nouncharge: 632 btf_put(map->btf); 633 map->ops->map_free(map); 634 return err; 635 } 636 637 /* if error is returned, fd is released. 
638 * On success caller should complete fd access with matching fdput() 639 */ 640 struct bpf_map *__bpf_map_get(struct fd f) 641 { 642 if (!f.file) 643 return ERR_PTR(-EBADF); 644 if (f.file->f_op != &bpf_map_fops) { 645 fdput(f); 646 return ERR_PTR(-EINVAL); 647 } 648 649 return f.file->private_data; 650 } 651 652 /* prog's and map's refcnt limit */ 653 #define BPF_MAX_REFCNT 32768 654 655 struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref) 656 { 657 if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) { 658 atomic_dec(&map->refcnt); 659 return ERR_PTR(-EBUSY); 660 } 661 if (uref) 662 atomic_inc(&map->usercnt); 663 return map; 664 } 665 EXPORT_SYMBOL_GPL(bpf_map_inc); 666 667 struct bpf_map *bpf_map_get_with_uref(u32 ufd) 668 { 669 struct fd f = fdget(ufd); 670 struct bpf_map *map; 671 672 map = __bpf_map_get(f); 673 if (IS_ERR(map)) 674 return map; 675 676 map = bpf_map_inc(map, true); 677 fdput(f); 678 679 return map; 680 } 681 682 /* map_idr_lock should have been held */ 683 static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, 684 bool uref) 685 { 686 int refold; 687 688 refold = atomic_fetch_add_unless(&map->refcnt, 1, 0); 689 690 if (refold >= BPF_MAX_REFCNT) { 691 __bpf_map_put(map, false); 692 return ERR_PTR(-EBUSY); 693 } 694 695 if (!refold) 696 return ERR_PTR(-ENOENT); 697 698 if (uref) 699 atomic_inc(&map->usercnt); 700 701 return map; 702 } 703 704 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) 705 { 706 return -ENOTSUPP; 707 } 708 709 static void *__bpf_copy_key(void __user *ukey, u64 key_size) 710 { 711 if (key_size) 712 return memdup_user(ukey, key_size); 713 714 if (ukey) 715 return ERR_PTR(-EINVAL); 716 717 return NULL; 718 } 719 720 /* last field in 'union bpf_attr' used by this command */ 721 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags 722 723 static int map_lookup_elem(union bpf_attr *attr) 724 { 725 void __user *ukey = u64_to_user_ptr(attr->key); 726 void __user *uvalue = u64_to_user_ptr(attr->value); 727 int ufd = attr->map_fd; 728 struct bpf_map *map; 729 void *key, *value, *ptr; 730 u32 value_size; 731 struct fd f; 732 int err; 733 734 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) 735 return -EINVAL; 736 737 if (attr->flags & ~BPF_F_LOCK) 738 return -EINVAL; 739 740 f = fdget(ufd); 741 map = __bpf_map_get(f); 742 if (IS_ERR(map)) 743 return PTR_ERR(map); 744 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 745 err = -EPERM; 746 goto err_put; 747 } 748 749 if ((attr->flags & BPF_F_LOCK) && 750 !map_value_has_spin_lock(map)) { 751 err = -EINVAL; 752 goto err_put; 753 } 754 755 key = __bpf_copy_key(ukey, map->key_size); 756 if (IS_ERR(key)) { 757 err = PTR_ERR(key); 758 goto err_put; 759 } 760 761 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 762 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || 763 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || 764 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 765 value_size = round_up(map->value_size, 8) * num_possible_cpus(); 766 else if (IS_FD_MAP(map)) 767 value_size = sizeof(u32); 768 else 769 value_size = map->value_size; 770 771 err = -ENOMEM; 772 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 773 if (!value) 774 goto free_key; 775 776 if (bpf_map_is_dev_bound(map)) { 777 err = bpf_map_offload_lookup_elem(map, key, value); 778 goto done; 779 } 780 781 preempt_disable(); 782 this_cpu_inc(bpf_prog_active); 783 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 784 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 785 err = bpf_percpu_hash_copy(map, key, value); 786 } else 
if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 787 err = bpf_percpu_array_copy(map, key, value); 788 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 789 err = bpf_percpu_cgroup_storage_copy(map, key, value); 790 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { 791 err = bpf_stackmap_copy(map, key, value); 792 } else if (IS_FD_ARRAY(map)) { 793 err = bpf_fd_array_map_lookup_elem(map, key, value); 794 } else if (IS_FD_HASH(map)) { 795 err = bpf_fd_htab_map_lookup_elem(map, key, value); 796 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 797 err = bpf_fd_reuseport_array_lookup_elem(map, key, value); 798 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || 799 map->map_type == BPF_MAP_TYPE_STACK) { 800 err = map->ops->map_peek_elem(map, value); 801 } else { 802 rcu_read_lock(); 803 if (map->ops->map_lookup_elem_sys_only) 804 ptr = map->ops->map_lookup_elem_sys_only(map, key); 805 else 806 ptr = map->ops->map_lookup_elem(map, key); 807 if (IS_ERR(ptr)) { 808 err = PTR_ERR(ptr); 809 } else if (!ptr) { 810 err = -ENOENT; 811 } else { 812 err = 0; 813 if (attr->flags & BPF_F_LOCK) 814 /* lock 'ptr' and copy everything but lock */ 815 copy_map_value_locked(map, value, ptr, true); 816 else 817 copy_map_value(map, value, ptr); 818 /* mask lock, since value wasn't zero inited */ 819 check_and_init_map_lock(map, value); 820 } 821 rcu_read_unlock(); 822 } 823 this_cpu_dec(bpf_prog_active); 824 preempt_enable(); 825 826 done: 827 if (err) 828 goto free_value; 829 830 err = -EFAULT; 831 if (copy_to_user(uvalue, value, value_size) != 0) 832 goto free_value; 833 834 err = 0; 835 836 free_value: 837 kfree(value); 838 free_key: 839 kfree(key); 840 err_put: 841 fdput(f); 842 return err; 843 } 844 845 static void maybe_wait_bpf_programs(struct bpf_map *map) 846 { 847 /* Wait for any running BPF programs to complete so that 848 * userspace, when we return to it, knows that all programs 849 * that could be running use the new map value. 
850 */ 851 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || 852 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 853 synchronize_rcu(); 854 } 855 856 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags 857 858 static int map_update_elem(union bpf_attr *attr) 859 { 860 void __user *ukey = u64_to_user_ptr(attr->key); 861 void __user *uvalue = u64_to_user_ptr(attr->value); 862 int ufd = attr->map_fd; 863 struct bpf_map *map; 864 void *key, *value; 865 u32 value_size; 866 struct fd f; 867 int err; 868 869 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM)) 870 return -EINVAL; 871 872 f = fdget(ufd); 873 map = __bpf_map_get(f); 874 if (IS_ERR(map)) 875 return PTR_ERR(map); 876 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 877 err = -EPERM; 878 goto err_put; 879 } 880 881 if ((attr->flags & BPF_F_LOCK) && 882 !map_value_has_spin_lock(map)) { 883 err = -EINVAL; 884 goto err_put; 885 } 886 887 key = __bpf_copy_key(ukey, map->key_size); 888 if (IS_ERR(key)) { 889 err = PTR_ERR(key); 890 goto err_put; 891 } 892 893 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 894 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || 895 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || 896 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 897 value_size = round_up(map->value_size, 8) * num_possible_cpus(); 898 else 899 value_size = map->value_size; 900 901 err = -ENOMEM; 902 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 903 if (!value) 904 goto free_key; 905 906 err = -EFAULT; 907 if (copy_from_user(value, uvalue, value_size) != 0) 908 goto free_value; 909 910 /* Need to create a kthread, thus must support schedule */ 911 if (bpf_map_is_dev_bound(map)) { 912 err = bpf_map_offload_update_elem(map, key, value, attr->flags); 913 goto out; 914 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || 915 map->map_type == BPF_MAP_TYPE_SOCKHASH || 916 map->map_type == BPF_MAP_TYPE_SOCKMAP) { 917 err = map->ops->map_update_elem(map, key, value, attr->flags); 918 goto out; 919 } 920 921 /* must increment bpf_prog_active to avoid kprobe+bpf triggering from 922 * inside bpf map update or delete otherwise deadlocks are possible 923 */ 924 preempt_disable(); 925 __this_cpu_inc(bpf_prog_active); 926 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 927 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 928 err = bpf_percpu_hash_update(map, key, value, attr->flags); 929 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 930 err = bpf_percpu_array_update(map, key, value, attr->flags); 931 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 932 err = bpf_percpu_cgroup_storage_update(map, key, value, 933 attr->flags); 934 } else if (IS_FD_ARRAY(map)) { 935 rcu_read_lock(); 936 err = bpf_fd_array_map_update_elem(map, f.file, key, value, 937 attr->flags); 938 rcu_read_unlock(); 939 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { 940 rcu_read_lock(); 941 err = bpf_fd_htab_map_update_elem(map, f.file, key, value, 942 attr->flags); 943 rcu_read_unlock(); 944 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 945 /* rcu_read_lock() is not needed */ 946 err = bpf_fd_reuseport_array_update_elem(map, key, value, 947 attr->flags); 948 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || 949 map->map_type == BPF_MAP_TYPE_STACK) { 950 err = map->ops->map_push_elem(map, value, attr->flags); 951 } else { 952 rcu_read_lock(); 953 err = map->ops->map_update_elem(map, key, value, attr->flags); 954 rcu_read_unlock(); 955 } 956 __this_cpu_dec(bpf_prog_active); 957 preempt_enable(); 958 maybe_wait_bpf_programs(map); 959 out: 960 
free_value: 961 kfree(value); 962 free_key: 963 kfree(key); 964 err_put: 965 fdput(f); 966 return err; 967 } 968 969 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key 970 971 static int map_delete_elem(union bpf_attr *attr) 972 { 973 void __user *ukey = u64_to_user_ptr(attr->key); 974 int ufd = attr->map_fd; 975 struct bpf_map *map; 976 struct fd f; 977 void *key; 978 int err; 979 980 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM)) 981 return -EINVAL; 982 983 f = fdget(ufd); 984 map = __bpf_map_get(f); 985 if (IS_ERR(map)) 986 return PTR_ERR(map); 987 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 988 err = -EPERM; 989 goto err_put; 990 } 991 992 key = __bpf_copy_key(ukey, map->key_size); 993 if (IS_ERR(key)) { 994 err = PTR_ERR(key); 995 goto err_put; 996 } 997 998 if (bpf_map_is_dev_bound(map)) { 999 err = bpf_map_offload_delete_elem(map, key); 1000 goto out; 1001 } 1002 1003 preempt_disable(); 1004 __this_cpu_inc(bpf_prog_active); 1005 rcu_read_lock(); 1006 err = map->ops->map_delete_elem(map, key); 1007 rcu_read_unlock(); 1008 __this_cpu_dec(bpf_prog_active); 1009 preempt_enable(); 1010 maybe_wait_bpf_programs(map); 1011 out: 1012 kfree(key); 1013 err_put: 1014 fdput(f); 1015 return err; 1016 } 1017 1018 /* last field in 'union bpf_attr' used by this command */ 1019 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key 1020 1021 static int map_get_next_key(union bpf_attr *attr) 1022 { 1023 void __user *ukey = u64_to_user_ptr(attr->key); 1024 void __user *unext_key = u64_to_user_ptr(attr->next_key); 1025 int ufd = attr->map_fd; 1026 struct bpf_map *map; 1027 void *key, *next_key; 1028 struct fd f; 1029 int err; 1030 1031 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY)) 1032 return -EINVAL; 1033 1034 f = fdget(ufd); 1035 map = __bpf_map_get(f); 1036 if (IS_ERR(map)) 1037 return PTR_ERR(map); 1038 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 1039 err = -EPERM; 1040 goto err_put; 1041 } 1042 1043 if (ukey) { 1044 key = __bpf_copy_key(ukey, map->key_size); 1045 if (IS_ERR(key)) { 1046 err = PTR_ERR(key); 1047 goto err_put; 1048 } 1049 } else { 1050 key = NULL; 1051 } 1052 1053 err = -ENOMEM; 1054 next_key = kmalloc(map->key_size, GFP_USER); 1055 if (!next_key) 1056 goto free_key; 1057 1058 if (bpf_map_is_dev_bound(map)) { 1059 err = bpf_map_offload_get_next_key(map, key, next_key); 1060 goto out; 1061 } 1062 1063 rcu_read_lock(); 1064 err = map->ops->map_get_next_key(map, key, next_key); 1065 rcu_read_unlock(); 1066 out: 1067 if (err) 1068 goto free_next_key; 1069 1070 err = -EFAULT; 1071 if (copy_to_user(unext_key, next_key, map->key_size) != 0) 1072 goto free_next_key; 1073 1074 err = 0; 1075 1076 free_next_key: 1077 kfree(next_key); 1078 free_key: 1079 kfree(key); 1080 err_put: 1081 fdput(f); 1082 return err; 1083 } 1084 1085 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value 1086 1087 static int map_lookup_and_delete_elem(union bpf_attr *attr) 1088 { 1089 void __user *ukey = u64_to_user_ptr(attr->key); 1090 void __user *uvalue = u64_to_user_ptr(attr->value); 1091 int ufd = attr->map_fd; 1092 struct bpf_map *map; 1093 void *key, *value; 1094 u32 value_size; 1095 struct fd f; 1096 int err; 1097 1098 if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM)) 1099 return -EINVAL; 1100 1101 f = fdget(ufd); 1102 map = __bpf_map_get(f); 1103 if (IS_ERR(map)) 1104 return PTR_ERR(map); 1105 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1106 err = -EPERM; 1107 goto err_put; 1108 } 1109 1110 key = __bpf_copy_key(ukey, map->key_size); 1111 if (IS_ERR(key)) { 1112 err = PTR_ERR(key); 1113 goto err_put; 1114 } 1115 
1116 value_size = map->value_size; 1117 1118 err = -ENOMEM; 1119 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 1120 if (!value) 1121 goto free_key; 1122 1123 if (map->map_type == BPF_MAP_TYPE_QUEUE || 1124 map->map_type == BPF_MAP_TYPE_STACK) { 1125 err = map->ops->map_pop_elem(map, value); 1126 } else { 1127 err = -ENOTSUPP; 1128 } 1129 1130 if (err) 1131 goto free_value; 1132 1133 if (copy_to_user(uvalue, value, value_size) != 0) 1134 goto free_value; 1135 1136 err = 0; 1137 1138 free_value: 1139 kfree(value); 1140 free_key: 1141 kfree(key); 1142 err_put: 1143 fdput(f); 1144 return err; 1145 } 1146 1147 #define BPF_MAP_FREEZE_LAST_FIELD map_fd 1148 1149 static int map_freeze(const union bpf_attr *attr) 1150 { 1151 int err = 0, ufd = attr->map_fd; 1152 struct bpf_map *map; 1153 struct fd f; 1154 1155 if (CHECK_ATTR(BPF_MAP_FREEZE)) 1156 return -EINVAL; 1157 1158 f = fdget(ufd); 1159 map = __bpf_map_get(f); 1160 if (IS_ERR(map)) 1161 return PTR_ERR(map); 1162 if (READ_ONCE(map->frozen)) { 1163 err = -EBUSY; 1164 goto err_put; 1165 } 1166 if (!capable(CAP_SYS_ADMIN)) { 1167 err = -EPERM; 1168 goto err_put; 1169 } 1170 1171 WRITE_ONCE(map->frozen, true); 1172 err_put: 1173 fdput(f); 1174 return err; 1175 } 1176 1177 static const struct bpf_prog_ops * const bpf_prog_types[] = { 1178 #define BPF_PROG_TYPE(_id, _name) \ 1179 [_id] = & _name ## _prog_ops, 1180 #define BPF_MAP_TYPE(_id, _ops) 1181 #include <linux/bpf_types.h> 1182 #undef BPF_PROG_TYPE 1183 #undef BPF_MAP_TYPE 1184 }; 1185 1186 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) 1187 { 1188 const struct bpf_prog_ops *ops; 1189 1190 if (type >= ARRAY_SIZE(bpf_prog_types)) 1191 return -EINVAL; 1192 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); 1193 ops = bpf_prog_types[type]; 1194 if (!ops) 1195 return -EINVAL; 1196 1197 if (!bpf_prog_is_dev_bound(prog->aux)) 1198 prog->aux->ops = ops; 1199 else 1200 prog->aux->ops = &bpf_offload_prog_ops; 1201 prog->type = type; 1202 return 0; 1203 } 1204 1205 /* drop refcnt on maps used by eBPF program and free auxilary data */ 1206 static void free_used_maps(struct bpf_prog_aux *aux) 1207 { 1208 enum bpf_cgroup_storage_type stype; 1209 int i; 1210 1211 for_each_cgroup_storage_type(stype) { 1212 if (!aux->cgroup_storage[stype]) 1213 continue; 1214 bpf_cgroup_storage_release(aux->prog, 1215 aux->cgroup_storage[stype]); 1216 } 1217 1218 for (i = 0; i < aux->used_map_cnt; i++) 1219 bpf_map_put(aux->used_maps[i]); 1220 1221 kfree(aux->used_maps); 1222 } 1223 1224 int __bpf_prog_charge(struct user_struct *user, u32 pages) 1225 { 1226 unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 1227 unsigned long user_bufs; 1228 1229 if (user) { 1230 user_bufs = atomic_long_add_return(pages, &user->locked_vm); 1231 if (user_bufs > memlock_limit) { 1232 atomic_long_sub(pages, &user->locked_vm); 1233 return -EPERM; 1234 } 1235 } 1236 1237 return 0; 1238 } 1239 1240 void __bpf_prog_uncharge(struct user_struct *user, u32 pages) 1241 { 1242 if (user) 1243 atomic_long_sub(pages, &user->locked_vm); 1244 } 1245 1246 static int bpf_prog_charge_memlock(struct bpf_prog *prog) 1247 { 1248 struct user_struct *user = get_current_user(); 1249 int ret; 1250 1251 ret = __bpf_prog_charge(user, prog->pages); 1252 if (ret) { 1253 free_uid(user); 1254 return ret; 1255 } 1256 1257 prog->aux->user = user; 1258 return 0; 1259 } 1260 1261 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog) 1262 { 1263 struct user_struct *user = prog->aux->user; 1264 1265 
__bpf_prog_uncharge(user, prog->pages); 1266 free_uid(user); 1267 } 1268 1269 static int bpf_prog_alloc_id(struct bpf_prog *prog) 1270 { 1271 int id; 1272 1273 idr_preload(GFP_KERNEL); 1274 spin_lock_bh(&prog_idr_lock); 1275 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); 1276 if (id > 0) 1277 prog->aux->id = id; 1278 spin_unlock_bh(&prog_idr_lock); 1279 idr_preload_end(); 1280 1281 /* id is in [1, INT_MAX) */ 1282 if (WARN_ON_ONCE(!id)) 1283 return -ENOSPC; 1284 1285 return id > 0 ? 0 : id; 1286 } 1287 1288 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) 1289 { 1290 /* cBPF to eBPF migrations are currently not in the idr store. 1291 * Offloaded programs are removed from the store when their device 1292 * disappears - even if someone grabs an fd to them they are unusable, 1293 * simply waiting for refcnt to drop to be freed. 1294 */ 1295 if (!prog->aux->id) 1296 return; 1297 1298 if (do_idr_lock) 1299 spin_lock_bh(&prog_idr_lock); 1300 else 1301 __acquire(&prog_idr_lock); 1302 1303 idr_remove(&prog_idr, prog->aux->id); 1304 prog->aux->id = 0; 1305 1306 if (do_idr_lock) 1307 spin_unlock_bh(&prog_idr_lock); 1308 else 1309 __release(&prog_idr_lock); 1310 } 1311 1312 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 1313 { 1314 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 1315 1316 free_used_maps(aux); 1317 bpf_prog_uncharge_memlock(aux->prog); 1318 security_bpf_prog_free(aux); 1319 bpf_prog_free(aux->prog); 1320 } 1321 1322 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) 1323 { 1324 if (atomic_dec_and_test(&prog->aux->refcnt)) { 1325 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 1326 /* bpf_prog_free_id() must be called first */ 1327 bpf_prog_free_id(prog, do_idr_lock); 1328 bpf_prog_kallsyms_del_all(prog); 1329 btf_put(prog->aux->btf); 1330 kvfree(prog->aux->func_info); 1331 bpf_prog_free_linfo(prog); 1332 1333 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 1334 } 1335 } 1336 1337 void bpf_prog_put(struct bpf_prog *prog) 1338 { 1339 __bpf_prog_put(prog, true); 1340 } 1341 EXPORT_SYMBOL_GPL(bpf_prog_put); 1342 1343 static int bpf_prog_release(struct inode *inode, struct file *filp) 1344 { 1345 struct bpf_prog *prog = filp->private_data; 1346 1347 bpf_prog_put(prog); 1348 return 0; 1349 } 1350 1351 static void bpf_prog_get_stats(const struct bpf_prog *prog, 1352 struct bpf_prog_stats *stats) 1353 { 1354 u64 nsecs = 0, cnt = 0; 1355 int cpu; 1356 1357 for_each_possible_cpu(cpu) { 1358 const struct bpf_prog_stats *st; 1359 unsigned int start; 1360 u64 tnsecs, tcnt; 1361 1362 st = per_cpu_ptr(prog->aux->stats, cpu); 1363 do { 1364 start = u64_stats_fetch_begin_irq(&st->syncp); 1365 tnsecs = st->nsecs; 1366 tcnt = st->cnt; 1367 } while (u64_stats_fetch_retry_irq(&st->syncp, start)); 1368 nsecs += tnsecs; 1369 cnt += tcnt; 1370 } 1371 stats->nsecs = nsecs; 1372 stats->cnt = cnt; 1373 } 1374 1375 #ifdef CONFIG_PROC_FS 1376 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 1377 { 1378 const struct bpf_prog *prog = filp->private_data; 1379 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 1380 struct bpf_prog_stats stats; 1381 1382 bpf_prog_get_stats(prog, &stats); 1383 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 1384 seq_printf(m, 1385 "prog_type:\t%u\n" 1386 "prog_jited:\t%u\n" 1387 "prog_tag:\t%s\n" 1388 "memlock:\t%llu\n" 1389 "prog_id:\t%u\n" 1390 "run_time_ns:\t%llu\n" 1391 "run_cnt:\t%llu\n", 1392 prog->type, 1393 prog->jited, 1394 prog_tag, 1395 prog->pages * 1ULL << 
PAGE_SHIFT, 1396 prog->aux->id, 1397 stats.nsecs, 1398 stats.cnt); 1399 } 1400 #endif 1401 1402 const struct file_operations bpf_prog_fops = { 1403 #ifdef CONFIG_PROC_FS 1404 .show_fdinfo = bpf_prog_show_fdinfo, 1405 #endif 1406 .release = bpf_prog_release, 1407 .read = bpf_dummy_read, 1408 .write = bpf_dummy_write, 1409 }; 1410 1411 int bpf_prog_new_fd(struct bpf_prog *prog) 1412 { 1413 int ret; 1414 1415 ret = security_bpf_prog(prog); 1416 if (ret < 0) 1417 return ret; 1418 1419 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 1420 O_RDWR | O_CLOEXEC); 1421 } 1422 1423 static struct bpf_prog *____bpf_prog_get(struct fd f) 1424 { 1425 if (!f.file) 1426 return ERR_PTR(-EBADF); 1427 if (f.file->f_op != &bpf_prog_fops) { 1428 fdput(f); 1429 return ERR_PTR(-EINVAL); 1430 } 1431 1432 return f.file->private_data; 1433 } 1434 1435 struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) 1436 { 1437 if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) { 1438 atomic_sub(i, &prog->aux->refcnt); 1439 return ERR_PTR(-EBUSY); 1440 } 1441 return prog; 1442 } 1443 EXPORT_SYMBOL_GPL(bpf_prog_add); 1444 1445 void bpf_prog_sub(struct bpf_prog *prog, int i) 1446 { 1447 /* Only to be used for undoing previous bpf_prog_add() in some 1448 * error path. We still know that another entity in our call 1449 * path holds a reference to the program, thus atomic_sub() can 1450 * be safely used in such cases! 1451 */ 1452 WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0); 1453 } 1454 EXPORT_SYMBOL_GPL(bpf_prog_sub); 1455 1456 struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) 1457 { 1458 return bpf_prog_add(prog, 1); 1459 } 1460 EXPORT_SYMBOL_GPL(bpf_prog_inc); 1461 1462 /* prog_idr_lock should have been held */ 1463 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 1464 { 1465 int refold; 1466 1467 refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0); 1468 1469 if (refold >= BPF_MAX_REFCNT) { 1470 __bpf_prog_put(prog, false); 1471 return ERR_PTR(-EBUSY); 1472 } 1473 1474 if (!refold) 1475 return ERR_PTR(-ENOENT); 1476 1477 return prog; 1478 } 1479 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1480 1481 bool bpf_prog_get_ok(struct bpf_prog *prog, 1482 enum bpf_prog_type *attach_type, bool attach_drv) 1483 { 1484 /* not an attachment, just a refcount inc, always allow */ 1485 if (!attach_type) 1486 return true; 1487 1488 if (prog->type != *attach_type) 1489 return false; 1490 if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) 1491 return false; 1492 1493 return true; 1494 } 1495 1496 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 1497 bool attach_drv) 1498 { 1499 struct fd f = fdget(ufd); 1500 struct bpf_prog *prog; 1501 1502 prog = ____bpf_prog_get(f); 1503 if (IS_ERR(prog)) 1504 return prog; 1505 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 1506 prog = ERR_PTR(-EINVAL); 1507 goto out; 1508 } 1509 1510 prog = bpf_prog_inc(prog); 1511 out: 1512 fdput(f); 1513 return prog; 1514 } 1515 1516 struct bpf_prog *bpf_prog_get(u32 ufd) 1517 { 1518 return __bpf_prog_get(ufd, NULL, false); 1519 } 1520 1521 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1522 bool attach_drv) 1523 { 1524 return __bpf_prog_get(ufd, &type, attach_drv); 1525 } 1526 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 1527 1528 /* Initially all BPF programs could be loaded w/o specifying 1529 * expected_attach_type. Later for some of them specifying expected_attach_type 1530 * at load time became required so that program could be validated properly. 
1531 * Programs of types that are allowed to be loaded both w/ and w/o (for 1532 * backward compatibility) expected_attach_type, should have the default attach 1533 * type assigned to expected_attach_type for the latter case, so that it can be 1534 * validated later at attach time. 1535 * 1536 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 1537 * prog type requires it but has some attach types that have to be backward 1538 * compatible. 1539 */ 1540 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 1541 { 1542 switch (attr->prog_type) { 1543 case BPF_PROG_TYPE_CGROUP_SOCK: 1544 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 1545 * exist so checking for non-zero is the way to go here. 1546 */ 1547 if (!attr->expected_attach_type) 1548 attr->expected_attach_type = 1549 BPF_CGROUP_INET_SOCK_CREATE; 1550 break; 1551 } 1552 } 1553 1554 static int 1555 bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type, 1556 enum bpf_attach_type expected_attach_type) 1557 { 1558 switch (prog_type) { 1559 case BPF_PROG_TYPE_CGROUP_SOCK: 1560 switch (expected_attach_type) { 1561 case BPF_CGROUP_INET_SOCK_CREATE: 1562 case BPF_CGROUP_INET4_POST_BIND: 1563 case BPF_CGROUP_INET6_POST_BIND: 1564 return 0; 1565 default: 1566 return -EINVAL; 1567 } 1568 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 1569 switch (expected_attach_type) { 1570 case BPF_CGROUP_INET4_BIND: 1571 case BPF_CGROUP_INET6_BIND: 1572 case BPF_CGROUP_INET4_CONNECT: 1573 case BPF_CGROUP_INET6_CONNECT: 1574 case BPF_CGROUP_UDP4_SENDMSG: 1575 case BPF_CGROUP_UDP6_SENDMSG: 1576 case BPF_CGROUP_UDP4_RECVMSG: 1577 case BPF_CGROUP_UDP6_RECVMSG: 1578 return 0; 1579 default: 1580 return -EINVAL; 1581 } 1582 default: 1583 return 0; 1584 } 1585 } 1586 1587 /* last field in 'union bpf_attr' used by this command */ 1588 #define BPF_PROG_LOAD_LAST_FIELD line_info_cnt 1589 1590 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) 1591 { 1592 enum bpf_prog_type type = attr->prog_type; 1593 struct bpf_prog *prog; 1594 int err; 1595 char license[128]; 1596 bool is_gpl; 1597 1598 if (CHECK_ATTR(BPF_PROG_LOAD)) 1599 return -EINVAL; 1600 1601 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT)) 1602 return -EINVAL; 1603 1604 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 1605 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 1606 !capable(CAP_SYS_ADMIN)) 1607 return -EPERM; 1608 1609 /* copy eBPF program license from user space */ 1610 if (strncpy_from_user(license, u64_to_user_ptr(attr->license), 1611 sizeof(license) - 1) < 0) 1612 return -EFAULT; 1613 license[sizeof(license) - 1] = 0; 1614 1615 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 1616 is_gpl = license_is_gpl_compatible(license); 1617 1618 if (attr->insn_cnt == 0 || 1619 attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) 1620 return -E2BIG; 1621 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 1622 type != BPF_PROG_TYPE_CGROUP_SKB && 1623 !capable(CAP_SYS_ADMIN)) 1624 return -EPERM; 1625 1626 bpf_prog_load_fixup_attach_type(attr); 1627 if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type)) 1628 return -EINVAL; 1629 1630 /* plain bpf_prog allocation */ 1631 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 1632 if (!prog) 1633 return -ENOMEM; 1634 1635 prog->expected_attach_type = attr->expected_attach_type; 1636 1637 prog->aux->offload_requested = !!attr->prog_ifindex; 1638 1639 err = security_bpf_prog_alloc(prog->aux); 1640 if (err) 1641 goto free_prog_nouncharge; 1642 1643 err = bpf_prog_charge_memlock(prog); 1644 if (err) 1645 goto free_prog_sec; 1646 1647 prog->len = attr->insn_cnt; 1648 1649 err = -EFAULT; 1650 if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns), 1651 bpf_prog_insn_size(prog)) != 0) 1652 goto free_prog; 1653 1654 prog->orig_prog = NULL; 1655 prog->jited = 0; 1656 1657 atomic_set(&prog->aux->refcnt, 1); 1658 prog->gpl_compatible = is_gpl ? 1 : 0; 1659 1660 if (bpf_prog_is_dev_bound(prog->aux)) { 1661 err = bpf_prog_offload_init(prog, attr); 1662 if (err) 1663 goto free_prog; 1664 } 1665 1666 /* find program type: socket_filter vs tracing_filter */ 1667 err = find_prog_type(type, prog); 1668 if (err < 0) 1669 goto free_prog; 1670 1671 prog->aux->load_time = ktime_get_boottime_ns(); 1672 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name); 1673 if (err) 1674 goto free_prog; 1675 1676 /* run eBPF verifier */ 1677 err = bpf_check(&prog, attr, uattr); 1678 if (err < 0) 1679 goto free_used_maps; 1680 1681 prog = bpf_prog_select_runtime(prog, &err); 1682 if (err < 0) 1683 goto free_used_maps; 1684 1685 err = bpf_prog_alloc_id(prog); 1686 if (err) 1687 goto free_used_maps; 1688 1689 err = bpf_prog_new_fd(prog); 1690 if (err < 0) { 1691 /* failed to allocate fd. 1692 * bpf_prog_put() is needed because the above 1693 * bpf_prog_alloc_id() has published the prog 1694 * to the userspace and the userspace may 1695 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID. 
1696 */ 1697 bpf_prog_put(prog); 1698 return err; 1699 } 1700 1701 bpf_prog_kallsyms_add(prog); 1702 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 1703 return err; 1704 1705 free_used_maps: 1706 bpf_prog_free_linfo(prog); 1707 kvfree(prog->aux->func_info); 1708 btf_put(prog->aux->btf); 1709 bpf_prog_kallsyms_del_subprogs(prog); 1710 free_used_maps(prog->aux); 1711 free_prog: 1712 bpf_prog_uncharge_memlock(prog); 1713 free_prog_sec: 1714 security_bpf_prog_free(prog->aux); 1715 free_prog_nouncharge: 1716 bpf_prog_free(prog); 1717 return err; 1718 } 1719 1720 #define BPF_OBJ_LAST_FIELD file_flags 1721 1722 static int bpf_obj_pin(const union bpf_attr *attr) 1723 { 1724 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0) 1725 return -EINVAL; 1726 1727 return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); 1728 } 1729 1730 static int bpf_obj_get(const union bpf_attr *attr) 1731 { 1732 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 1733 attr->file_flags & ~BPF_OBJ_FLAG_MASK) 1734 return -EINVAL; 1735 1736 return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), 1737 attr->file_flags); 1738 } 1739 1740 struct bpf_raw_tracepoint { 1741 struct bpf_raw_event_map *btp; 1742 struct bpf_prog *prog; 1743 }; 1744 1745 static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp) 1746 { 1747 struct bpf_raw_tracepoint *raw_tp = filp->private_data; 1748 1749 if (raw_tp->prog) { 1750 bpf_probe_unregister(raw_tp->btp, raw_tp->prog); 1751 bpf_prog_put(raw_tp->prog); 1752 } 1753 bpf_put_raw_tracepoint(raw_tp->btp); 1754 kfree(raw_tp); 1755 return 0; 1756 } 1757 1758 static const struct file_operations bpf_raw_tp_fops = { 1759 .release = bpf_raw_tracepoint_release, 1760 .read = bpf_dummy_read, 1761 .write = bpf_dummy_write, 1762 }; 1763 1764 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 1765 1766 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 1767 { 1768 struct bpf_raw_tracepoint *raw_tp; 1769 struct bpf_raw_event_map *btp; 1770 struct bpf_prog *prog; 1771 char tp_name[128]; 1772 int tp_fd, err; 1773 1774 if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name), 1775 sizeof(tp_name) - 1) < 0) 1776 return -EFAULT; 1777 tp_name[sizeof(tp_name) - 1] = 0; 1778 1779 btp = bpf_get_raw_tracepoint(tp_name); 1780 if (!btp) 1781 return -ENOENT; 1782 1783 raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER); 1784 if (!raw_tp) { 1785 err = -ENOMEM; 1786 goto out_put_btp; 1787 } 1788 raw_tp->btp = btp; 1789 1790 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 1791 if (IS_ERR(prog)) { 1792 err = PTR_ERR(prog); 1793 goto out_free_tp; 1794 } 1795 if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT && 1796 prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) { 1797 err = -EINVAL; 1798 goto out_put_prog; 1799 } 1800 1801 err = bpf_probe_register(raw_tp->btp, prog); 1802 if (err) 1803 goto out_put_prog; 1804 1805 raw_tp->prog = prog; 1806 tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp, 1807 O_CLOEXEC); 1808 if (tp_fd < 0) { 1809 bpf_probe_unregister(raw_tp->btp, prog); 1810 err = tp_fd; 1811 goto out_put_prog; 1812 } 1813 return tp_fd; 1814 1815 out_put_prog: 1816 bpf_prog_put(prog); 1817 out_free_tp: 1818 kfree(raw_tp); 1819 out_put_btp: 1820 bpf_put_raw_tracepoint(btp); 1821 return err; 1822 } 1823 1824 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 1825 enum bpf_attach_type attach_type) 1826 { 1827 switch (prog->type) { 1828 case BPF_PROG_TYPE_CGROUP_SOCK: 1829 case 
BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 1830 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 1831 default: 1832 return 0; 1833 } 1834 } 1835 1836 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags 1837 1838 #define BPF_F_ATTACH_MASK \ 1839 (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) 1840 1841 static int bpf_prog_attach(const union bpf_attr *attr) 1842 { 1843 enum bpf_prog_type ptype; 1844 struct bpf_prog *prog; 1845 int ret; 1846 1847 if (!capable(CAP_NET_ADMIN)) 1848 return -EPERM; 1849 1850 if (CHECK_ATTR(BPF_PROG_ATTACH)) 1851 return -EINVAL; 1852 1853 if (attr->attach_flags & ~BPF_F_ATTACH_MASK) 1854 return -EINVAL; 1855 1856 switch (attr->attach_type) { 1857 case BPF_CGROUP_INET_INGRESS: 1858 case BPF_CGROUP_INET_EGRESS: 1859 ptype = BPF_PROG_TYPE_CGROUP_SKB; 1860 break; 1861 case BPF_CGROUP_INET_SOCK_CREATE: 1862 case BPF_CGROUP_INET4_POST_BIND: 1863 case BPF_CGROUP_INET6_POST_BIND: 1864 ptype = BPF_PROG_TYPE_CGROUP_SOCK; 1865 break; 1866 case BPF_CGROUP_INET4_BIND: 1867 case BPF_CGROUP_INET6_BIND: 1868 case BPF_CGROUP_INET4_CONNECT: 1869 case BPF_CGROUP_INET6_CONNECT: 1870 case BPF_CGROUP_UDP4_SENDMSG: 1871 case BPF_CGROUP_UDP6_SENDMSG: 1872 case BPF_CGROUP_UDP4_RECVMSG: 1873 case BPF_CGROUP_UDP6_RECVMSG: 1874 ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 1875 break; 1876 case BPF_CGROUP_SOCK_OPS: 1877 ptype = BPF_PROG_TYPE_SOCK_OPS; 1878 break; 1879 case BPF_CGROUP_DEVICE: 1880 ptype = BPF_PROG_TYPE_CGROUP_DEVICE; 1881 break; 1882 case BPF_SK_MSG_VERDICT: 1883 ptype = BPF_PROG_TYPE_SK_MSG; 1884 break; 1885 case BPF_SK_SKB_STREAM_PARSER: 1886 case BPF_SK_SKB_STREAM_VERDICT: 1887 ptype = BPF_PROG_TYPE_SK_SKB; 1888 break; 1889 case BPF_LIRC_MODE2: 1890 ptype = BPF_PROG_TYPE_LIRC_MODE2; 1891 break; 1892 case BPF_FLOW_DISSECTOR: 1893 ptype = BPF_PROG_TYPE_FLOW_DISSECTOR; 1894 break; 1895 case BPF_CGROUP_SYSCTL: 1896 ptype = BPF_PROG_TYPE_CGROUP_SYSCTL; 1897 break; 1898 default: 1899 return -EINVAL; 1900 } 1901 1902 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 1903 if (IS_ERR(prog)) 1904 return PTR_ERR(prog); 1905 1906 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 1907 bpf_prog_put(prog); 1908 return -EINVAL; 1909 } 1910 1911 switch (ptype) { 1912 case BPF_PROG_TYPE_SK_SKB: 1913 case BPF_PROG_TYPE_SK_MSG: 1914 ret = sock_map_get_from_fd(attr, prog); 1915 break; 1916 case BPF_PROG_TYPE_LIRC_MODE2: 1917 ret = lirc_prog_attach(attr, prog); 1918 break; 1919 case BPF_PROG_TYPE_FLOW_DISSECTOR: 1920 ret = skb_flow_dissector_bpf_prog_attach(attr, prog); 1921 break; 1922 default: 1923 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 1924 } 1925 1926 if (ret) 1927 bpf_prog_put(prog); 1928 return ret; 1929 } 1930 1931 #define BPF_PROG_DETACH_LAST_FIELD attach_type 1932 1933 static int bpf_prog_detach(const union bpf_attr *attr) 1934 { 1935 enum bpf_prog_type ptype; 1936 1937 if (!capable(CAP_NET_ADMIN)) 1938 return -EPERM; 1939 1940 if (CHECK_ATTR(BPF_PROG_DETACH)) 1941 return -EINVAL; 1942 1943 switch (attr->attach_type) { 1944 case BPF_CGROUP_INET_INGRESS: 1945 case BPF_CGROUP_INET_EGRESS: 1946 ptype = BPF_PROG_TYPE_CGROUP_SKB; 1947 break; 1948 case BPF_CGROUP_INET_SOCK_CREATE: 1949 case BPF_CGROUP_INET4_POST_BIND: 1950 case BPF_CGROUP_INET6_POST_BIND: 1951 ptype = BPF_PROG_TYPE_CGROUP_SOCK; 1952 break; 1953 case BPF_CGROUP_INET4_BIND: 1954 case BPF_CGROUP_INET6_BIND: 1955 case BPF_CGROUP_INET4_CONNECT: 1956 case BPF_CGROUP_INET6_CONNECT: 1957 case BPF_CGROUP_UDP4_SENDMSG: 1958 case BPF_CGROUP_UDP6_SENDMSG: 1959 case BPF_CGROUP_UDP4_RECVMSG: 1960 case 
BPF_CGROUP_UDP6_RECVMSG: 1961 ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 1962 break; 1963 case BPF_CGROUP_SOCK_OPS: 1964 ptype = BPF_PROG_TYPE_SOCK_OPS; 1965 break; 1966 case BPF_CGROUP_DEVICE: 1967 ptype = BPF_PROG_TYPE_CGROUP_DEVICE; 1968 break; 1969 case BPF_SK_MSG_VERDICT: 1970 return sock_map_get_from_fd(attr, NULL); 1971 case BPF_SK_SKB_STREAM_PARSER: 1972 case BPF_SK_SKB_STREAM_VERDICT: 1973 return sock_map_get_from_fd(attr, NULL); 1974 case BPF_LIRC_MODE2: 1975 return lirc_prog_detach(attr); 1976 case BPF_FLOW_DISSECTOR: 1977 return skb_flow_dissector_bpf_prog_detach(attr); 1978 case BPF_CGROUP_SYSCTL: 1979 ptype = BPF_PROG_TYPE_CGROUP_SYSCTL; 1980 break; 1981 default: 1982 return -EINVAL; 1983 } 1984 1985 return cgroup_bpf_prog_detach(attr, ptype); 1986 } 1987 1988 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt 1989 1990 static int bpf_prog_query(const union bpf_attr *attr, 1991 union bpf_attr __user *uattr) 1992 { 1993 if (!capable(CAP_NET_ADMIN)) 1994 return -EPERM; 1995 if (CHECK_ATTR(BPF_PROG_QUERY)) 1996 return -EINVAL; 1997 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 1998 return -EINVAL; 1999 2000 switch (attr->query.attach_type) { 2001 case BPF_CGROUP_INET_INGRESS: 2002 case BPF_CGROUP_INET_EGRESS: 2003 case BPF_CGROUP_INET_SOCK_CREATE: 2004 case BPF_CGROUP_INET4_BIND: 2005 case BPF_CGROUP_INET6_BIND: 2006 case BPF_CGROUP_INET4_POST_BIND: 2007 case BPF_CGROUP_INET6_POST_BIND: 2008 case BPF_CGROUP_INET4_CONNECT: 2009 case BPF_CGROUP_INET6_CONNECT: 2010 case BPF_CGROUP_UDP4_SENDMSG: 2011 case BPF_CGROUP_UDP6_SENDMSG: 2012 case BPF_CGROUP_UDP4_RECVMSG: 2013 case BPF_CGROUP_UDP6_RECVMSG: 2014 case BPF_CGROUP_SOCK_OPS: 2015 case BPF_CGROUP_DEVICE: 2016 case BPF_CGROUP_SYSCTL: 2017 break; 2018 case BPF_LIRC_MODE2: 2019 return lirc_prog_query(attr, uattr); 2020 case BPF_FLOW_DISSECTOR: 2021 return skb_flow_dissector_prog_query(attr, uattr); 2022 default: 2023 return -EINVAL; 2024 } 2025 2026 return cgroup_bpf_prog_query(attr, uattr); 2027 } 2028 2029 #define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out 2030 2031 static int bpf_prog_test_run(const union bpf_attr *attr, 2032 union bpf_attr __user *uattr) 2033 { 2034 struct bpf_prog *prog; 2035 int ret = -ENOTSUPP; 2036 2037 if (!capable(CAP_SYS_ADMIN)) 2038 return -EPERM; 2039 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 2040 return -EINVAL; 2041 2042 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 2043 (!attr->test.ctx_size_in && attr->test.ctx_in)) 2044 return -EINVAL; 2045 2046 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 2047 (!attr->test.ctx_size_out && attr->test.ctx_out)) 2048 return -EINVAL; 2049 2050 prog = bpf_prog_get(attr->test.prog_fd); 2051 if (IS_ERR(prog)) 2052 return PTR_ERR(prog); 2053 2054 if (prog->aux->ops->test_run) 2055 ret = prog->aux->ops->test_run(prog, attr, uattr); 2056 2057 bpf_prog_put(prog); 2058 return ret; 2059 } 2060 2061 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 2062 2063 static int bpf_obj_get_next_id(const union bpf_attr *attr, 2064 union bpf_attr __user *uattr, 2065 struct idr *idr, 2066 spinlock_t *lock) 2067 { 2068 u32 next_id = attr->start_id; 2069 int err = 0; 2070 2071 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 2072 return -EINVAL; 2073 2074 if (!capable(CAP_SYS_ADMIN)) 2075 return -EPERM; 2076 2077 next_id++; 2078 spin_lock_bh(lock); 2079 if (!idr_get_next(idr, &next_id)) 2080 err = -ENOENT; 2081 spin_unlock_bh(lock); 2082 2083 if (!err) 2084 err = put_user(next_id, &uattr->next_id); 2085 2086 return err; 2087 } 2088 2089 #define 
BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 2090 2091 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 2092 { 2093 struct bpf_prog *prog; 2094 u32 id = attr->prog_id; 2095 int fd; 2096 2097 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 2098 return -EINVAL; 2099 2100 if (!capable(CAP_SYS_ADMIN)) 2101 return -EPERM; 2102 2103 spin_lock_bh(&prog_idr_lock); 2104 prog = idr_find(&prog_idr, id); 2105 if (prog) 2106 prog = bpf_prog_inc_not_zero(prog); 2107 else 2108 prog = ERR_PTR(-ENOENT); 2109 spin_unlock_bh(&prog_idr_lock); 2110 2111 if (IS_ERR(prog)) 2112 return PTR_ERR(prog); 2113 2114 fd = bpf_prog_new_fd(prog); 2115 if (fd < 0) 2116 bpf_prog_put(prog); 2117 2118 return fd; 2119 } 2120 2121 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 2122 2123 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 2124 { 2125 struct bpf_map *map; 2126 u32 id = attr->map_id; 2127 int f_flags; 2128 int fd; 2129 2130 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 2131 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 2132 return -EINVAL; 2133 2134 if (!capable(CAP_SYS_ADMIN)) 2135 return -EPERM; 2136 2137 f_flags = bpf_get_file_flag(attr->open_flags); 2138 if (f_flags < 0) 2139 return f_flags; 2140 2141 spin_lock_bh(&map_idr_lock); 2142 map = idr_find(&map_idr, id); 2143 if (map) 2144 map = bpf_map_inc_not_zero(map, true); 2145 else 2146 map = ERR_PTR(-ENOENT); 2147 spin_unlock_bh(&map_idr_lock); 2148 2149 if (IS_ERR(map)) 2150 return PTR_ERR(map); 2151 2152 fd = bpf_map_new_fd(map, f_flags); 2153 if (fd < 0) 2154 bpf_map_put_with_uref(map); 2155 2156 return fd; 2157 } 2158 2159 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 2160 unsigned long addr, u32 *off, 2161 u32 *type) 2162 { 2163 const struct bpf_map *map; 2164 int i; 2165 2166 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 2167 map = prog->aux->used_maps[i]; 2168 if (map == (void *)addr) { 2169 *type = BPF_PSEUDO_MAP_FD; 2170 return map; 2171 } 2172 if (!map->ops->map_direct_value_meta) 2173 continue; 2174 if (!map->ops->map_direct_value_meta(map, addr, off)) { 2175 *type = BPF_PSEUDO_MAP_VALUE; 2176 return map; 2177 } 2178 } 2179 2180 return NULL; 2181 } 2182 2183 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) 2184 { 2185 const struct bpf_map *map; 2186 struct bpf_insn *insns; 2187 u32 off, type; 2188 u64 imm; 2189 int i; 2190 2191 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 2192 GFP_USER); 2193 if (!insns) 2194 return insns; 2195 2196 for (i = 0; i < prog->len; i++) { 2197 if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) { 2198 insns[i].code = BPF_JMP | BPF_CALL; 2199 insns[i].imm = BPF_FUNC_tail_call; 2200 /* fall-through */ 2201 } 2202 if (insns[i].code == (BPF_JMP | BPF_CALL) || 2203 insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) { 2204 if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) 2205 insns[i].code = BPF_JMP | BPF_CALL; 2206 if (!bpf_dump_raw_ok()) 2207 insns[i].imm = 0; 2208 continue; 2209 } 2210 2211 if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW)) 2212 continue; 2213 2214 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 2215 map = bpf_map_from_imm(prog, imm, &off, &type); 2216 if (map) { 2217 insns[i].src_reg = type; 2218 insns[i].imm = map->id; 2219 insns[i + 1].imm = off; 2220 continue; 2221 } 2222 } 2223 2224 return insns; 2225 } 2226 2227 static int set_info_rec_size(struct bpf_prog_info *info) 2228 { 2229 /* 2230 * Ensure info.*_rec_size is the same as kernel expected size 2231 * 2232 * or 2233 * 2234 * Only allow zero *_rec_size if both _rec_size and _cnt 
static int set_info_rec_size(struct bpf_prog_info *info)
{
	/*
	 * Ensure info.*_rec_size is the same as kernel expected size
	 *
	 * or
	 *
	 * Only allow zero *_rec_size if both _rec_size and _cnt are
	 * zero.  In this case, the kernel will set the expected
	 * _rec_size back to the info.
	 */

	if ((info->nr_func_info || info->func_info_rec_size) &&
	    info->func_info_rec_size != sizeof(struct bpf_func_info))
		return -EINVAL;

	if ((info->nr_line_info || info->line_info_rec_size) &&
	    info->line_info_rec_size != sizeof(struct bpf_line_info))
		return -EINVAL;

	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
	    info->jited_line_info_rec_size != sizeof(__u64))
		return -EINVAL;

	info->func_info_rec_size = sizeof(struct bpf_func_info);
	info->line_info_rec_size = sizeof(struct bpf_line_info);
	info->jited_line_info_rec_size = sizeof(__u64);

	return 0;
}

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	struct bpf_prog_stats stats;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);
	info.gpl_compatible = prog->gpl_compatible;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i]))
				return -EFAULT;
	}

	err = set_info_rec_size(&info);
	if (err)
		return err;

	bpf_prog_get_stats(prog, &stats);
	info.run_time_ns = stats.nsecs;
	info.run_cnt = stats.cnt;

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		info.nr_jited_ksyms = 0;
		info.nr_jited_func_lens = 0;
		info.nr_func_info = 0;
		info.nr_line_info = 0;
		info.nr_jited_line_info = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok()) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}
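	/*
	 * For device-bound (offloaded) programs the host holds no meaningful
	 * translated/JITed image; let the driver fill those fields instead.
	 */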
	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is supposed to be skipped for offload.
	 * bpf_prog_offload_info_fill() is the place to fill similar fields
	 * for offload.
	 */
	ulen = info.jited_prog_len;
	if (prog->aux->func_cnt) {
		u32 i;

		info.jited_prog_len = 0;
		for (i = 0; i < prog->aux->func_cnt; i++)
			info.jited_prog_len += prog->aux->func[i]->jited_len;
	} else {
		info.jited_prog_len = prog->jited_len;
	}

	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok()) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);

			/* for multi-function programs, copy the JITed
			 * instructions for all the functions
			 */
			if (prog->aux->func_cnt) {
				u32 len, free, i;
				u8 *img;

				free = ulen;
				for (i = 0; i < prog->aux->func_cnt; i++) {
					len = prog->aux->func[i]->jited_len;
					len = min_t(u32, len, free);
					img = (u8 *) prog->aux->func[i]->bpf_func;
					if (copy_to_user(uinsns, img, len))
						return -EFAULT;
					uinsns += len;
					free -= len;
					if (!free)
						break;
				}
			} else {
				if (copy_to_user(uinsns, prog->bpf_func, ulen))
					return -EFAULT;
			}
		} else {
			info.jited_prog_insns = 0;
		}
	}

	ulen = info.nr_jited_ksyms;
	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok()) {
			unsigned long ksym_addr;
			u64 __user *user_ksyms;
			u32 i;

			/* copy the address of the kernel symbol
			 * corresponding to each function
			 */
			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					ksym_addr = (unsigned long)
						prog->aux->func[i]->bpf_func;
					if (put_user((u64) ksym_addr,
						     &user_ksyms[i]))
						return -EFAULT;
				}
			} else {
				ksym_addr = (unsigned long) prog->bpf_func;
				if (put_user((u64) ksym_addr, &user_ksyms[0]))
					return -EFAULT;
			}
		} else {
			info.jited_ksyms = 0;
		}
	}
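	/*
	 * As with the ksyms above: one JITed image length per sub-program,
	 * or a single length when the program was not split by the verifier.
	 */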
	ulen = info.nr_jited_func_lens;
	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok()) {
			u32 __user *user_lens;
			u32 func_len, i;

			/* copy the JITed image lengths for each function */
			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
			user_lens = u64_to_user_ptr(info.jited_func_lens);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					func_len =
						prog->aux->func[i]->jited_len;
					if (put_user(func_len, &user_lens[i]))
						return -EFAULT;
				}
			} else {
				func_len = prog->jited_len;
				if (put_user(func_len, &user_lens[0]))
					return -EFAULT;
			}
		} else {
			info.jited_func_lens = 0;
		}
	}

	if (prog->aux->btf)
		info.btf_id = btf_id(prog->aux->btf);

	ulen = info.nr_func_info;
	info.nr_func_info = prog->aux->func_info_cnt;
	if (info.nr_func_info && ulen) {
		char __user *user_finfo;

		user_finfo = u64_to_user_ptr(info.func_info);
		ulen = min_t(u32, info.nr_func_info, ulen);
		if (copy_to_user(user_finfo, prog->aux->func_info,
				 info.func_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_line_info;
	info.nr_line_info = prog->aux->nr_linfo;
	if (info.nr_line_info && ulen) {
		__u8 __user *user_linfo;

		user_linfo = u64_to_user_ptr(info.line_info);
		ulen = min_t(u32, info.nr_line_info, ulen);
		if (copy_to_user(user_linfo, prog->aux->linfo,
				 info.line_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_jited_line_info;
	if (prog->aux->jited_linfo)
		info.nr_jited_line_info = prog->aux->nr_linfo;
	else
		info.nr_jited_line_info = 0;
	if (info.nr_jited_line_info && ulen) {
		if (bpf_dump_raw_ok()) {
			__u64 __user *user_linfo;
			u32 i;

			user_linfo = u64_to_user_ptr(info.jited_line_info);
			ulen = min_t(u32, info.nr_jited_line_info, ulen);
			for (i = 0; i < ulen; i++) {
				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
					     &user_linfo[i]))
					return -EFAULT;
			}
		} else {
			info.jited_line_info = 0;
		}
	}
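	/*
	 * prog_tags: one tag per sub-program when the verifier split the
	 * program into functions, otherwise just the main program's tag.
	 */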
	ulen = info.nr_prog_tags;
	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
	if (ulen) {
		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
		u32 i;

		user_prog_tags = u64_to_user_ptr(info.prog_tags);
		ulen = min_t(u32, info.nr_prog_tags, ulen);
		if (prog->aux->func_cnt) {
			for (i = 0; i < ulen; i++) {
				if (copy_to_user(user_prog_tags[i],
						 prog->aux->func[i]->tag,
						 BPF_TAG_SIZE))
					return -EFAULT;
			}
		} else {
			if (copy_to_user(user_prog_tags[0],
					 prog->tag, BPF_TAG_SIZE))
				return -EFAULT;
		}
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
		info.btf_id = btf_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_btf_get_info_by_fd(struct btf *btf,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
	if (err)
		return err;

	return btf_get_info_by_fd(btf, attr, uattr);
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else if (f.file->f_op == &btf_fops)
		err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

#define BPF_BTF_LOAD_LAST_FIELD btf_log_level

static int bpf_btf_load(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_new_fd(attr);
}
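/*
 * Illustrative sketch (not part of this file): the user-space side of
 * BPF_OBJ_GET_INFO_BY_FD as handled above, here for a map fd.  info_len
 * tells the kernel how much of struct bpf_map_info the caller knows about;
 * the kernel writes back the length it actually filled.  Error handling
 * is omitted and the raw syscall(2) is used to keep the sketch
 * self-contained.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int map_info_by_fd(int map_fd, struct bpf_map_info *info)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		memset(info, 0, sizeof(*info));
 *		attr.info.bpf_fd = map_fd;
 *		attr.info.info_len = sizeof(*info);
 *		attr.info.info = (__u64)(unsigned long)info;
 *		return syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD,
 *			       &attr, sizeof(attr));
 *	}
 */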
#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}

static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NULL terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NULL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NULL terminator,
			 * do a partial copy with NULL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}

#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct files_struct *files;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	if (!task)
		return -ENOENT;

	files = get_files_struct(task);
	put_task_struct(task);
	if (!files)
		return -ENOENT;

	err = 0;
	spin_lock(&files->file_lock);
	file = fcheck_files(files, fd);
	if (!file)
		err = -EBADF;
	else
		get_file(file);
	spin_unlock(&files->file_lock);
	put_files_struct(files);

	if (err)
		goto out;

	if (file->f_op == &bpf_raw_tp_fops) {
		struct bpf_raw_tracepoint *raw_tp = file->private_data;
		struct bpf_raw_event_map *btp = raw_tp->btp;

		err = bpf_task_fd_query_copy(attr, uattr,
					     raw_tp->prog->aux->id,
					     BPF_FD_TYPE_RAW_TRACEPOINT,
					     btp->tp->name, 0, 0);
		goto put_file;
	}

	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

	err = -ENOTSUPP;
put_file:
	fput(file);
out:
	return err;
}
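/*
 * Illustrative sketch (not part of this file): driving BPF_TASK_FD_QUERY
 * from user space to learn what a perf event or raw tracepoint fd in
 * another task is attached to.  On success the kernel fills prog_id,
 * fd_type and buf_len back into the caller's attr, and copies the probed
 * symbol/tracepoint name into buf.  Error handling is omitted.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int task_fd_query(pid_t pid, int fd, char *buf, __u32 buf_len,
 *				 __u32 *prog_id, __u32 *fd_type)
 *	{
 *		union bpf_attr attr;
 *		int err;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.task_fd_query.pid = pid;
 *		attr.task_fd_query.fd = fd;
 *		attr.task_fd_query.buf = (__u64)(unsigned long)buf;
 *		attr.task_fd_query.buf_len = buf_len;
 *		err = syscall(__NR_bpf, BPF_TASK_FD_QUERY,
 *			      &attr, sizeof(attr));
 *		if (!err) {
 *			*prog_id = attr.task_fd_query.prog_id;
 *			*fd_type = attr.task_fd_query.fd_type;
 *		}
 *		return err;
 *	}
 */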
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
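/*
 * Illustrative sketch (not part of this file): the generic user-space entry
 * point through which every command dispatched above is reached.  Passing
 * sizeof(attr) as the size argument is what lets the
 * bpf_check_uarg_tail_zero() check at the top of the syscall accept both
 * older and newer struct bpf_attr layouts.
 *
 *	#include <linux/bpf.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *				  unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 */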