// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <uapi/linux/btf.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
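/*
 * Illustrative note (added comment, not from the original file): the x-macro
 * include above builds the table from the BPF_MAP_TYPE() entries listed in
 * <linux/bpf_types.h>.  Assuming an entry such as
 * BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops), the expansion is roughly:
 *
 *	static const struct bpf_map_ops * const bpf_map_types[] = {
 *		[BPF_MAP_TYPE_ARRAY] = &array_map_ops,
 *		...
 *	};
 *
 * so an unknown or unconfigured map type simply leaves a NULL slot that
 * find_and_alloc_map() below rejects with -EINVAL.
 */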
/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (unlikely(!access_ok(uaddr, actual_size)))
		return -EFAULT;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}
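/*
 * Illustrative example (added comment): a newer user space may pass a
 * sizeof(union bpf_attr) that is larger than this kernel knows about.  As
 * long as the trailing, unknown bytes are zero the command is accepted; a
 * non-zero unknown byte means the caller relies on a feature this kernel
 * does not implement, so -E2BIG is returned instead of silently ignoring it.
 * Callers use it roughly like this (sketch modelled on the syscall entry
 * path, not verbatim kernel code):
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	if (copy_from_user(&attr, uattr, size) != 0)
 *		return -EFAULT;
 */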
const struct bpf_map_ops bpf_map_offload_ops = {
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_flags_caller(size, numa_node,
					   GFP_KERNEL | __GFP_RETRY_MAYFAIL |
					   flags, __builtin_return_address(0));
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
}

static int bpf_charge_memlock(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
		atomic_long_sub(pages, &user->locked_vm);
		return -EPERM;
	}
	return 0;
}

static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
{
	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
	struct user_struct *user;
	int ret;

	if (size >= U32_MAX - PAGE_SIZE)
		return -E2BIG;

	user = get_current_user();
	ret = bpf_charge_memlock(user, pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	mem->pages = pages;
	mem->user = user;

	return 0;
}

void bpf_map_charge_finish(struct bpf_map_memory *mem)
{
	bpf_uncharge_memlock(mem->user, mem->pages);
	free_uid(mem->user);
}

void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src)
{
	*dst = *src;

	/* Make sure src will not be used for the redundant uncharging. */
	memset(src, 0, sizeof(struct bpf_map_memory));
}

int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
{
	int ret;

	ret = bpf_charge_memlock(map->memory.user, pages);
	if (ret)
		return ret;
	map->memory.pages += pages;
	return ret;
}

void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
{
	bpf_uncharge_memlock(map->memory.user, pages);
	map->memory.pages -= pages;
}
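/*
 * Worked example (added comment, numbers illustrative): with a 4 KiB
 * PAGE_SIZE, a map whose backing storage needs 1 MiB charges
 * round_up(1 MiB, 4 KiB) >> PAGE_SHIFT = 256 pages against the creating
 * user's RLIMIT_MEMLOCK via bpf_map_charge_init().  If the new total of
 * user->locked_vm would exceed that limit the charge is rolled back and
 * -EPERM is returned, so map creation fails instead of pinning unbounded
 * kernel memory.  bpf_map_charge_move() later hands the charge to the
 * deferred free path so it is released exactly once.
 */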
static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct bpf_map_memory mem;

	bpf_map_charge_move(&mem, &map->memory);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
	bpf_map_charge_finish(&mem);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->memory.pages * 1ULL << PAGE_SHIFT,
		   map->id,
		   READ_ONCE(map->frozen));

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif
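/*
 * Example output (added comment, illustrative values): for a map fd,
 * /proc/<pid>/fdinfo/<fd> produced by the handler above looks roughly like:
 *
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	1024
 *	map_flags:	0x0
 *	memlock:	12288
 *	map_id:	57
 *	frozen:	0
 *
 * Tools can parse this to discover which BPF maps a process holds open.
 */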
static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
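/*
 * Worked example (added comment): for BPF_MAP_CREATE the last field used by
 * the command is declared below as
 *
 *	#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
 *
 * so CHECK_ATTR(BPF_MAP_CREATE) expands to a memchr_inv() over every byte of
 * the union that follows attr->btf_value_type_id.  If any of those trailing
 * bytes is non-zero, the caller set a field this command does not use and
 * the command is rejected with -EINVAL rather than silently ignoring it.
 */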
/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
 * Return 0 on success and < 0 on error.
 */
static int bpf_obj_name_cpy(char *dst, const char *src)
{
	const char *end = src + BPF_OBJ_NAME_LEN;

	memset(dst, 0, BPF_OBJ_NAME_LEN);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
	if (src == end)
		return -EINVAL;

	return 0;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	if (map->ops->map_check_btf)
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);

	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem;
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name);
	if (err)
		goto free_map;

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	if (attr->btf_key_type_id || attr->btf_value_type_id) {
		struct btf *btf;

		if (!attr->btf_value_type_id) {
			err = -EINVAL;
			goto free_map;
		}

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}

		err = map_check_btf(map, btf, attr->btf_key_type_id,
				    attr->btf_value_type_id);
		if (err) {
			btf_put(btf);
			goto free_map;
		}

		map->btf = btf;
		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
	} else {
		map->spin_lock_off = -EINVAL;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	bpf_map_charge_move(&mem, &map->memory);
	map->ops->map_free(map);
	bpf_map_charge_finish(&mem);
	return err;
}
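/*
 * Usage sketch (added comment, not kernel code): from user space a map is
 * created by filling the initial fields of union bpf_attr and invoking the
 * bpf() syscall, e.g. for a 256-slot array with 4-byte keys and 8-byte
 * values:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the returned fd pins the map (refcnt and usercnt both start at
 * 1 in map_create() above); closing the last fd eventually frees it.
 */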
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map,
					      bool uref)
{
	int refold;

	refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, uref);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return memdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_lookup_elem(map, key, value);
		goto done;
	}

	preempt_disable();
	this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_peek_elem(map, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (attr->flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock, since value wasn't zero inited */
			check_and_init_map_lock(map, value);
		}
		rcu_read_unlock();
	}
	this_cpu_dec(bpf_prog_active);
	preempt_enable();

done:
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
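/*
 * Worked example (added comment): for the per-CPU map types handled above,
 * the syscall copies one value per possible CPU back to back.  With
 * value_size = 12 and 4 possible CPUs, user space must supply a buffer of
 * round_up(12, 8) * 4 = 64 bytes for BPF_MAP_LOOKUP_ELEM, with CPU n's copy
 * at offset n * 16.  Passing only value_size bytes would make the kernel's
 * copy_to_user() run past the end of the user buffer, which is why the
 * padded per-CPU size is used on both the lookup and update paths.
 */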
static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
		goto out;
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		goto out;
	}

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       attr->flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_push_elem(map, value, attr->flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
	maybe_wait_bpf_programs(map);
out:
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
	maybe_wait_bpf_programs(map);
out:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
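/*
 * Usage sketch (added comment, not kernel code): user space typically walks
 * a map by chaining BPF_MAP_GET_NEXT_KEY calls, starting from a NULL key to
 * obtain the first one:
 *
 *	attr.map_fd = map_fd;
 *	attr.key = 0;				// NULL: ask for the first key
 *	attr.next_key = (__u64)(unsigned long)&next;
 *	while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
 *		// look up / print "next" here, then continue from it
 *		attr.key = attr.next_key;
 *	}
 *
 * The loop ends with -ENOENT once the last key has been returned.  Keys may
 * be added or deleted concurrently, so the walk is only a best-effort
 * snapshot.
 */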
#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else {
		err = -ENOTSUPP;
	}

	if (err)
		goto free_value;

	/* Report the failed copy instead of silently returning success. */
	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	fdput(f);
	return err;
}
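/*
 * Usage note (added comment): BPF_MAP_FREEZE makes a map read-only from the
 * syscall side only; map_get_sys_perms() above then masks FMODE_CAN_WRITE
 * for every fd, so a subsequent BPF_MAP_UPDATE_ELEM or BPF_MAP_DELETE_ELEM
 * fails with -EPERM, while programs may still write unless the map was also
 * created with BPF_F_RDONLY_PROG.  Sketch:
 *
 *	attr.map_fd = map_fd;
 *	syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));  // needs CAP_SYS_ADMIN
 *	// later: BPF_MAP_UPDATE_ELEM on any fd of this map -> -EPERM
 */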
static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	enum bpf_cgroup_storage_type stype;
	int i;

	for_each_cgroup_storage_type(stype) {
		if (!aux->cgroup_storage[stype])
			continue;
		bpf_cgroup_storage_release(aux->prog,
					   aux->cgroup_storage[stype]);
	}

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	kvfree(aux->func_info);
	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
{
	bpf_prog_kallsyms_del_all(prog);
	btf_put(prog->aux->btf);
	bpf_prog_free_linfo(prog);

	if (deferred)
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	else
		__bpf_prog_put_rcu(&prog->aux->rcu);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		__bpf_prog_put_noref(prog, true);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static void bpf_prog_get_stats(const struct bpf_prog *prog,
			       struct bpf_prog_stats *stats)
{
	u64 nsecs = 0, cnt = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st;
		unsigned int start;
		u64 tnsecs, tcnt;

		st = per_cpu_ptr(prog->aux->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&st->syncp);
			tnsecs = st->nsecs;
			tcnt = st->cnt;
		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
		nsecs += tnsecs;
		cnt += tcnt;
	}
	stats->nsecs = nsecs;
	stats->cnt = cnt;
}
1412 "prog_tag:\t%s\n" 1413 "memlock:\t%llu\n" 1414 "prog_id:\t%u\n" 1415 "run_time_ns:\t%llu\n" 1416 "run_cnt:\t%llu\n", 1417 prog->type, 1418 prog->jited, 1419 prog_tag, 1420 prog->pages * 1ULL << PAGE_SHIFT, 1421 prog->aux->id, 1422 stats.nsecs, 1423 stats.cnt); 1424 } 1425 #endif 1426 1427 const struct file_operations bpf_prog_fops = { 1428 #ifdef CONFIG_PROC_FS 1429 .show_fdinfo = bpf_prog_show_fdinfo, 1430 #endif 1431 .release = bpf_prog_release, 1432 .read = bpf_dummy_read, 1433 .write = bpf_dummy_write, 1434 }; 1435 1436 int bpf_prog_new_fd(struct bpf_prog *prog) 1437 { 1438 int ret; 1439 1440 ret = security_bpf_prog(prog); 1441 if (ret < 0) 1442 return ret; 1443 1444 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 1445 O_RDWR | O_CLOEXEC); 1446 } 1447 1448 static struct bpf_prog *____bpf_prog_get(struct fd f) 1449 { 1450 if (!f.file) 1451 return ERR_PTR(-EBADF); 1452 if (f.file->f_op != &bpf_prog_fops) { 1453 fdput(f); 1454 return ERR_PTR(-EINVAL); 1455 } 1456 1457 return f.file->private_data; 1458 } 1459 1460 struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) 1461 { 1462 if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) { 1463 atomic_sub(i, &prog->aux->refcnt); 1464 return ERR_PTR(-EBUSY); 1465 } 1466 return prog; 1467 } 1468 EXPORT_SYMBOL_GPL(bpf_prog_add); 1469 1470 void bpf_prog_sub(struct bpf_prog *prog, int i) 1471 { 1472 /* Only to be used for undoing previous bpf_prog_add() in some 1473 * error path. We still know that another entity in our call 1474 * path holds a reference to the program, thus atomic_sub() can 1475 * be safely used in such cases! 1476 */ 1477 WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0); 1478 } 1479 EXPORT_SYMBOL_GPL(bpf_prog_sub); 1480 1481 struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) 1482 { 1483 return bpf_prog_add(prog, 1); 1484 } 1485 EXPORT_SYMBOL_GPL(bpf_prog_inc); 1486 1487 /* prog_idr_lock should have been held */ 1488 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 1489 { 1490 int refold; 1491 1492 refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0); 1493 1494 if (refold >= BPF_MAX_REFCNT) { 1495 __bpf_prog_put(prog, false); 1496 return ERR_PTR(-EBUSY); 1497 } 1498 1499 if (!refold) 1500 return ERR_PTR(-ENOENT); 1501 1502 return prog; 1503 } 1504 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1505 1506 bool bpf_prog_get_ok(struct bpf_prog *prog, 1507 enum bpf_prog_type *attach_type, bool attach_drv) 1508 { 1509 /* not an attachment, just a refcount inc, always allow */ 1510 if (!attach_type) 1511 return true; 1512 1513 if (prog->type != *attach_type) 1514 return false; 1515 if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) 1516 return false; 1517 1518 return true; 1519 } 1520 1521 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 1522 bool attach_drv) 1523 { 1524 struct fd f = fdget(ufd); 1525 struct bpf_prog *prog; 1526 1527 prog = ____bpf_prog_get(f); 1528 if (IS_ERR(prog)) 1529 return prog; 1530 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 1531 prog = ERR_PTR(-EINVAL); 1532 goto out; 1533 } 1534 1535 prog = bpf_prog_inc(prog); 1536 out: 1537 fdput(f); 1538 return prog; 1539 } 1540 1541 struct bpf_prog *bpf_prog_get(u32 ufd) 1542 { 1543 return __bpf_prog_get(ufd, NULL, false); 1544 } 1545 1546 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1547 bool attach_drv) 1548 { 1549 return __bpf_prog_get(ufd, &type, attach_drv); 1550 } 1551 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 1552 1553 /* Initially all BPF 
/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later for some of them specifying expected_attach_type
 * at load time became required so that program could be validated properly.
 * Programs of types that are allowed to be loaded both w/ and w/o (for
 * backward compatibility) expected_attach_type, should have the default attach
 * type assigned to expected_attach_type for the latter case, so that it can be
 * validated later at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * prog type requires it but has some attach types that have to be backward
 * compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
		 * exist so checking for non-zero is the way to go here.
		 */
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_CGROUP_INET_SOCK_CREATE;
		break;
	}
}

static int
bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
			   enum bpf_attach_type expected_attach_type,
			   u32 btf_id)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_TRACING:
		if (btf_id > BTF_MAX_TYPE)
			return -EINVAL;
		break;
	default:
		if (btf_id)
			return -EINVAL;
		break;
	}

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SKB:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		switch (expected_attach_type) {
		case BPF_CGROUP_SETSOCKOPT:
		case BPF_CGROUP_GETSOCKOPT:
			return 0;
		default:
			return -EINVAL;
		}
	default:
		return 0;
	}
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD attach_btf_id
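/*
 * Example (added comment): a BPF_PROG_TYPE_CGROUP_SOCK_ADDR program must
 * declare at load time which hook it will later be attached to, e.g.
 *
 *	attr.prog_type            = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
 *	attr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
 *
 * Anything outside the list handled in bpf_prog_load_check_attach() above is
 * rejected with -EINVAL, and bpf_prog_attach_check_attach_type() further
 * below refuses to attach the program to a different hook.
 */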
static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
				 BPF_F_ANY_ALIGNMENT |
				 BPF_F_TEST_STATE_FREQ |
				 BPF_F_TEST_RND_HI32))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 ||
	    attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
		return -E2BIG;
	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	bpf_prog_load_fixup_attach_type(attr);
	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
				       attr->attach_btf_id))
		return -EINVAL;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	prog->expected_attach_type = attr->expected_attach_type;
	prog->aux->attach_btf_id = attr->attach_btf_id;

	prog->aux->offload_requested = !!attr->prog_ifindex;

	err = security_bpf_prog_alloc(prog->aux);
	if (err)
		goto free_prog_nouncharge;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_sec;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_init(prog, attr);
		if (err)
			goto free_prog;
	}

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	prog->aux->load_time = ktime_get_boottime_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
	if (err)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr, uattr);
	if (err < 0)
		goto free_used_maps;

	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
	 * effectively publicly exposed. However, retrieving via
	 * bpf_prog_get_fd_by_id() will take another reference,
	 * therefore it cannot be gone underneath us.
	 *
	 * Only for the time /after/ successful bpf_prog_new_fd()
	 * and before returning to userspace, we might just hold
	 * one reference and any parallel close on that fd could
	 * rip everything out. Hence, below notifications must
	 * happen before bpf_prog_new_fd().
	 *
	 * Also, any failure handling from this point onwards must
	 * be using bpf_prog_put() given the program is exposed.
	 */
	bpf_prog_kallsyms_add(prog);
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		bpf_prog_put(prog);
	return err;

free_used_maps:
	/* In case we have subprogs, we need to wait for a grace
	 * period before we can tear down JIT memory since symbols
	 * are already exposed under kallsyms.
	 */
	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
	return err;
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_sec:
	security_bpf_prog_free(prog->aux);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
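/*
 * Usage sketch (added comment, not kernel code): the matching user-space
 * call fills the load-related fields of union bpf_attr; a trivial
 * "return 0" socket filter could be loaded roughly like this:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),	// r0 = 0 (return value)
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * The verifier (bpf_check() above) sees exactly these instructions; a
 * negative return carries the -errno chosen in bpf_prog_load().
 */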
#define BPF_OBJ_LAST_FIELD file_flags

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
				attr->file_flags);
}

struct bpf_raw_tracepoint {
	struct bpf_raw_event_map *btp;
	struct bpf_prog *prog;
};

static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
{
	struct bpf_raw_tracepoint *raw_tp = filp->private_data;

	if (raw_tp->prog) {
		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
		bpf_prog_put(raw_tp->prog);
	}
	bpf_put_raw_tracepoint(raw_tp->btp);
	kfree(raw_tp);
	return 0;
}

static const struct file_operations bpf_raw_tp_fops = {
	.release = bpf_raw_tracepoint_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
};

#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd

static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{
	struct bpf_raw_tracepoint *raw_tp;
	struct bpf_raw_event_map *btp;
	struct bpf_prog *prog;
	const char *tp_name;
	char buf[128];
	int tp_fd, err;

	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
		return -EINVAL;

	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
	    prog->type != BPF_PROG_TYPE_TRACING &&
	    prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
		err = -EINVAL;
		goto out_put_prog;
	}

	if (prog->type == BPF_PROG_TYPE_TRACING) {
		if (attr->raw_tracepoint.name) {
			/* raw_tp name should not be specified in raw_tp
			 * programs that were verified via in-kernel BTF info
			 */
			err = -EINVAL;
			goto out_put_prog;
		}
		/* raw_tp name is taken from type name instead */
		tp_name = prog->aux->attach_func_name;
	} else {
		if (strncpy_from_user(buf,
				      u64_to_user_ptr(attr->raw_tracepoint.name),
				      sizeof(buf) - 1) < 0) {
			err = -EFAULT;
			goto out_put_prog;
		}
		buf[sizeof(buf) - 1] = 0;
		tp_name = buf;
	}

	btp = bpf_get_raw_tracepoint(tp_name);
	if (!btp) {
		err = -ENOENT;
		goto out_put_prog;
	}

	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
	if (!raw_tp) {
		err = -ENOMEM;
		goto out_put_btp;
	}
	raw_tp->btp = btp;
	raw_tp->prog = prog;

	err = bpf_probe_register(raw_tp->btp, prog);
	if (err)
		goto out_free_tp;

	tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
				 O_CLOEXEC);
	if (tp_fd < 0) {
		bpf_probe_unregister(raw_tp->btp, prog);
		err = tp_fd;
		goto out_free_tp;
	}
	return tp_fd;

out_free_tp:
	kfree(raw_tp);
out_put_btp:
	bpf_put_raw_tracepoint(btp);
out_put_prog:
	bpf_prog_put(prog);
	return err;
}

static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
					     enum bpf_attach_type attach_type)
{
	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
	case BPF_PROG_TYPE_CGROUP_SKB:
		return prog->enforce_expected_attach_type &&
		       prog->expected_attach_type != attach_type ?
			-EINVAL : 0;
	default:
		return 0;
	}
}

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

#define BPF_F_ATTACH_MASK \
	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_MSG_VERDICT:
		ptype = BPF_PROG_TYPE_SK_MSG;
		break;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		ptype = BPF_PROG_TYPE_SK_SKB;
		break;
	case BPF_LIRC_MODE2:
		ptype = BPF_PROG_TYPE_LIRC_MODE2;
		break;
	case BPF_FLOW_DISSECTOR:
		ptype = BPF_PROG_TYPE_FLOW_DISSECTOR;
		break;
	case BPF_CGROUP_SYSCTL:
		ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
		break;
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
		ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	switch (ptype) {
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		ret = sock_map_get_from_fd(attr, prog);
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		ret = lirc_prog_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		ret = skb_flow_dissector_bpf_prog_attach(attr, prog);
		break;
	default:
		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
	}

	if (ret)
		bpf_prog_put(prog);
	return ret;
}
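/*
 * Usage sketch (added comment, not kernel code): attaching a loaded cgroup
 * skb program goes through BPF_PROG_ATTACH.  target_fd is an open fd of the
 * cgroup directory (a union bpf_attr field consumed by
 * cgroup_bpf_prog_attach(), not shown in this file):
 *
 *	attr.target_fd     = cgroup_fd;             // open("/sys/fs/cgroup/...")
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;     // optional
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * The prog fd must have been loaded as BPF_PROG_TYPE_CGROUP_SKB, otherwise
 * bpf_prog_get_type() above fails with -EINVAL.
 */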
#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_MSG_VERDICT:
		return sock_map_get_from_fd(attr, NULL);
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sock_map_get_from_fd(attr, NULL);
	case BPF_LIRC_MODE2:
		return lirc_prog_detach(attr);
	case BPF_FLOW_DISSECTOR:
		return skb_flow_dissector_bpf_prog_detach(attr);
	case BPF_CGROUP_SYSCTL:
		ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
		break;
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
		ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
		break;
	default:
		return -EINVAL;
	}

	return cgroup_bpf_prog_detach(attr, ptype);
}

#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
	case BPF_CGROUP_SYSCTL:
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
		break;
	case BPF_LIRC_MODE2:
		return lirc_prog_query(attr, uattr);
	case BPF_FLOW_DISSECTOR:
		return skb_flow_dissector_prog_query(attr, uattr);
	default:
		return -EINVAL;
	}

	return cgroup_bpf_prog_query(attr, uattr);
}
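/*
 * Usage sketch (added comment, not kernel code): BPF_PROG_QUERY lists the
 * program IDs currently attached at a cgroup hook.  The id buffer and its
 * capacity are passed in the query sub-struct of union bpf_attr (fields
 * consumed by cgroup_bpf_prog_query(), not in this file):
 *
 *	__u32 ids[64];
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.query.prog_ids    = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt    = 64;
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *	// on return, attr.query.prog_cnt holds the number of IDs written
 *
 * Each ID can then be turned into an fd with BPF_PROG_GET_FD_BY_ID below.
 */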
#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
	case BPF_CGROUP_SYSCTL:
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
		break;
	case BPF_LIRC_MODE2:
		return lirc_prog_query(attr, uattr);
	case BPF_FLOW_DISSECTOR:
		return skb_flow_dissector_prog_query(attr, uattr);
	default:
		return -EINVAL;
	}

	return cgroup_bpf_prog_query(attr, uattr);
}

#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
	    (!attr->test.ctx_size_in && attr->test.ctx_in))
		return -EINVAL;

	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
	    (!attr->test.ctx_size_out && attr->test.ctx_out))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = __bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put_with_uref(map);

	return fd;
}

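/* Illustrative only: the GET_NEXT_ID and GET_FD_BY_ID commands above are
 * typically combined from user space to walk all loaded programs; the same
 * pattern works for maps via BPF_MAP_GET_NEXT_ID / BPF_MAP_GET_FD_BY_ID.
 * A minimal sketch, assuming a raw syscall(__NR_bpf, ...) invocation and
 * CAP_SYS_ADMIN; an -ENOENT from GET_NEXT_ID ends the walk, and a failed
 * GET_FD_BY_ID simply means the program went away in between.
 *
 *	__u32 id = 0;
 *	union bpf_attr attr;
 *	int fd;
 *
 *	for (;;) {
 *		memset(&attr, 0, sizeof(attr));
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *			    sizeof(attr)))
 *			break;
 *		id = attr.next_id;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_id = id;
 *		fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr,
 *			     sizeof(attr));
 *		if (fd < 0)
 *			continue;
 *		inspect_prog(fd);
 *		close(fd);
 *	}
 */
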
static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr, u32 *off,
					      u32 *type)
{
	const struct bpf_map *map;
	int i;

	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (map == (void *)addr) {
			*type = BPF_PSEUDO_MAP_FD;
			return map;
		}
		if (!map->ops->map_direct_value_meta)
			continue;
		if (!map->ops->map_direct_value_meta(map, addr, off)) {
			*type = BPF_PSEUDO_MAP_VALUE;
			return map;
		}
	}

	return NULL;
}

static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u32 off, type;
	u64 imm;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok())
				insns[i].imm = 0;
			continue;
		}

		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm, &off, &type);
		if (map) {
			insns[i].src_reg = type;
			insns[i].imm = map->id;
			insns[i + 1].imm = off;
			continue;
		}
	}

	return insns;
}

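/* Illustrative only: the rewrite above is what a user-space reader of the
 * xlated instruction dump observes.  A BPF_LD_IMM64 that held a kernel map
 * pointer comes back with src_reg set to BPF_PSEUDO_MAP_FD (or
 * BPF_PSEUDO_MAP_VALUE) and the map ID (plus value offset in the second
 * slot) in the immediates, so map IDs can be recovered from a dump along
 * these lines; insns/insn_cnt are assumed to have been copied out via
 * BPF_OBJ_GET_INFO_BY_FD, and <stdio.h> plus <linux/bpf.h> are assumed:
 *
 *	static void print_map_ids(const struct bpf_insn *insns, __u32 insn_cnt)
 *	{
 *		__u32 i;
 *
 *		for (i = 0; i < insn_cnt; i++) {
 *			if (insns[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 *			    insns[i].src_reg == BPF_PSEUDO_MAP_FD)
 *				printf("insn %u loads map id %u\n",
 *				       i, insns[i].imm);
 *		}
 *	}
 */
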
static int set_info_rec_size(struct bpf_prog_info *info)
{
	/*
	 * Ensure info.*_rec_size is the same as kernel expected size
	 *
	 * or
	 *
	 * Only allow zero *_rec_size if both _rec_size and _cnt are
	 * zero.  In this case, the kernel will set the expected
	 * _rec_size back to the info.
	 */

	if ((info->nr_func_info || info->func_info_rec_size) &&
	    info->func_info_rec_size != sizeof(struct bpf_func_info))
		return -EINVAL;

	if ((info->nr_line_info || info->line_info_rec_size) &&
	    info->line_info_rec_size != sizeof(struct bpf_line_info))
		return -EINVAL;

	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
	    info->jited_line_info_rec_size != sizeof(__u64))
		return -EINVAL;

	info->func_info_rec_size = sizeof(struct bpf_func_info);
	info->line_info_rec_size = sizeof(struct bpf_line_info);
	info->jited_line_info_rec_size = sizeof(__u64);

	return 0;
}

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	struct bpf_prog_stats stats;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);
	info.gpl_compatible = prog->gpl_compatible;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i]))
				return -EFAULT;
	}

	err = set_info_rec_size(&info);
	if (err)
		return err;

	bpf_prog_get_stats(prog, &stats);
	info.run_time_ns = stats.nsecs;
	info.run_cnt = stats.cnt;

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		info.nr_jited_ksyms = 0;
		info.nr_jited_func_lens = 0;
		info.nr_func_info = 0;
		info.nr_line_info = 0;
		info.nr_jited_line_info = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok()) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is supposed to be skipped for offload.
	 * bpf_prog_offload_info_fill() is the place to fill similar fields
	 * for offload.
	 */
	ulen = info.jited_prog_len;
	if (prog->aux->func_cnt) {
		u32 i;

		info.jited_prog_len = 0;
		for (i = 0; i < prog->aux->func_cnt; i++)
			info.jited_prog_len += prog->aux->func[i]->jited_len;
	} else {
		info.jited_prog_len = prog->jited_len;
	}

	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok()) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);

			/* for multi-function programs, copy the JITed
			 * instructions for all the functions
			 */
			if (prog->aux->func_cnt) {
				u32 len, free, i;
				u8 *img;

				free = ulen;
				for (i = 0; i < prog->aux->func_cnt; i++) {
					len = prog->aux->func[i]->jited_len;
					len = min_t(u32, len, free);
					img = (u8 *) prog->aux->func[i]->bpf_func;
					if (copy_to_user(uinsns, img, len))
						return -EFAULT;
					uinsns += len;
					free -= len;
					if (!free)
						break;
				}
			} else {
				if (copy_to_user(uinsns, prog->bpf_func, ulen))
					return -EFAULT;
			}
		} else {
			info.jited_prog_insns = 0;
		}
	}

	ulen = info.nr_jited_ksyms;
	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok()) {
			unsigned long ksym_addr;
			u64 __user *user_ksyms;
			u32 i;

			/* copy the address of the kernel symbol
			 * corresponding to each function
			 */
			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					ksym_addr = (unsigned long)
						prog->aux->func[i]->bpf_func;
					if (put_user((u64) ksym_addr,
						     &user_ksyms[i]))
						return -EFAULT;
				}
			} else {
				ksym_addr = (unsigned long) prog->bpf_func;
				if (put_user((u64) ksym_addr, &user_ksyms[0]))
					return -EFAULT;
			}
		} else {
			info.jited_ksyms = 0;
		}
	}

	ulen = info.nr_jited_func_lens;
	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok()) {
			u32 __user *user_lens;
			u32 func_len, i;

			/* copy the JITed image lengths for each function */
			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
			user_lens = u64_to_user_ptr(info.jited_func_lens);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					func_len =
						prog->aux->func[i]->jited_len;
					if (put_user(func_len, &user_lens[i]))
						return -EFAULT;
				}
			} else {
				func_len = prog->jited_len;
				if (put_user(func_len, &user_lens[0]))
					return -EFAULT;
			}
		} else {
			info.jited_func_lens = 0;
		}
	}

	if (prog->aux->btf)
		info.btf_id = btf_id(prog->aux->btf);

	ulen = info.nr_func_info;
	info.nr_func_info = prog->aux->func_info_cnt;
	if (info.nr_func_info && ulen) {
		char __user *user_finfo;

		user_finfo = u64_to_user_ptr(info.func_info);
		ulen = min_t(u32, info.nr_func_info, ulen);
		if (copy_to_user(user_finfo, prog->aux->func_info,
				 info.func_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_line_info;
	info.nr_line_info = prog->aux->nr_linfo;
	if (info.nr_line_info && ulen) {
		__u8 __user *user_linfo;

		user_linfo = u64_to_user_ptr(info.line_info);
		ulen = min_t(u32, info.nr_line_info, ulen);
		if (copy_to_user(user_linfo, prog->aux->linfo,
				 info.line_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_jited_line_info;
	if (prog->aux->jited_linfo)
		info.nr_jited_line_info = prog->aux->nr_linfo;
	else
		info.nr_jited_line_info = 0;
	if (info.nr_jited_line_info && ulen) {
		if (bpf_dump_raw_ok()) {
			__u64 __user *user_linfo;
			u32 i;

			user_linfo = u64_to_user_ptr(info.jited_line_info);
			ulen = min_t(u32, info.nr_jited_line_info, ulen);
			for (i = 0; i < ulen; i++) {
				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
					     &user_linfo[i]))
					return -EFAULT;
			}
		} else {
			info.jited_line_info = 0;
		}
	}

	ulen = info.nr_prog_tags;
	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
	if (ulen) {
		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
		u32 i;

		user_prog_tags = u64_to_user_ptr(info.prog_tags);
		ulen = min_t(u32, info.nr_prog_tags, ulen);
		if (prog->aux->func_cnt) {
			for (i = 0; i < ulen; i++) {
				if (copy_to_user(user_prog_tags[i],
						 prog->aux->func[i]->tag,
						 BPF_TAG_SIZE))
					return -EFAULT;
			}
		} else {
			if (copy_to_user(user_prog_tags[0],
					 prog->tag, BPF_TAG_SIZE))
				return -EFAULT;
		}
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
		info.btf_id = btf_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_btf_get_info_by_fd(struct btf *btf,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
	if (err)
		return err;

	return btf_get_info_by_fd(btf, attr, uattr);
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else if (f.file->f_op == &btf_fops)
		err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

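/* Illustrative only: BPF_OBJ_GET_INFO_BY_FD is normally driven as two
 * calls from user space.  The first call with a zeroed bpf_prog_info
 * reports the counters such as nr_map_ids; the caller then sizes its
 * buffers, points the corresponding fields at them and calls again.  A
 * minimal sketch for fetching a program's map IDs (prog_fd is assumed to
 * be held by the caller, and error handling is reduced to early returns):
 *
 *	struct bpf_prog_info info;
 *	union bpf_attr attr;
 *	__u32 *map_ids;
 *
 *	memset(&info, 0, sizeof(info));
 *	memset(&attr, 0, sizeof(attr));
 *	attr.info.bpf_fd = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
 *		return -1;
 *
 *	map_ids = calloc(info.nr_map_ids, sizeof(__u32));
 *	info.map_ids = (__u64)(unsigned long)map_ids;
 *	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
 *		return -1;
 */
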
#define BPF_BTF_LOAD_LAST_FIELD btf_log_level

static int bpf_btf_load(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_new_fd(attr);
}

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}

static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NULL terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NULL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NULL terminator,
			 * do a partial copy with NULL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}

#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct files_struct *files;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	if (!task)
		return -ENOENT;

	files = get_files_struct(task);
	put_task_struct(task);
	if (!files)
		return -ENOENT;

	err = 0;
	spin_lock(&files->file_lock);
	file = fcheck_files(files, fd);
	if (!file)
		err = -EBADF;
	else
		get_file(file);
	spin_unlock(&files->file_lock);
	put_files_struct(files);

	if (err)
		goto out;

	if (file->f_op == &bpf_raw_tp_fops) {
		struct bpf_raw_tracepoint *raw_tp = file->private_data;
		struct bpf_raw_event_map *btp = raw_tp->btp;

		err = bpf_task_fd_query_copy(attr, uattr,
					     raw_tp->prog->aux->id,
					     BPF_FD_TYPE_RAW_TRACEPOINT,
					     btp->tp->name, 0, 0);
		goto put_file;
	}

	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

	err = -ENOTSUPP;
put_file:
	fput(file);
out:
	return err;
}

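/* Illustrative only: BPF_TASK_FD_QUERY lets tooling discover which BPF
 * program sits behind a perf_event or raw_tracepoint fd owned by another
 * task.  A minimal sketch, with pid and fd assumed to come from the caller
 * (e.g. found by scanning /proc/<pid>/fd); buf_len is both an input (the
 * buffer size) and an output (the name length reported by the kernel):
 *
 *	char buf[256];
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.task_fd_query.pid = pid;
 *	attr.task_fd_query.fd = fd;
 *	attr.task_fd_query.buf = (__u64)(unsigned long)buf;
 *	attr.task_fd_query.buf_len = sizeof(buf);
 *	if (!syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr)))
 *		printf("prog id %u, fd type %u, name %s\n",
 *		       attr.task_fd_query.prog_id,
 *		       attr.task_fd_query.fd_type, buf);
 */
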
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
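
/* Illustrative only: user space typically has no libc wrapper for this
 * syscall, so callers usually go through a small helper along these lines
 * (the same shape as the wrapper shown in the bpf(2) man page):
 *
 *	#include <linux/bpf.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *				  unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 *
 * Passing a size smaller than sizeof(union bpf_attr) is accepted, and a
 * larger one only as long as the trailing bytes are zero (see
 * bpf_check_uarg_tail_zero() above).
 */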