/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
		return -EFAULT;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end  = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

void *bpf_map_area_alloc(size_t size, int numa_node)
{
	/* We definitely need __GFP_NORETRY, so the OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, GFP_USER | flags, numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
					   __builtin_return_address(0));
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = attr->map_flags;
	map->numa_node = bpf_map_attr_numa_node(attr);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT,
		   map->id);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
 * Return 0 on success and < 0 on error.
 */
static int bpf_obj_name_cpy(char *dst, const char *src)
{
	const char *end = src + BPF_OBJ_NAME_LEN;

	memset(dst, 0, BPF_OBJ_NAME_LEN);

	/* Copy all isalnum() and '_' char */
	while (src < end && *src) {
		if (!isalnum(*src) && *src != '_')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
	if (src == end)
		return -EINVAL;

	return 0;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name);
	if (err)
		goto free_map_nouncharge;

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	if (bpf_map_support_seq_show(map) &&
	    (attr->btf_key_type_id || attr->btf_value_type_id)) {
		struct btf *btf;

		if (!attr->btf_key_type_id || !attr->btf_value_type_id) {
			err = -EINVAL;
			goto free_map_nouncharge;
		}

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map_nouncharge;
		}

		err = map->ops->map_check_btf(map, btf, attr->btf_key_type_id,
					      attr->btf_value_type_id);
		if (err) {
			btf_put(btf);
			goto free_map_nouncharge;
		}

		map->btf = btf;
		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_sec:
	security_bpf_map_free(map);
free_map_nouncharge:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
		goto out;
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		goto out;
	}

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del_all(prog);

		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n"
		   "prog_id:\t%u\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT,
		   prog->aux->id);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_prog_show_fdinfo,
#endif
	.release = bpf_prog_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

bool bpf_prog_get_ok(struct bpf_prog *prog,
		     enum bpf_prog_type *attach_type, bool attach_drv)
{
	/* not an attachment, just a refcount inc, always allow */
	if (!attach_type)
		return true;

	if (prog->type != *attach_type)
		return false;
	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
		return false;

	return true;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	return __bpf_prog_get(ufd, &type, attach_drv);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);

/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later for some of them specifying expected_attach_type
 * at load time became required so that program could be validated properly.
 * Programs of types that are allowed to be loaded both w/ and w/o (for
 * backward compatibility) expected_attach_type, should have the default attach
 * type assigned to expected_attach_type for the latter case, so that it can be
 * validated later at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * prog type requires it but has some attach types that have to be backward
 * compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
		 * exist so checking for non-zero is the way to go here.
		 */
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_CGROUP_INET_SOCK_CREATE;
		break;
	}
}

static int
bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
				enum bpf_attach_type expected_attach_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
			return 0;
		default:
			return -EINVAL;
		}
	default:
		return 0;
	}
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD expected_attach_type

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	bpf_prog_load_fixup_attach_type(attr);
	if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
		return -EINVAL;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	prog->expected_attach_type = attr->expected_attach_type;

	prog->aux->offload_requested = !!attr->prog_ifindex;

	err = security_bpf_prog_alloc(prog->aux);
	if (err)
		goto free_prog_nouncharge;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_sec;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_init(prog, attr);
		if (err)
			goto free_prog;
	}

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	prog->aux->load_time = ktime_get_boot_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
	if (err)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	return err;

free_used_maps:
	bpf_prog_kallsyms_del_subprogs(prog);
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_sec:
	security_bpf_prog_free(prog->aux);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD file_flags

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
				attr->file_flags);
}

struct bpf_raw_tracepoint {
	struct bpf_raw_event_map *btp;
	struct bpf_prog *prog;
};

static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
{
	struct bpf_raw_tracepoint *raw_tp = filp->private_data;

	if (raw_tp->prog) {
		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
		bpf_prog_put(raw_tp->prog);
	}
	kfree(raw_tp);
	return 0;
}

static const struct file_operations bpf_raw_tp_fops = {
	.release = bpf_raw_tracepoint_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
};

#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd

static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{
	struct bpf_raw_tracepoint *raw_tp;
	struct bpf_raw_event_map *btp;
	struct bpf_prog *prog;
	char tp_name[128];
	int tp_fd, err;

	if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
			      sizeof(tp_name) - 1) < 0)
		return -EFAULT;
	tp_name[sizeof(tp_name) - 1] = 0;

	btp = bpf_find_raw_tracepoint(tp_name);
	if (!btp)
		return -ENOENT;

	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
	if (!raw_tp)
		return -ENOMEM;
	raw_tp->btp = btp;

	prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
				 BPF_PROG_TYPE_RAW_TRACEPOINT);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out_free_tp;
	}

	err = bpf_probe_register(raw_tp->btp, prog);
	if (err)
		goto out_put_prog;

	raw_tp->prog = prog;
	tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
				 O_CLOEXEC);
	if (tp_fd < 0) {
		bpf_probe_unregister(raw_tp->btp, prog);
		err = tp_fd;
		goto out_put_prog;
	}
	return tp_fd;

out_put_prog:
	bpf_prog_put(prog);
out_free_tp:
	kfree(raw_tp);
	return err;
}

static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
					     enum bpf_attach_type attach_type)
{
	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
	default:
		return 0;
	}
}

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

#define BPF_F_ATTACH_MASK \
	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_MSG_VERDICT:
		ptype = BPF_PROG_TYPE_SK_MSG;
		break;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		ptype = BPF_PROG_TYPE_SK_SKB;
		break;
	case BPF_LIRC_MODE2:
		ptype = BPF_PROG_TYPE_LIRC_MODE2;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	switch (ptype) {
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		ret = sockmap_get_from_fd(attr, ptype, prog);
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		ret = lirc_prog_attach(attr, prog);
		break;
	default:
		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
	}

	if (ret)
		bpf_prog_put(prog);
	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_MSG_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL);
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL);
	case BPF_LIRC_MODE2:
		return lirc_prog_detach(attr);
	default:
		return -EINVAL;
	}

	return cgroup_bpf_prog_detach(attr, ptype);
}

#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
		break;
	case BPF_LIRC_MODE2:
		return lirc_prog_query(attr, uattr);
	default:
		return -EINVAL;
	}

	return cgroup_bpf_prog_query(attr, uattr);
}

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr)
{
	int i;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (prog->aux->used_maps[i] == (void *)addr)
			return prog->aux->used_maps[i];
	return NULL;
}

static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u64 imm;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok())
				insns[i].imm = 0;
			continue;
		}

		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm);
		if (map) {
			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
			insns[i].imm = map->id;
			insns[i + 1].imm = 0;
			continue;
		}

		if (!bpf_dump_raw_ok() &&
		    imm == (unsigned long)prog->aux) {
			insns[i].imm = 0;
			insns[i + 1].imm = 0;
			continue;
		}
	}

	return insns;
}

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);
	info.gpl_compatible = prog->gpl_compatible;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i]))
				return -EFAULT;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		info.nr_jited_ksyms = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok()) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is supposed to be skipped for offload.
	 * bpf_prog_offload_info_fill() is the place to fill similar fields
	 * for offload.
	 */
	ulen = info.jited_prog_len;
	if (prog->aux->func_cnt) {
		u32 i;

		info.jited_prog_len = 0;
		for (i = 0; i < prog->aux->func_cnt; i++)
			info.jited_prog_len += prog->aux->func[i]->jited_len;
	} else {
		info.jited_prog_len = prog->jited_len;
	}

	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok()) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);

			/* for multi-function programs, copy the JITed
			 * instructions for all the functions
			 */
			if (prog->aux->func_cnt) {
				u32 len, free, i;
				u8 *img;

				free = ulen;
				for (i = 0; i < prog->aux->func_cnt; i++) {
					len = prog->aux->func[i]->jited_len;
					len = min_t(u32, len, free);
					img = (u8 *) prog->aux->func[i]->bpf_func;
					if (copy_to_user(uinsns, img, len))
						return -EFAULT;
					uinsns += len;
					free -= len;
					if (!free)
						break;
				}
			} else {
				if (copy_to_user(uinsns, prog->bpf_func, ulen))
					return -EFAULT;
			}
		} else {
			info.jited_prog_insns = 0;
		}
	}

	ulen = info.nr_jited_ksyms;
	info.nr_jited_ksyms = prog->aux->func_cnt;
	if (info.nr_jited_ksyms && ulen) {
		if (bpf_dump_raw_ok()) {
			u64 __user *user_ksyms;
			ulong ksym_addr;
			u32 i;

			/* copy the address of the kernel symbol
			 * corresponding to each function
			 */
			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
			for (i = 0; i < ulen; i++) {
				ksym_addr = (ulong) prog->aux->func[i]->bpf_func;
				ksym_addr &= PAGE_MASK;
				if (put_user((u64) ksym_addr, &user_ksyms[i]))
					return -EFAULT;
			}
		} else {
			info.jited_ksyms = 0;
		}
	}

	ulen = info.nr_jited_func_lens;
	info.nr_jited_func_lens = prog->aux->func_cnt;
	if (info.nr_jited_func_lens && ulen) {
		if (bpf_dump_raw_ok()) {
			u32 __user *user_lens;
			u32 func_len, i;

			/* copy the JITed image lengths for each function */
			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
			user_lens = u64_to_user_ptr(info.jited_func_lens);
			for (i = 0; i < ulen; i++) {
				func_len = prog->aux->func[i]->jited_len;
				if (put_user(func_len, &user_lens[i]))
					return -EFAULT;
			}
		} else {
			info.jited_func_lens = 0;
		}
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
		info.btf_id = btf_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_btf_get_info_by_fd(struct btf *btf,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
	if (err)
		return err;

	return btf_get_info_by_fd(btf, attr, uattr);
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else if (f.file->f_op == &btf_fops)
		err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

#define BPF_BTF_LOAD_LAST_FIELD btf_log_level

static int bpf_btf_load(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_new_fd(attr);
}

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}

static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NULL terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NULL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NULL terminator,
			 * do a partial copy with NULL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}

#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct files_struct *files;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	if (!task)
		return -ENOENT;

	files = get_files_struct(task);
	put_task_struct(task);
	if (!files)
		return -ENOENT;

	err = 0;
	spin_lock(&files->file_lock);
	file = fcheck_files(files, fd);
	if (!file)
		err = -EBADF;
	else
		get_file(file);
	spin_unlock(&files->file_lock);
	put_files_struct(files);

	if (err)
		goto out;

	if (file->f_op == &bpf_raw_tp_fops) {
		struct bpf_raw_tracepoint *raw_tp = file->private_data;
		struct bpf_raw_event_map *btp = raw_tp->btp;

		err = bpf_task_fd_query_copy(attr, uattr,
					     raw_tp->prog->aux->id,
					     BPF_FD_TYPE_RAW_TRACEPOINT,
					     btp->tp->name, 0, 0);
		goto put_file;
	}

	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

	err = -ENOTSUPP;
put_file:
	fput(file);
out:
	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}