// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <asm/pgtable.h>
#include <linux/bpf_lsm.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
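/* Illustrative sketch (not part of the original file): if an older kernel
 * expects a 16-byte attr for some command but newer user space passes 24
 * bytes, the call below succeeds only when the trailing 8 bytes are zero:
 *
 *	err = bpf_check_uarg_tail_zero(uaddr, 16, 24);
 *	// err == 0      -> unknown tail is zero, safe to copy_from_user()
 *	// err == -E2BIG -> user space set feature bits we don't know about
 */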
int bpf_check_uarg_tail_zero(void __user *uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (unlikely(!access_ok(uaddr, actual_size)))
		return -EFAULT;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}
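/* Size of the value buffer user space must supply: per-cpu maps expose
 * one value per possible CPU, each rounded up to 8 bytes, while fd-based
 * maps (maps-in-maps, prog/perf-event/cgroup arrays) work on a u32 - an
 * fd is passed in on update, and where lookup is supported an object id
 * is read back.
 */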
static u32 bpf_map_value_size(struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}
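/* Read-side counterpart of bpf_map_update_value(): dispatch on map type
 * and copy one element's value into a kernel buffer. Per-cpu maps gather
 * every CPU's copy, queue/stack maps peek at the head, and the generic
 * path copies under rcu_read_lock(), honouring BPF_F_LOCK for values
 * containing a bpf_spin_lock.
 */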
static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock, since value wasn't zero inited */
			check_and_init_map_lock(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}
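/* Illustrative sizing note (assuming 4 KiB pages): with
 * PAGE_ALLOC_COSTLY_ORDER == 3, requests up to 32 KiB attempt
 * kmalloc_node() first; larger requests, and all mmapable areas, go
 * straight to __vmalloc_node_range().
 */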
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, lets clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
}

static int bpf_charge_memlock(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
		atomic_long_sub(pages, &user->locked_vm);
		return -EPERM;
	}
	return 0;
}

static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
{
	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
	struct user_struct *user;
	int ret;

	if (size >= U32_MAX - PAGE_SIZE)
		return -E2BIG;

	user = get_current_user();
	ret = bpf_charge_memlock(user, pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	mem->pages = pages;
	mem->user = user;

	return 0;
}

void bpf_map_charge_finish(struct bpf_map_memory *mem)
{
	bpf_uncharge_memlock(mem->user, mem->pages);
	free_uid(mem->user);
}

void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src)
{
	*dst = *src;

	/* Make sure src will not be used for the redundant uncharging. */
	memset(src, 0, sizeof(struct bpf_map_memory));
}

int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
{
	int ret;

	ret = bpf_charge_memlock(map->memory.user, pages);
	if (ret)
		return ret;
	map->memory.pages += pages;
	return ret;
}

void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
{
	bpf_uncharge_memlock(map->memory.user, pages);
	map->memory.pages -= pages;
}
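/* Map IDs are handed out cyclically from [1, INT_MAX) under
 * map_idr_lock. GFP_ATOMIC suffices inside the spinlock because
 * idr_preload() has already pre-filled the per-cpu IDR cache.
 */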
static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct bpf_map_memory mem;

	bpf_map_charge_move(&mem, &map->memory);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
	bpf_map_charge_finish(&mem);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
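/* ->release() for the map fd: let the implementation detach anything
 * tied to this file (map_release) and drop both the uref and the refcnt
 * taken when the fd was created.
 */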
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 type = 0, jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		type = array->aux->type;
		jited = array->aux->jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->memory.pages * 1ULL << PAGE_SHIFT,
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE) {
		mutex_lock(&map->freeze_mutex);
		map->writecnt++;
		mutex_unlock(&map->freeze_mutex);
	}
}

/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE) {
		mutex_lock(&map->freeze_mutex);
		map->writecnt--;
		mutex_unlock(&map->freeze_mutex);
	}
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open	= bpf_map_mmap_open,
	.close	= bpf_map_mmap_close,
};
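/* mmap() of a map fd. Only shared mappings of map types providing
 * ->map_mmap are allowed; writable mappings are refused for frozen and
 * BPF_F_RDONLY_PROG maps, and map->writecnt tracks live writable
 * mappings so map_freeze() can reject maps still mapped read-write.
 */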
static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allows user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		map->writecnt++;
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}
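/* Illustrative expansion: with BPF_MAP_CREATE_LAST_FIELD defined as
 * btf_vmlinux_value_type_id, CHECK_ATTR(BPF_MAP_CREATE) scans every byte
 * of *attr past that field and evaluates to true (callers then return
 * -EINVAL) if user space set any bit the command doesn't use.
 */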
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	if (map->ops->map_check_btf)
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);

	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem;
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);

	map->spin_lock_off = -EINVAL;
	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct, the bpf_prog.o
	     * must have BTF to begin with to figure out the corresponding
	     * kernel counterpart. Thus, attr->btf_fd has to be valid too.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	bpf_map_charge_move(&mem, &map->memory);
	map->ops->map_free(map);
	bpf_map_charge_finish(&mem);
	return err;
}
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return memdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
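/* User-space view of the single-element commands (sketch, not kernel
 * code); attr fields other than the ones used by a command must be zero,
 * as enforced by CHECK_ATTR():
 *
 *	union bpf_attr attr = {
 *		.map_fd = fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */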
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
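/* Iteration primitive: a NULL key (ukey == NULL) returns the first key
 * in the map; feeding each result back in walks the whole map. Note that
 * concurrent updates may cause keys to be skipped or seen twice.
 */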
static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map))
		return -EINVAL;

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_dev_bound(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		maybe_wait_bpf_programs(map);
		if (err)
			break;
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kfree(key);
	return err;
}
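/* Batched update: processes up to batch.count elements and writes the
 * number actually handled back to uattr->batch.count, so user space can
 * observe partial progress when an element fails mid-way.
 */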
int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	int ufd = attr->map_fd;
	void *key, *value;
	struct fd f;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	/* The fd is pinned only for the f.file pointer handed to
	 * bpf_map_update_value() and must be put on every exit path.
	 */
	f = fdget(ufd);
	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key) {
		fdput(f);
		return -ENOMEM;
	}

	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kfree(key);
		fdput(f);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, f, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kfree(value);
	kfree(key);
	fdput(f);
	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
	}

	if (err == -EFAULT)
		goto free_buf;

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
	    (cp && copy_to_user(uobatch, prev_key, map->key_size)))
		err = -EFAULT;

free_buf:
	kfree(buf_prevkey);
	kfree(buf);
	return err;
}
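/* BPF_MAP_LOOKUP_AND_DELETE_ELEM is implemented via ->map_pop_elem(), so
 * at this point only BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK support
 * it; every other map type gets -ENOTSUPP.
 */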
#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else {
		err = -ENOTSUPP;
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		fdput(f);
		return -ENOTSUPP;
	}

	mutex_lock(&map->freeze_mutex);

	if (map->writecnt) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}
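/* Audit plumbing: LOAD is reported in the context of the loading
 * syscall, while UNLOAD may fire from deferred freeing and therefore
 * logs with a NULL audit context.
 */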
enum bpf_audit {
	BPF_AUDIT_LOAD,
	BPF_AUDIT_UNLOAD,
	BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
	[BPF_AUDIT_LOAD] = "LOAD",
	[BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
	struct audit_context *ctx = NULL;
	struct audit_buffer *ab;

	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
		return;
	if (audit_enabled == AUDIT_OFF)
		return;
	if (op == BPF_AUDIT_LOAD)
		ctx = audit_context();
	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "prog-id=%u op=%s",
			 prog->aux->id, bpf_audit_str[op]);
	audit_log_end(ab);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	kvfree(aux->func_info);
	kfree(aux->func_info_aux);
	bpf_prog_uncharge_memlock(aux->prog);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
{
	bpf_prog_kallsyms_del_all(prog);
	btf_put(prog->aux->btf);
	bpf_prog_free_linfo(prog);

	if (deferred)
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	else
		__bpf_prog_put_rcu(&prog->aux->rcu);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&prog->aux->refcnt)) {
		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
		bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		__bpf_prog_put_noref(prog, true);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}
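/* Fold the per-cpu run-time counters into one snapshot. The
 * u64_stats_fetch_begin/retry pair makes each CPU's (nsecs, cnt) pair
 * consistent on 32-bit architectures where 64-bit loads can tear.
 */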
static void bpf_prog_get_stats(const struct bpf_prog *prog,
			       struct bpf_prog_stats *stats)
{
	u64 nsecs = 0, cnt = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st;
		unsigned int start;
		u64 tnsecs, tcnt;

		st = per_cpu_ptr(prog->aux->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&st->syncp);
			tnsecs = st->nsecs;
			tcnt = st->cnt;
		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
		nsecs += tnsecs;
		cnt += tcnt;
	}
	stats->nsecs = nsecs;
	stats->cnt = cnt;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
	struct bpf_prog_stats stats;

	bpf_prog_get_stats(prog, &stats);
	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n"
		   "prog_id:\t%u\n"
		   "run_time_ns:\t%llu\n"
		   "run_cnt:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT,
		   prog->aux->id,
		   stats.nsecs,
		   stats.cnt);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
void bpf_prog_add(struct bpf_prog *prog, int i)
{
	atomic64_add(i, &prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

void bpf_prog_inc(struct bpf_prog *prog)
{
	atomic64_inc(&prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

bool bpf_prog_get_ok(struct bpf_prog *prog,
		     enum bpf_prog_type *attach_type, bool attach_drv)
{
	/* not an attachment, just a refcount inc, always allow */
	if (!attach_type)
		return true;

	if (prog->type != *attach_type)
		return false;
	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
		return false;

	return true;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	return __bpf_prog_get(ufd, &type, attach_drv);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);

/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later for some of them specifying expected_attach_type
 * at load time became required so that program could be validated properly.
 * Programs of types that are allowed to be loaded both w/ and w/o (for
 * backward compatibility) expected_attach_type, should have the default attach
 * type assigned to expected_attach_type for the latter case, so that it can be
 * validated later at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * prog type requires it but has some attach types that have to be backward
 * compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
		 * exist so checking for non-zero is the way to go here.
		 */
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_CGROUP_INET_SOCK_CREATE;
		break;
	}
}

static int
bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
			   enum bpf_attach_type expected_attach_type,
			   u32 btf_id, u32 prog_fd)
{
	if (btf_id) {
		if (btf_id > BTF_MAX_TYPE)
			return -EINVAL;

		switch (prog_type) {
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
		case BPF_PROG_TYPE_STRUCT_OPS:
		case BPF_PROG_TYPE_EXT:
			break;
		default:
			return -EINVAL;
		}
	}

	if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING &&
	    prog_type != BPF_PROG_TYPE_EXT)
		return -EINVAL;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SKB:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		switch (expected_attach_type) {
		case BPF_CGROUP_SETSOCKOPT:
		case BPF_CGROUP_GETSOCKOPT:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_EXT:
		if (expected_attach_type)
			return -EINVAL;
		/* fallthrough */
	default:
		return 0;
	}
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
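/* The load path in one glance: validate attr, copy license and
 * instructions from user space, charge memlock, run the verifier, pick
 * a runtime (interpreter or JIT), publish the prog ID and only then
 * hand an fd back. Once the ID is published, failure handling must go
 * through bpf_prog_put().
 */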
static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
				 BPF_F_ANY_ALIGNMENT |
				 BPF_F_TEST_STATE_FREQ |
				 BPF_F_TEST_RND_HI32))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 ||
	    attr->insn_cnt > (capable(CAP_SYS_ADMIN) ?
			      BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
		return -E2BIG;
	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	bpf_prog_load_fixup_attach_type(attr);
	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
				       attr->attach_btf_id,
				       attr->attach_prog_fd))
		return -EINVAL;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	prog->expected_attach_type = attr->expected_attach_type;
	prog->aux->attach_btf_id = attr->attach_btf_id;
	if (attr->attach_prog_fd) {
		struct bpf_prog *tgt_prog;

		tgt_prog = bpf_prog_get(attr->attach_prog_fd);
		if (IS_ERR(tgt_prog)) {
			err = PTR_ERR(tgt_prog);
			goto free_prog_nouncharge;
		}
		prog->aux->linked_prog = tgt_prog;
	}

	prog->aux->offload_requested = !!attr->prog_ifindex;

	err = security_bpf_prog_alloc(prog->aux);
	if (err)
		goto free_prog_nouncharge;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_sec;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic64_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_init(prog, attr);
		if (err)
			goto free_prog;
	}

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	prog->aux->load_time = ktime_get_boottime_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
			       sizeof(attr->prog_name));
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr, uattr);
	if (err < 0)
		goto free_used_maps;

	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
	 * effectively publicly exposed. However, retrieving via
	 * bpf_prog_get_fd_by_id() will take another reference,
	 * therefore it cannot be gone underneath us.
	 *
	 * Only for the time /after/ successful bpf_prog_new_fd()
	 * and before returning to userspace, we might just hold
	 * one reference and any parallel close on that fd could
	 * rip everything out. Hence, below notifications must
	 * happen before bpf_prog_new_fd().
	 *
	 * Also, any failure handling from this point onwards must
	 * be using bpf_prog_put() given the program is exposed.
	 */
	bpf_prog_kallsyms_add(prog);
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_LOAD);

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		bpf_prog_put(prog);
	return err;

free_used_maps:
	/* In case we have subprogs, we need to wait for a grace
	 * period before we can tear down JIT memory since symbols
	 * are already exposed under kallsyms.
	 */
	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
	return err;
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_sec:
	security_bpf_prog_free(prog->aux);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
 */
	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
	return err;
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_sec:
	security_bpf_prog_free(prog->aux);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD file_flags

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
				attr->file_flags);
}

void bpf_link_init(struct bpf_link *link, const struct bpf_link_ops *ops,
		   struct bpf_prog *prog)
{
	atomic64_set(&link->refcnt, 1);
	link->ops = ops;
	link->prog = prog;
}

/* Clean up bpf_link and its corresponding anon_inode file and FD. Once the
 * anon_inode has been created, the bpf_link can't simply be kfree()'d,
 * because the anon_inode's release() call is deferred. This helper marks
 * the bpf_link as defunct, releases the anon_inode file and puts the
 * reserved FD.
 */
void bpf_link_cleanup(struct bpf_link *link, struct file *link_file,
		      int link_fd)
{
	link->prog = NULL;
	fput(link_file);
	put_unused_fd(link_fd);
}

void bpf_link_inc(struct bpf_link *link)
{
	atomic64_inc(&link->refcnt);
}

/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
	if (link->prog) {
		/* detach BPF program, clean up used resources */
		link->ops->release(link);
		bpf_prog_put(link->prog);
	}
	/* free bpf_link and the memory containing it */
	link->ops->dealloc(link);
}

static void bpf_link_put_deferred(struct work_struct *work)
{
	struct bpf_link *link = container_of(work, struct bpf_link, work);

	bpf_link_free(link);
}

/* bpf_link_put can be called from atomic context, but ensures that resources
 * are freed from process context.
 */
void bpf_link_put(struct bpf_link *link)
{
	if (!atomic64_dec_and_test(&link->refcnt))
		return;

	if (in_atomic()) {
		INIT_WORK(&link->work, bpf_link_put_deferred);
		schedule_work(&link->work);
	} else {
		bpf_link_free(link);
	}
}

static int bpf_link_release(struct inode *inode, struct file *filp)
{
	struct bpf_link *link = filp->private_data;

	bpf_link_put(link);
	return 0;
}

#ifdef CONFIG_PROC_FS
static const struct bpf_link_ops bpf_raw_tp_lops;
static const struct bpf_link_ops bpf_tracing_link_lops;

static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_link *link = filp->private_data;
	const struct bpf_prog *prog = link->prog;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
	const char *link_type;

	if (link->ops == &bpf_raw_tp_lops)
		link_type = "raw_tracepoint";
	else if (link->ops == &bpf_tracing_link_lops)
		link_type = "tracing";
#ifdef CONFIG_CGROUP_BPF
	else if (link->ops == &bpf_cgroup_link_lops)
		link_type = "cgroup";
#endif
	else
		link_type = "unknown";

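	/* Render the program tag as hex and emit the fdinfo lines; the
	 * resulting /proc/<pid>/fdinfo/<fd> output looks like this
	 * (values are hypothetical):
	 *
	 *	link_type:	raw_tracepoint
	 *	prog_tag:	f5e382d2d697573a
	 *	prog_id:	17
	 */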
	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "link_type:\t%s\n"
		   "prog_tag:\t%s\n"
		   "prog_id:\t%u\n",
		   link_type,
		   prog_tag,
		   prog->aux->id);
}
#endif

static const struct file_operations bpf_link_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_link_show_fdinfo,
#endif
	.release = bpf_link_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
};

int bpf_link_new_fd(struct bpf_link *link)
{
	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
}

/* Similar to bpf_link_new_fd, create an anon_inode for the given bpf_link,
 * but instead of immediately installing the fd in the fdtable, just reserve
 * it and return. The caller then needs to either install it with
 * fd_install(fd, file) or release it with put_unused_fd(fd).
 * This is useful when bpf_link attachment/detachment is a complicated and
 * expensive operation that should be delayed until the fd reservation and
 * anon_inode creation have both succeeded.
 */
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return ERR_PTR(fd);

	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return file;
	}

	*reserved_fd = fd;
	return file;
}

struct bpf_link *bpf_link_get_from_fd(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_link *link;

	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_link_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	link = f.file->private_data;
	bpf_link_inc(link);
	fdput(f);

	return link;
}

struct bpf_tracing_link {
	struct bpf_link link;
};

static void bpf_tracing_link_release(struct bpf_link *link)
{
	WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog));
}

static void bpf_tracing_link_dealloc(struct bpf_link *link)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link);

	kfree(tr_link);
}

static const struct bpf_link_ops bpf_tracing_link_lops = {
	.release = bpf_tracing_link_release,
	.dealloc = bpf_tracing_link_dealloc,
};

static int bpf_tracing_prog_attach(struct bpf_prog *prog)
{
	struct bpf_tracing_link *link;
	struct file *link_file;
	int link_fd, err;

	switch (prog->type) {
	case BPF_PROG_TYPE_TRACING:
		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	case BPF_PROG_TYPE_EXT:
		if (prog->expected_attach_type != 0) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	case BPF_PROG_TYPE_LSM:
		if (prog->expected_attach_type != BPF_LSM_MAC) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	default:
		err = -EINVAL;
		goto out_put_prog;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_prog;
	}
	bpf_link_init(&link->link, &bpf_tracing_link_lops, prog);

	link_file = bpf_link_new_file(&link->link, &link_fd);
	if
(IS_ERR(link_file)) { 2425 kfree(link); 2426 err = PTR_ERR(link_file); 2427 goto out_put_prog; 2428 } 2429 2430 err = bpf_trampoline_link_prog(prog); 2431 if (err) { 2432 bpf_link_cleanup(&link->link, link_file, link_fd); 2433 goto out_put_prog; 2434 } 2435 2436 fd_install(link_fd, link_file); 2437 return link_fd; 2438 2439 out_put_prog: 2440 bpf_prog_put(prog); 2441 return err; 2442 } 2443 2444 struct bpf_raw_tp_link { 2445 struct bpf_link link; 2446 struct bpf_raw_event_map *btp; 2447 }; 2448 2449 static void bpf_raw_tp_link_release(struct bpf_link *link) 2450 { 2451 struct bpf_raw_tp_link *raw_tp = 2452 container_of(link, struct bpf_raw_tp_link, link); 2453 2454 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); 2455 bpf_put_raw_tracepoint(raw_tp->btp); 2456 } 2457 2458 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 2459 { 2460 struct bpf_raw_tp_link *raw_tp = 2461 container_of(link, struct bpf_raw_tp_link, link); 2462 2463 kfree(raw_tp); 2464 } 2465 2466 static const struct bpf_link_ops bpf_raw_tp_lops = { 2467 .release = bpf_raw_tp_link_release, 2468 .dealloc = bpf_raw_tp_link_dealloc, 2469 }; 2470 2471 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 2472 2473 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 2474 { 2475 struct bpf_raw_tp_link *link; 2476 struct bpf_raw_event_map *btp; 2477 struct file *link_file; 2478 struct bpf_prog *prog; 2479 const char *tp_name; 2480 char buf[128]; 2481 int link_fd, err; 2482 2483 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 2484 return -EINVAL; 2485 2486 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 2487 if (IS_ERR(prog)) 2488 return PTR_ERR(prog); 2489 2490 switch (prog->type) { 2491 case BPF_PROG_TYPE_TRACING: 2492 case BPF_PROG_TYPE_EXT: 2493 case BPF_PROG_TYPE_LSM: 2494 if (attr->raw_tracepoint.name) { 2495 /* The attach point for this category of programs 2496 * should be specified via btf_id during program load. 
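			 * A raw tracepoint name given here would be
			 * redundant with (and could contradict) the attach
			 * target already fixed via attr->attach_btf_id at
			 * load time, hence the -EINVAL below.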
2497 */ 2498 err = -EINVAL; 2499 goto out_put_prog; 2500 } 2501 if (prog->type == BPF_PROG_TYPE_TRACING && 2502 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 2503 tp_name = prog->aux->attach_func_name; 2504 break; 2505 } 2506 return bpf_tracing_prog_attach(prog); 2507 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2508 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2509 if (strncpy_from_user(buf, 2510 u64_to_user_ptr(attr->raw_tracepoint.name), 2511 sizeof(buf) - 1) < 0) { 2512 err = -EFAULT; 2513 goto out_put_prog; 2514 } 2515 buf[sizeof(buf) - 1] = 0; 2516 tp_name = buf; 2517 break; 2518 default: 2519 err = -EINVAL; 2520 goto out_put_prog; 2521 } 2522 2523 btp = bpf_get_raw_tracepoint(tp_name); 2524 if (!btp) { 2525 err = -ENOENT; 2526 goto out_put_prog; 2527 } 2528 2529 link = kzalloc(sizeof(*link), GFP_USER); 2530 if (!link) { 2531 err = -ENOMEM; 2532 goto out_put_btp; 2533 } 2534 bpf_link_init(&link->link, &bpf_raw_tp_lops, prog); 2535 link->btp = btp; 2536 2537 link_file = bpf_link_new_file(&link->link, &link_fd); 2538 if (IS_ERR(link_file)) { 2539 kfree(link); 2540 err = PTR_ERR(link_file); 2541 goto out_put_btp; 2542 } 2543 2544 err = bpf_probe_register(link->btp, prog); 2545 if (err) { 2546 bpf_link_cleanup(&link->link, link_file, link_fd); 2547 goto out_put_btp; 2548 } 2549 2550 fd_install(link_fd, link_file); 2551 return link_fd; 2552 2553 out_put_btp: 2554 bpf_put_raw_tracepoint(btp); 2555 out_put_prog: 2556 bpf_prog_put(prog); 2557 return err; 2558 } 2559 2560 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 2561 enum bpf_attach_type attach_type) 2562 { 2563 switch (prog->type) { 2564 case BPF_PROG_TYPE_CGROUP_SOCK: 2565 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2566 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2567 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 2568 case BPF_PROG_TYPE_CGROUP_SKB: 2569 return prog->enforce_expected_attach_type && 2570 prog->expected_attach_type != attach_type ? 
		       -EINVAL : 0;
	default:
		return 0;
	}
}

static enum bpf_prog_type
attach_type_to_prog_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		return BPF_PROG_TYPE_CGROUP_SKB;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		return BPF_PROG_TYPE_CGROUP_SOCK;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
	case BPF_CGROUP_SOCK_OPS:
		return BPF_PROG_TYPE_SOCK_OPS;
	case BPF_CGROUP_DEVICE:
		return BPF_PROG_TYPE_CGROUP_DEVICE;
	case BPF_SK_MSG_VERDICT:
		return BPF_PROG_TYPE_SK_MSG;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return BPF_PROG_TYPE_SK_SKB;
	case BPF_LIRC_MODE2:
		return BPF_PROG_TYPE_LIRC_MODE2;
	case BPF_FLOW_DISSECTOR:
		return BPF_PROG_TYPE_FLOW_DISSECTOR;
	case BPF_CGROUP_SYSCTL:
		return BPF_PROG_TYPE_CGROUP_SYSCTL;
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
	default:
		return BPF_PROG_TYPE_UNSPEC;
	}
}

#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd

#define BPF_F_ATTACH_MASK \
	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
		return -EINVAL;

	ptype = attach_type_to_prog_type(attr->attach_type);
	if (ptype == BPF_PROG_TYPE_UNSPEC)
		return -EINVAL;

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	switch (ptype) {
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		ret = sock_map_get_from_fd(attr, prog);
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		ret = lirc_prog_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		ret = skb_flow_dissector_bpf_prog_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		bpf_prog_put(prog);
	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	ptype = attach_type_to_prog_type(attr->attach_type);

	switch (ptype) {
	case BPF_PROG_TYPE_SK_MSG:
	case
BPF_PROG_TYPE_SK_SKB: 2700 return sock_map_get_from_fd(attr, NULL); 2701 case BPF_PROG_TYPE_LIRC_MODE2: 2702 return lirc_prog_detach(attr); 2703 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2704 return skb_flow_dissector_bpf_prog_detach(attr); 2705 case BPF_PROG_TYPE_CGROUP_DEVICE: 2706 case BPF_PROG_TYPE_CGROUP_SKB: 2707 case BPF_PROG_TYPE_CGROUP_SOCK: 2708 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2709 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2710 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2711 case BPF_PROG_TYPE_SOCK_OPS: 2712 return cgroup_bpf_prog_detach(attr, ptype); 2713 default: 2714 return -EINVAL; 2715 } 2716 } 2717 2718 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt 2719 2720 static int bpf_prog_query(const union bpf_attr *attr, 2721 union bpf_attr __user *uattr) 2722 { 2723 if (!capable(CAP_NET_ADMIN)) 2724 return -EPERM; 2725 if (CHECK_ATTR(BPF_PROG_QUERY)) 2726 return -EINVAL; 2727 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 2728 return -EINVAL; 2729 2730 switch (attr->query.attach_type) { 2731 case BPF_CGROUP_INET_INGRESS: 2732 case BPF_CGROUP_INET_EGRESS: 2733 case BPF_CGROUP_INET_SOCK_CREATE: 2734 case BPF_CGROUP_INET4_BIND: 2735 case BPF_CGROUP_INET6_BIND: 2736 case BPF_CGROUP_INET4_POST_BIND: 2737 case BPF_CGROUP_INET6_POST_BIND: 2738 case BPF_CGROUP_INET4_CONNECT: 2739 case BPF_CGROUP_INET6_CONNECT: 2740 case BPF_CGROUP_UDP4_SENDMSG: 2741 case BPF_CGROUP_UDP6_SENDMSG: 2742 case BPF_CGROUP_UDP4_RECVMSG: 2743 case BPF_CGROUP_UDP6_RECVMSG: 2744 case BPF_CGROUP_SOCK_OPS: 2745 case BPF_CGROUP_DEVICE: 2746 case BPF_CGROUP_SYSCTL: 2747 case BPF_CGROUP_GETSOCKOPT: 2748 case BPF_CGROUP_SETSOCKOPT: 2749 return cgroup_bpf_prog_query(attr, uattr); 2750 case BPF_LIRC_MODE2: 2751 return lirc_prog_query(attr, uattr); 2752 case BPF_FLOW_DISSECTOR: 2753 return skb_flow_dissector_prog_query(attr, uattr); 2754 default: 2755 return -EINVAL; 2756 } 2757 } 2758 2759 #define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out 2760 2761 static int bpf_prog_test_run(const union bpf_attr *attr, 2762 union bpf_attr __user *uattr) 2763 { 2764 struct bpf_prog *prog; 2765 int ret = -ENOTSUPP; 2766 2767 if (!capable(CAP_SYS_ADMIN)) 2768 return -EPERM; 2769 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 2770 return -EINVAL; 2771 2772 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 2773 (!attr->test.ctx_size_in && attr->test.ctx_in)) 2774 return -EINVAL; 2775 2776 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 2777 (!attr->test.ctx_size_out && attr->test.ctx_out)) 2778 return -EINVAL; 2779 2780 prog = bpf_prog_get(attr->test.prog_fd); 2781 if (IS_ERR(prog)) 2782 return PTR_ERR(prog); 2783 2784 if (prog->aux->ops->test_run) 2785 ret = prog->aux->ops->test_run(prog, attr, uattr); 2786 2787 bpf_prog_put(prog); 2788 return ret; 2789 } 2790 2791 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 2792 2793 static int bpf_obj_get_next_id(const union bpf_attr *attr, 2794 union bpf_attr __user *uattr, 2795 struct idr *idr, 2796 spinlock_t *lock) 2797 { 2798 u32 next_id = attr->start_id; 2799 int err = 0; 2800 2801 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 2802 return -EINVAL; 2803 2804 if (!capable(CAP_SYS_ADMIN)) 2805 return -EPERM; 2806 2807 next_id++; 2808 spin_lock_bh(lock); 2809 if (!idr_get_next(idr, &next_id)) 2810 err = -ENOENT; 2811 spin_unlock_bh(lock); 2812 2813 if (!err) 2814 err = put_user(next_id, &uattr->next_id); 2815 2816 return err; 2817 } 2818 2819 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 2820 2821 struct bpf_prog *bpf_prog_by_id(u32 id) 2822 { 2823 struct bpf_prog *prog; 2824 2825 if (!id) 2826 
return ERR_PTR(-ENOENT); 2827 2828 spin_lock_bh(&prog_idr_lock); 2829 prog = idr_find(&prog_idr, id); 2830 if (prog) 2831 prog = bpf_prog_inc_not_zero(prog); 2832 else 2833 prog = ERR_PTR(-ENOENT); 2834 spin_unlock_bh(&prog_idr_lock); 2835 return prog; 2836 } 2837 2838 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 2839 { 2840 struct bpf_prog *prog; 2841 u32 id = attr->prog_id; 2842 int fd; 2843 2844 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 2845 return -EINVAL; 2846 2847 if (!capable(CAP_SYS_ADMIN)) 2848 return -EPERM; 2849 2850 prog = bpf_prog_by_id(id); 2851 if (IS_ERR(prog)) 2852 return PTR_ERR(prog); 2853 2854 fd = bpf_prog_new_fd(prog); 2855 if (fd < 0) 2856 bpf_prog_put(prog); 2857 2858 return fd; 2859 } 2860 2861 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 2862 2863 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 2864 { 2865 struct bpf_map *map; 2866 u32 id = attr->map_id; 2867 int f_flags; 2868 int fd; 2869 2870 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 2871 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 2872 return -EINVAL; 2873 2874 if (!capable(CAP_SYS_ADMIN)) 2875 return -EPERM; 2876 2877 f_flags = bpf_get_file_flag(attr->open_flags); 2878 if (f_flags < 0) 2879 return f_flags; 2880 2881 spin_lock_bh(&map_idr_lock); 2882 map = idr_find(&map_idr, id); 2883 if (map) 2884 map = __bpf_map_inc_not_zero(map, true); 2885 else 2886 map = ERR_PTR(-ENOENT); 2887 spin_unlock_bh(&map_idr_lock); 2888 2889 if (IS_ERR(map)) 2890 return PTR_ERR(map); 2891 2892 fd = bpf_map_new_fd(map, f_flags); 2893 if (fd < 0) 2894 bpf_map_put_with_uref(map); 2895 2896 return fd; 2897 } 2898 2899 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 2900 unsigned long addr, u32 *off, 2901 u32 *type) 2902 { 2903 const struct bpf_map *map; 2904 int i; 2905 2906 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 2907 map = prog->aux->used_maps[i]; 2908 if (map == (void *)addr) { 2909 *type = BPF_PSEUDO_MAP_FD; 2910 return map; 2911 } 2912 if (!map->ops->map_direct_value_meta) 2913 continue; 2914 if (!map->ops->map_direct_value_meta(map, addr, off)) { 2915 *type = BPF_PSEUDO_MAP_VALUE; 2916 return map; 2917 } 2918 } 2919 2920 return NULL; 2921 } 2922 2923 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) 2924 { 2925 const struct bpf_map *map; 2926 struct bpf_insn *insns; 2927 u32 off, type; 2928 u64 imm; 2929 int i; 2930 2931 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 2932 GFP_USER); 2933 if (!insns) 2934 return insns; 2935 2936 for (i = 0; i < prog->len; i++) { 2937 if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) { 2938 insns[i].code = BPF_JMP | BPF_CALL; 2939 insns[i].imm = BPF_FUNC_tail_call; 2940 /* fall-through */ 2941 } 2942 if (insns[i].code == (BPF_JMP | BPF_CALL) || 2943 insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) { 2944 if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) 2945 insns[i].code = BPF_JMP | BPF_CALL; 2946 if (!bpf_dump_raw_ok()) 2947 insns[i].imm = 0; 2948 continue; 2949 } 2950 2951 if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW)) 2952 continue; 2953 2954 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 2955 map = bpf_map_from_imm(prog, imm, &off, &type); 2956 if (map) { 2957 insns[i].src_reg = type; 2958 insns[i].imm = map->id; 2959 insns[i + 1].imm = off; 2960 continue; 2961 } 2962 } 2963 2964 return insns; 2965 } 2966 2967 static int set_info_rec_size(struct bpf_prog_info *info) 2968 { 2969 /* 2970 * Ensure info.*_rec_size is the same as kernel expected size 2971 * 2972 * or 2973 * 2974 * Only allow 
zero *_rec_size if both _rec_size and _cnt are 2975 * zero. In this case, the kernel will set the expected 2976 * _rec_size back to the info. 2977 */ 2978 2979 if ((info->nr_func_info || info->func_info_rec_size) && 2980 info->func_info_rec_size != sizeof(struct bpf_func_info)) 2981 return -EINVAL; 2982 2983 if ((info->nr_line_info || info->line_info_rec_size) && 2984 info->line_info_rec_size != sizeof(struct bpf_line_info)) 2985 return -EINVAL; 2986 2987 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 2988 info->jited_line_info_rec_size != sizeof(__u64)) 2989 return -EINVAL; 2990 2991 info->func_info_rec_size = sizeof(struct bpf_func_info); 2992 info->line_info_rec_size = sizeof(struct bpf_line_info); 2993 info->jited_line_info_rec_size = sizeof(__u64); 2994 2995 return 0; 2996 } 2997 2998 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, 2999 const union bpf_attr *attr, 3000 union bpf_attr __user *uattr) 3001 { 3002 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3003 struct bpf_prog_info info; 3004 u32 info_len = attr->info.info_len; 3005 struct bpf_prog_stats stats; 3006 char __user *uinsns; 3007 u32 ulen; 3008 int err; 3009 3010 err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); 3011 if (err) 3012 return err; 3013 info_len = min_t(u32, sizeof(info), info_len); 3014 3015 memset(&info, 0, sizeof(info)); 3016 if (copy_from_user(&info, uinfo, info_len)) 3017 return -EFAULT; 3018 3019 info.type = prog->type; 3020 info.id = prog->aux->id; 3021 info.load_time = prog->aux->load_time; 3022 info.created_by_uid = from_kuid_munged(current_user_ns(), 3023 prog->aux->user->uid); 3024 info.gpl_compatible = prog->gpl_compatible; 3025 3026 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 3027 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 3028 3029 ulen = info.nr_map_ids; 3030 info.nr_map_ids = prog->aux->used_map_cnt; 3031 ulen = min_t(u32, info.nr_map_ids, ulen); 3032 if (ulen) { 3033 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 3034 u32 i; 3035 3036 for (i = 0; i < ulen; i++) 3037 if (put_user(prog->aux->used_maps[i]->id, 3038 &user_map_ids[i])) 3039 return -EFAULT; 3040 } 3041 3042 err = set_info_rec_size(&info); 3043 if (err) 3044 return err; 3045 3046 bpf_prog_get_stats(prog, &stats); 3047 info.run_time_ns = stats.nsecs; 3048 info.run_cnt = stats.cnt; 3049 3050 if (!capable(CAP_SYS_ADMIN)) { 3051 info.jited_prog_len = 0; 3052 info.xlated_prog_len = 0; 3053 info.nr_jited_ksyms = 0; 3054 info.nr_jited_func_lens = 0; 3055 info.nr_func_info = 0; 3056 info.nr_line_info = 0; 3057 info.nr_jited_line_info = 0; 3058 goto done; 3059 } 3060 3061 ulen = info.xlated_prog_len; 3062 info.xlated_prog_len = bpf_prog_insn_size(prog); 3063 if (info.xlated_prog_len && ulen) { 3064 struct bpf_insn *insns_sanitized; 3065 bool fault; 3066 3067 if (prog->blinded && !bpf_dump_raw_ok()) { 3068 info.xlated_prog_insns = 0; 3069 goto done; 3070 } 3071 insns_sanitized = bpf_insn_prepare_dump(prog); 3072 if (!insns_sanitized) 3073 return -ENOMEM; 3074 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 3075 ulen = min_t(u32, info.xlated_prog_len, ulen); 3076 fault = copy_to_user(uinsns, insns_sanitized, ulen); 3077 kfree(insns_sanitized); 3078 if (fault) 3079 return -EFAULT; 3080 } 3081 3082 if (bpf_prog_is_dev_bound(prog->aux)) { 3083 err = bpf_prog_offload_info_fill(&info, prog); 3084 if (err) 3085 return err; 3086 goto done; 3087 } 3088 3089 /* NOTE: the following code is supposed to be skipped for offload. 
3090 * bpf_prog_offload_info_fill() is the place to fill similar fields 3091 * for offload. 3092 */ 3093 ulen = info.jited_prog_len; 3094 if (prog->aux->func_cnt) { 3095 u32 i; 3096 3097 info.jited_prog_len = 0; 3098 for (i = 0; i < prog->aux->func_cnt; i++) 3099 info.jited_prog_len += prog->aux->func[i]->jited_len; 3100 } else { 3101 info.jited_prog_len = prog->jited_len; 3102 } 3103 3104 if (info.jited_prog_len && ulen) { 3105 if (bpf_dump_raw_ok()) { 3106 uinsns = u64_to_user_ptr(info.jited_prog_insns); 3107 ulen = min_t(u32, info.jited_prog_len, ulen); 3108 3109 /* for multi-function programs, copy the JITed 3110 * instructions for all the functions 3111 */ 3112 if (prog->aux->func_cnt) { 3113 u32 len, free, i; 3114 u8 *img; 3115 3116 free = ulen; 3117 for (i = 0; i < prog->aux->func_cnt; i++) { 3118 len = prog->aux->func[i]->jited_len; 3119 len = min_t(u32, len, free); 3120 img = (u8 *) prog->aux->func[i]->bpf_func; 3121 if (copy_to_user(uinsns, img, len)) 3122 return -EFAULT; 3123 uinsns += len; 3124 free -= len; 3125 if (!free) 3126 break; 3127 } 3128 } else { 3129 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 3130 return -EFAULT; 3131 } 3132 } else { 3133 info.jited_prog_insns = 0; 3134 } 3135 } 3136 3137 ulen = info.nr_jited_ksyms; 3138 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 3139 if (ulen) { 3140 if (bpf_dump_raw_ok()) { 3141 unsigned long ksym_addr; 3142 u64 __user *user_ksyms; 3143 u32 i; 3144 3145 /* copy the address of the kernel symbol 3146 * corresponding to each function 3147 */ 3148 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 3149 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 3150 if (prog->aux->func_cnt) { 3151 for (i = 0; i < ulen; i++) { 3152 ksym_addr = (unsigned long) 3153 prog->aux->func[i]->bpf_func; 3154 if (put_user((u64) ksym_addr, 3155 &user_ksyms[i])) 3156 return -EFAULT; 3157 } 3158 } else { 3159 ksym_addr = (unsigned long) prog->bpf_func; 3160 if (put_user((u64) ksym_addr, &user_ksyms[0])) 3161 return -EFAULT; 3162 } 3163 } else { 3164 info.jited_ksyms = 0; 3165 } 3166 } 3167 3168 ulen = info.nr_jited_func_lens; 3169 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 3170 if (ulen) { 3171 if (bpf_dump_raw_ok()) { 3172 u32 __user *user_lens; 3173 u32 func_len, i; 3174 3175 /* copy the JITed image lengths for each function */ 3176 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 3177 user_lens = u64_to_user_ptr(info.jited_func_lens); 3178 if (prog->aux->func_cnt) { 3179 for (i = 0; i < ulen; i++) { 3180 func_len = 3181 prog->aux->func[i]->jited_len; 3182 if (put_user(func_len, &user_lens[i])) 3183 return -EFAULT; 3184 } 3185 } else { 3186 func_len = prog->jited_len; 3187 if (put_user(func_len, &user_lens[0])) 3188 return -EFAULT; 3189 } 3190 } else { 3191 info.jited_func_lens = 0; 3192 } 3193 } 3194 3195 if (prog->aux->btf) 3196 info.btf_id = btf_id(prog->aux->btf); 3197 3198 ulen = info.nr_func_info; 3199 info.nr_func_info = prog->aux->func_info_cnt; 3200 if (info.nr_func_info && ulen) { 3201 char __user *user_finfo; 3202 3203 user_finfo = u64_to_user_ptr(info.func_info); 3204 ulen = min_t(u32, info.nr_func_info, ulen); 3205 if (copy_to_user(user_finfo, prog->aux->func_info, 3206 info.func_info_rec_size * ulen)) 3207 return -EFAULT; 3208 } 3209 3210 ulen = info.nr_line_info; 3211 info.nr_line_info = prog->aux->nr_linfo; 3212 if (info.nr_line_info && ulen) { 3213 __u8 __user *user_linfo; 3214 3215 user_linfo = u64_to_user_ptr(info.line_info); 3216 ulen = min_t(u32, info.nr_line_info, ulen); 3217 if (copy_to_user(user_linfo, prog->aux->linfo, 3218 info.line_info_rec_size * ulen)) 3219 return -EFAULT; 3220 } 3221 3222 ulen = info.nr_jited_line_info; 3223 if (prog->aux->jited_linfo) 3224 info.nr_jited_line_info = prog->aux->nr_linfo; 3225 else 3226 info.nr_jited_line_info = 0; 3227 if (info.nr_jited_line_info && ulen) { 3228 if (bpf_dump_raw_ok()) { 3229 __u64 __user *user_linfo; 3230 u32 i; 3231 3232 user_linfo = u64_to_user_ptr(info.jited_line_info); 3233 ulen = min_t(u32, info.nr_jited_line_info, ulen); 3234 for (i = 0; i < ulen; i++) { 3235 if (put_user((__u64)(long)prog->aux->jited_linfo[i], 3236 &user_linfo[i])) 3237 return -EFAULT; 3238 } 3239 } else { 3240 info.jited_line_info = 0; 3241 } 3242 } 3243 3244 ulen = info.nr_prog_tags; 3245 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 3246 if (ulen) { 3247 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 3248 u32 i; 3249 3250 user_prog_tags = u64_to_user_ptr(info.prog_tags); 3251 ulen = min_t(u32, info.nr_prog_tags, ulen); 3252 if (prog->aux->func_cnt) { 3253 for (i = 0; i < ulen; i++) { 3254 if (copy_to_user(user_prog_tags[i], 3255 prog->aux->func[i]->tag, 3256 BPF_TAG_SIZE)) 3257 return -EFAULT; 3258 } 3259 } else { 3260 if (copy_to_user(user_prog_tags[0], 3261 prog->tag, BPF_TAG_SIZE)) 3262 return -EFAULT; 3263 } 3264 } 3265 3266 done: 3267 if (copy_to_user(uinfo, &info, info_len) || 3268 put_user(info_len, &uattr->info.info_len)) 3269 return -EFAULT; 3270 3271 return 0; 3272 } 3273 3274 static int bpf_map_get_info_by_fd(struct bpf_map *map, 3275 const union bpf_attr *attr, 3276 union bpf_attr __user *uattr) 3277 { 3278 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3279 struct bpf_map_info info; 3280 u32 info_len = attr->info.info_len; 3281 int err; 3282 3283 err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); 3284 if (err) 3285 return err; 3286 info_len = min_t(u32, sizeof(info), info_len); 3287 3288 memset(&info, 0, sizeof(info)); 3289 info.type = map->map_type; 3290 info.id = map->id; 3291 info.key_size = map->key_size; 3292 info.value_size = map->value_size; 3293 info.max_entries = map->max_entries; 3294 info.map_flags = map->map_flags; 3295 memcpy(info.name, map->name, sizeof(map->name)); 3296 3297 if (map->btf) { 3298 info.btf_id = btf_id(map->btf); 3299 info.btf_key_type_id = map->btf_key_type_id; 3300 info.btf_value_type_id = map->btf_value_type_id; 3301 } 3302 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 3303 3304 if (bpf_map_is_dev_bound(map)) { 3305 err = bpf_map_offload_info_fill(&info, map); 3306 if (err) 3307 return err; 3308 } 3309 3310 if (copy_to_user(uinfo, &info, info_len) || 3311 put_user(info_len, &uattr->info.info_len)) 3312 return -EFAULT; 3313 3314 return 0; 3315 } 3316 3317 static int bpf_btf_get_info_by_fd(struct btf *btf, 3318 const union bpf_attr *attr, 3319 union bpf_attr __user *uattr) 3320 { 3321 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3322 u32 info_len = attr->info.info_len; 3323 int err; 3324 3325 err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len); 3326 if (err) 3327 return err; 3328 3329 return btf_get_info_by_fd(btf, attr, uattr); 3330 } 3331 3332 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 3333 3334 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 3335 union bpf_attr __user *uattr) 3336 { 3337 int ufd = attr->info.bpf_fd; 3338 struct fd f; 3339 int err; 3340 3341 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 3342 return -EINVAL; 3343 3344 f = fdget(ufd); 3345 if (!f.file) 3346 return -EBADFD; 3347 3348 if (f.file->f_op == &bpf_prog_fops) 3349 err = bpf_prog_get_info_by_fd(f.file->private_data, attr, 3350 uattr); 3351 else if (f.file->f_op == &bpf_map_fops) 3352 err = bpf_map_get_info_by_fd(f.file->private_data, attr, 3353 uattr); 3354 else if (f.file->f_op == &btf_fops) 3355 err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr); 3356 else 3357 err = -EINVAL; 3358 3359 fdput(f); 3360 return err; 3361 } 3362 3363 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level 3364 3365 static int bpf_btf_load(const union bpf_attr *attr) 3366 { 3367 if (CHECK_ATTR(BPF_BTF_LOAD)) 3368 return -EINVAL; 3369 3370 if (!capable(CAP_SYS_ADMIN)) 3371 return -EPERM; 3372 3373 return btf_new_fd(attr); 3374 } 3375 3376 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id 3377 3378 static int 
bpf_btf_get_fd_by_id(const union bpf_attr *attr) 3379 { 3380 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 3381 return -EINVAL; 3382 3383 if (!capable(CAP_SYS_ADMIN)) 3384 return -EPERM; 3385 3386 return btf_get_fd_by_id(attr->btf_id); 3387 } 3388 3389 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 3390 union bpf_attr __user *uattr, 3391 u32 prog_id, u32 fd_type, 3392 const char *buf, u64 probe_offset, 3393 u64 probe_addr) 3394 { 3395 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 3396 u32 len = buf ? strlen(buf) : 0, input_len; 3397 int err = 0; 3398 3399 if (put_user(len, &uattr->task_fd_query.buf_len)) 3400 return -EFAULT; 3401 input_len = attr->task_fd_query.buf_len; 3402 if (input_len && ubuf) { 3403 if (!len) { 3404 /* nothing to copy, just make ubuf NULL terminated */ 3405 char zero = '\0'; 3406 3407 if (put_user(zero, ubuf)) 3408 return -EFAULT; 3409 } else if (input_len >= len + 1) { 3410 /* ubuf can hold the string with NULL terminator */ 3411 if (copy_to_user(ubuf, buf, len + 1)) 3412 return -EFAULT; 3413 } else { 3414 /* ubuf cannot hold the string with NULL terminator, 3415 * do a partial copy with NULL terminator. 3416 */ 3417 char zero = '\0'; 3418 3419 err = -ENOSPC; 3420 if (copy_to_user(ubuf, buf, input_len - 1)) 3421 return -EFAULT; 3422 if (put_user(zero, ubuf + input_len - 1)) 3423 return -EFAULT; 3424 } 3425 } 3426 3427 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 3428 put_user(fd_type, &uattr->task_fd_query.fd_type) || 3429 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 3430 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 3431 return -EFAULT; 3432 3433 return err; 3434 } 3435 3436 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 3437 3438 static int bpf_task_fd_query(const union bpf_attr *attr, 3439 union bpf_attr __user *uattr) 3440 { 3441 pid_t pid = attr->task_fd_query.pid; 3442 u32 fd = attr->task_fd_query.fd; 3443 const struct perf_event *event; 3444 struct files_struct *files; 3445 struct task_struct *task; 3446 struct file *file; 3447 int err; 3448 3449 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 3450 return -EINVAL; 3451 3452 if (!capable(CAP_SYS_ADMIN)) 3453 return -EPERM; 3454 3455 if (attr->task_fd_query.flags != 0) 3456 return -EINVAL; 3457 3458 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 3459 if (!task) 3460 return -ENOENT; 3461 3462 files = get_files_struct(task); 3463 put_task_struct(task); 3464 if (!files) 3465 return -ENOENT; 3466 3467 err = 0; 3468 spin_lock(&files->file_lock); 3469 file = fcheck_files(files, fd); 3470 if (!file) 3471 err = -EBADF; 3472 else 3473 get_file(file); 3474 spin_unlock(&files->file_lock); 3475 put_files_struct(files); 3476 3477 if (err) 3478 goto out; 3479 3480 if (file->f_op == &bpf_link_fops) { 3481 struct bpf_link *link = file->private_data; 3482 3483 if (link->ops == &bpf_raw_tp_lops) { 3484 struct bpf_raw_tp_link *raw_tp = 3485 container_of(link, struct bpf_raw_tp_link, link); 3486 struct bpf_raw_event_map *btp = raw_tp->btp; 3487 3488 err = bpf_task_fd_query_copy(attr, uattr, 3489 raw_tp->link.prog->aux->id, 3490 BPF_FD_TYPE_RAW_TRACEPOINT, 3491 btp->tp->name, 0, 0); 3492 goto put_file; 3493 } 3494 goto out_not_supp; 3495 } 3496 3497 event = perf_get_event(file); 3498 if (!IS_ERR(event)) { 3499 u64 probe_offset, probe_addr; 3500 u32 prog_id, fd_type; 3501 const char *buf; 3502 3503 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 3504 &buf, &probe_offset, 3505 &probe_addr); 3506 if (!err) 3507 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 
3508 fd_type, buf, 3509 probe_offset, 3510 probe_addr); 3511 goto put_file; 3512 } 3513 3514 out_not_supp: 3515 err = -ENOTSUPP; 3516 put_file: 3517 fput(file); 3518 out: 3519 return err; 3520 } 3521 3522 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 3523 3524 #define BPF_DO_BATCH(fn) \ 3525 do { \ 3526 if (!fn) { \ 3527 err = -ENOTSUPP; \ 3528 goto err_put; \ 3529 } \ 3530 err = fn(map, attr, uattr); \ 3531 } while (0) 3532 3533 static int bpf_map_do_batch(const union bpf_attr *attr, 3534 union bpf_attr __user *uattr, 3535 int cmd) 3536 { 3537 struct bpf_map *map; 3538 int err, ufd; 3539 struct fd f; 3540 3541 if (CHECK_ATTR(BPF_MAP_BATCH)) 3542 return -EINVAL; 3543 3544 ufd = attr->batch.map_fd; 3545 f = fdget(ufd); 3546 map = __bpf_map_get(f); 3547 if (IS_ERR(map)) 3548 return PTR_ERR(map); 3549 3550 if ((cmd == BPF_MAP_LOOKUP_BATCH || 3551 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) && 3552 !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 3553 err = -EPERM; 3554 goto err_put; 3555 } 3556 3557 if (cmd != BPF_MAP_LOOKUP_BATCH && 3558 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 3559 err = -EPERM; 3560 goto err_put; 3561 } 3562 3563 if (cmd == BPF_MAP_LOOKUP_BATCH) 3564 BPF_DO_BATCH(map->ops->map_lookup_batch); 3565 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 3566 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch); 3567 else if (cmd == BPF_MAP_UPDATE_BATCH) 3568 BPF_DO_BATCH(map->ops->map_update_batch); 3569 else 3570 BPF_DO_BATCH(map->ops->map_delete_batch); 3571 3572 err_put: 3573 fdput(f); 3574 return err; 3575 } 3576 3577 #define BPF_LINK_CREATE_LAST_FIELD link_create.flags 3578 static int link_create(union bpf_attr *attr) 3579 { 3580 enum bpf_prog_type ptype; 3581 struct bpf_prog *prog; 3582 int ret; 3583 3584 if (!capable(CAP_NET_ADMIN)) 3585 return -EPERM; 3586 3587 if (CHECK_ATTR(BPF_LINK_CREATE)) 3588 return -EINVAL; 3589 3590 ptype = attach_type_to_prog_type(attr->link_create.attach_type); 3591 if (ptype == BPF_PROG_TYPE_UNSPEC) 3592 return -EINVAL; 3593 3594 prog = bpf_prog_get_type(attr->link_create.prog_fd, ptype); 3595 if (IS_ERR(prog)) 3596 return PTR_ERR(prog); 3597 3598 ret = bpf_prog_attach_check_attach_type(prog, 3599 attr->link_create.attach_type); 3600 if (ret) 3601 goto err_out; 3602 3603 switch (ptype) { 3604 case BPF_PROG_TYPE_CGROUP_SKB: 3605 case BPF_PROG_TYPE_CGROUP_SOCK: 3606 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3607 case BPF_PROG_TYPE_SOCK_OPS: 3608 case BPF_PROG_TYPE_CGROUP_DEVICE: 3609 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3610 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3611 ret = cgroup_bpf_link_attach(attr, prog); 3612 break; 3613 default: 3614 ret = -EINVAL; 3615 } 3616 3617 err_out: 3618 if (ret < 0) 3619 bpf_prog_put(prog); 3620 return ret; 3621 } 3622 3623 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd 3624 3625 static int link_update(union bpf_attr *attr) 3626 { 3627 struct bpf_prog *old_prog = NULL, *new_prog; 3628 struct bpf_link *link; 3629 u32 flags; 3630 int ret; 3631 3632 if (!capable(CAP_NET_ADMIN)) 3633 return -EPERM; 3634 3635 if (CHECK_ATTR(BPF_LINK_UPDATE)) 3636 return -EINVAL; 3637 3638 flags = attr->link_update.flags; 3639 if (flags & ~BPF_F_REPLACE) 3640 return -EINVAL; 3641 3642 link = bpf_link_get_from_fd(attr->link_update.link_fd); 3643 if (IS_ERR(link)) 3644 return PTR_ERR(link); 3645 3646 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); 3647 if (IS_ERR(new_prog)) { 3648 ret = PTR_ERR(new_prog); 3649 goto out_put_link; 3650 } 3651 3652 if (flags & BPF_F_REPLACE) { 3653 old_prog = 
bpf_prog_get(attr->link_update.old_prog_fd); 3654 if (IS_ERR(old_prog)) { 3655 ret = PTR_ERR(old_prog); 3656 old_prog = NULL; 3657 goto out_put_progs; 3658 } 3659 } else if (attr->link_update.old_prog_fd) { 3660 ret = -EINVAL; 3661 goto out_put_progs; 3662 } 3663 3664 #ifdef CONFIG_CGROUP_BPF 3665 if (link->ops == &bpf_cgroup_link_lops) { 3666 ret = cgroup_bpf_replace(link, old_prog, new_prog); 3667 goto out_put_progs; 3668 } 3669 #endif 3670 ret = -EINVAL; 3671 3672 out_put_progs: 3673 if (old_prog) 3674 bpf_prog_put(old_prog); 3675 if (ret) 3676 bpf_prog_put(new_prog); 3677 out_put_link: 3678 bpf_link_put(link); 3679 return ret; 3680 } 3681 3682 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 3683 { 3684 union bpf_attr attr; 3685 int err; 3686 3687 if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) 3688 return -EPERM; 3689 3690 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 3691 if (err) 3692 return err; 3693 size = min_t(u32, size, sizeof(attr)); 3694 3695 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 3696 memset(&attr, 0, sizeof(attr)); 3697 if (copy_from_user(&attr, uattr, size) != 0) 3698 return -EFAULT; 3699 3700 err = security_bpf(cmd, &attr, size); 3701 if (err < 0) 3702 return err; 3703 3704 switch (cmd) { 3705 case BPF_MAP_CREATE: 3706 err = map_create(&attr); 3707 break; 3708 case BPF_MAP_LOOKUP_ELEM: 3709 err = map_lookup_elem(&attr); 3710 break; 3711 case BPF_MAP_UPDATE_ELEM: 3712 err = map_update_elem(&attr); 3713 break; 3714 case BPF_MAP_DELETE_ELEM: 3715 err = map_delete_elem(&attr); 3716 break; 3717 case BPF_MAP_GET_NEXT_KEY: 3718 err = map_get_next_key(&attr); 3719 break; 3720 case BPF_MAP_FREEZE: 3721 err = map_freeze(&attr); 3722 break; 3723 case BPF_PROG_LOAD: 3724 err = bpf_prog_load(&attr, uattr); 3725 break; 3726 case BPF_OBJ_PIN: 3727 err = bpf_obj_pin(&attr); 3728 break; 3729 case BPF_OBJ_GET: 3730 err = bpf_obj_get(&attr); 3731 break; 3732 case BPF_PROG_ATTACH: 3733 err = bpf_prog_attach(&attr); 3734 break; 3735 case BPF_PROG_DETACH: 3736 err = bpf_prog_detach(&attr); 3737 break; 3738 case BPF_PROG_QUERY: 3739 err = bpf_prog_query(&attr, uattr); 3740 break; 3741 case BPF_PROG_TEST_RUN: 3742 err = bpf_prog_test_run(&attr, uattr); 3743 break; 3744 case BPF_PROG_GET_NEXT_ID: 3745 err = bpf_obj_get_next_id(&attr, uattr, 3746 &prog_idr, &prog_idr_lock); 3747 break; 3748 case BPF_MAP_GET_NEXT_ID: 3749 err = bpf_obj_get_next_id(&attr, uattr, 3750 &map_idr, &map_idr_lock); 3751 break; 3752 case BPF_BTF_GET_NEXT_ID: 3753 err = bpf_obj_get_next_id(&attr, uattr, 3754 &btf_idr, &btf_idr_lock); 3755 break; 3756 case BPF_PROG_GET_FD_BY_ID: 3757 err = bpf_prog_get_fd_by_id(&attr); 3758 break; 3759 case BPF_MAP_GET_FD_BY_ID: 3760 err = bpf_map_get_fd_by_id(&attr); 3761 break; 3762 case BPF_OBJ_GET_INFO_BY_FD: 3763 err = bpf_obj_get_info_by_fd(&attr, uattr); 3764 break; 3765 case BPF_RAW_TRACEPOINT_OPEN: 3766 err = bpf_raw_tracepoint_open(&attr); 3767 break; 3768 case BPF_BTF_LOAD: 3769 err = bpf_btf_load(&attr); 3770 break; 3771 case BPF_BTF_GET_FD_BY_ID: 3772 err = bpf_btf_get_fd_by_id(&attr); 3773 break; 3774 case BPF_TASK_FD_QUERY: 3775 err = bpf_task_fd_query(&attr, uattr); 3776 break; 3777 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 3778 err = map_lookup_and_delete_elem(&attr); 3779 break; 3780 case BPF_MAP_LOOKUP_BATCH: 3781 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH); 3782 break; 3783 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 3784 err = bpf_map_do_batch(&attr, uattr, 3785 
BPF_MAP_LOOKUP_AND_DELETE_BATCH); 3786 break; 3787 case BPF_MAP_UPDATE_BATCH: 3788 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH); 3789 break; 3790 case BPF_MAP_DELETE_BATCH: 3791 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH); 3792 break; 3793 case BPF_LINK_CREATE: 3794 err = link_create(&attr); 3795 break; 3796 case BPF_LINK_UPDATE: 3797 err = link_update(&attr); 3798 break; 3799 default: 3800 err = -EINVAL; 3801 break; 3802 } 3803 3804 return err; 3805 } 3806
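
/* A minimal userspace sketch (not part of this file) of exercising the
 * dispatcher above: it loads a two-instruction "return 0" socket filter
 * via BPF_PROG_LOAD using the raw syscall. Assumes <unistd.h>,
 * <sys/syscall.h> and <linux/bpf.h>; error handling is omitted.
 *
 *	union bpf_attr attr = {};
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * On success prog_fd refers to the loaded program (see bpf_prog_new_fd()
 * above). Passing sizeof(attr) routes through bpf_check_uarg_tail_zero(),
 * so any attr fields a command does not use must be left zeroed.
 */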