1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #include <linux/bpf.h> 5 #include <linux/bpf_trace.h> 6 #include <linux/bpf_lirc.h> 7 #include <linux/btf.h> 8 #include <linux/syscalls.h> 9 #include <linux/slab.h> 10 #include <linux/sched/signal.h> 11 #include <linux/vmalloc.h> 12 #include <linux/mmzone.h> 13 #include <linux/anon_inodes.h> 14 #include <linux/fdtable.h> 15 #include <linux/file.h> 16 #include <linux/fs.h> 17 #include <linux/license.h> 18 #include <linux/filter.h> 19 #include <linux/version.h> 20 #include <linux/kernel.h> 21 #include <linux/idr.h> 22 #include <linux/cred.h> 23 #include <linux/timekeeping.h> 24 #include <linux/ctype.h> 25 #include <linux/nospec.h> 26 #include <linux/audit.h> 27 #include <uapi/linux/btf.h> 28 29 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ 30 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ 31 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 32 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY) 33 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) 34 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \ 35 IS_FD_HASH(map)) 36 37 #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY) 38 39 DEFINE_PER_CPU(int, bpf_prog_active); 40 static DEFINE_IDR(prog_idr); 41 static DEFINE_SPINLOCK(prog_idr_lock); 42 static DEFINE_IDR(map_idr); 43 static DEFINE_SPINLOCK(map_idr_lock); 44 45 int sysctl_unprivileged_bpf_disabled __read_mostly; 46 47 static const struct bpf_map_ops * const bpf_map_types[] = { 48 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 49 #define BPF_MAP_TYPE(_id, _ops) \ 50 [_id] = &_ops, 51 #include <linux/bpf_types.h> 52 #undef BPF_PROG_TYPE 53 #undef BPF_MAP_TYPE 54 }; 55 56 /* 57 * If we're handed a bigger struct than we know of, ensure all the unknown bits 58 * are 0 - i.e. new user-space does not rely on any kernel feature extensions 59 * we don't know about yet. 60 * 61 * There is a ToCToU between this function call and the following 62 * copy_from_user() call. However, this is not a concern since this function is 63 * meant to be a future-proofing of bits. 64 */ 65 int bpf_check_uarg_tail_zero(void __user *uaddr, 66 size_t expected_size, 67 size_t actual_size) 68 { 69 unsigned char __user *addr; 70 unsigned char __user *end; 71 unsigned char val; 72 int err; 73 74 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ 75 return -E2BIG; 76 77 if (unlikely(!access_ok(uaddr, actual_size))) 78 return -EFAULT; 79 80 if (actual_size <= expected_size) 81 return 0; 82 83 addr = uaddr + expected_size; 84 end = uaddr + actual_size; 85 86 for (; addr < end; addr++) { 87 err = get_user(val, addr); 88 if (err) 89 return err; 90 if (val) 91 return -E2BIG; 92 } 93 94 return 0; 95 } 96 97 const struct bpf_map_ops bpf_map_offload_ops = { 98 .map_alloc = bpf_map_offload_map_alloc, 99 .map_free = bpf_map_offload_map_free, 100 .map_check_btf = map_check_no_btf, 101 }; 102 103 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) 104 { 105 const struct bpf_map_ops *ops; 106 u32 type = attr->map_type; 107 struct bpf_map *map; 108 int err; 109 110 if (type >= ARRAY_SIZE(bpf_map_types)) 111 return ERR_PTR(-EINVAL); 112 type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types)); 113 ops = bpf_map_types[type]; 114 if (!ops) 115 return ERR_PTR(-EINVAL); 116 117 if (ops->map_alloc_check) { 118 err = ops->map_alloc_check(attr); 119 if (err) 120 return ERR_PTR(err); 121 } 122 if (attr->map_ifindex) 123 ops = &bpf_map_offload_ops; 124 map = ops->map_alloc(attr); 125 if (IS_ERR(map)) 126 return map; 127 map->ops = ops; 128 map->map_type = type; 129 return map; 130 } 131 132 static u32 bpf_map_value_size(struct bpf_map *map) 133 { 134 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 135 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || 136 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || 137 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 138 return round_up(map->value_size, 8) * num_possible_cpus(); 139 else if (IS_FD_MAP(map)) 140 return sizeof(u32); 141 else 142 return map->value_size; 143 } 144 145 static void maybe_wait_bpf_programs(struct bpf_map *map) 146 { 147 /* Wait for any running BPF programs to complete so that 148 * userspace, when we return to it, knows that all programs 149 * that could be running use the new map value. 150 */ 151 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || 152 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 153 synchronize_rcu(); 154 } 155 156 static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, 157 void *value, __u64 flags) 158 { 159 int err; 160 161 /* Need to create a kthread, thus must support schedule */ 162 if (bpf_map_is_dev_bound(map)) { 163 return bpf_map_offload_update_elem(map, key, value, flags); 164 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || 165 map->map_type == BPF_MAP_TYPE_SOCKHASH || 166 map->map_type == BPF_MAP_TYPE_SOCKMAP || 167 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 168 return map->ops->map_update_elem(map, key, value, flags); 169 } else if (IS_FD_PROG_ARRAY(map)) { 170 return bpf_fd_array_map_update_elem(map, f.file, key, value, 171 flags); 172 } 173 174 bpf_disable_instrumentation(); 175 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 176 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 177 err = bpf_percpu_hash_update(map, key, value, flags); 178 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 179 err = bpf_percpu_array_update(map, key, value, flags); 180 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 181 err = bpf_percpu_cgroup_storage_update(map, key, value, 182 flags); 183 } else if (IS_FD_ARRAY(map)) { 184 rcu_read_lock(); 185 err = bpf_fd_array_map_update_elem(map, f.file, key, value, 186 flags); 187 rcu_read_unlock(); 188 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { 189 rcu_read_lock(); 190 err = bpf_fd_htab_map_update_elem(map, f.file, key, value, 191 flags); 192 rcu_read_unlock(); 193 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 194 /* rcu_read_lock() is not needed */ 195 err = bpf_fd_reuseport_array_update_elem(map, key, value, 196 flags); 197 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || 198 map->map_type == BPF_MAP_TYPE_STACK) { 199 err = map->ops->map_push_elem(map, value, flags); 200 } else { 201 rcu_read_lock(); 202 err = map->ops->map_update_elem(map, key, value, flags); 203 rcu_read_unlock(); 204 } 205 bpf_enable_instrumentation(); 206 maybe_wait_bpf_programs(map); 207 208 return err; 209 } 210 211 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, 212 __u64 flags) 213 { 214 void *ptr; 215 int err; 216 217 if (bpf_map_is_dev_bound(map)) 218 return bpf_map_offload_lookup_elem(map, key, value); 219 220 bpf_disable_instrumentation(); 221 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 222 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 223 err = bpf_percpu_hash_copy(map, key, value); 224 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 225 err = bpf_percpu_array_copy(map, key, value); 226 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 227 err = bpf_percpu_cgroup_storage_copy(map, key, value); 228 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { 229 err = bpf_stackmap_copy(map, key, value); 230 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) { 231 err = bpf_fd_array_map_lookup_elem(map, key, value); 232 } else if (IS_FD_HASH(map)) { 233 err = bpf_fd_htab_map_lookup_elem(map, key, value); 234 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 235 err = bpf_fd_reuseport_array_lookup_elem(map, key, value); 236 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || 237 map->map_type == BPF_MAP_TYPE_STACK) { 238 err = map->ops->map_peek_elem(map, value); 239 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 240 /* struct_ops map requires directly updating "value" */ 241 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); 242 } else { 243 rcu_read_lock(); 244 if (map->ops->map_lookup_elem_sys_only) 245 ptr = map->ops->map_lookup_elem_sys_only(map, key); 246 else 247 ptr = map->ops->map_lookup_elem(map, key); 248 if (IS_ERR(ptr)) { 249 err = PTR_ERR(ptr); 250 } else if (!ptr) { 251 err = -ENOENT; 252 } else { 253 err = 0; 254 if (flags & BPF_F_LOCK) 255 /* lock 'ptr' and copy everything but lock */ 256 copy_map_value_locked(map, value, ptr, true); 257 else 258 copy_map_value(map, value, ptr); 259 /* mask lock, since value wasn't zero inited */ 260 check_and_init_map_lock(map, value); 261 } 262 rcu_read_unlock(); 263 } 264 265 bpf_enable_instrumentation(); 266 maybe_wait_bpf_programs(map); 267 268 return err; 269 } 270 271 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable) 272 { 273 /* We really just want to fail instead of triggering OOM killer 274 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, 275 * which is used for lower order allocation requests. 276 * 277 * It has been observed that higher order allocation requests done by 278 * vmalloc with __GFP_NORETRY being set might fail due to not trying 279 * to reclaim memory from the page cache, thus we set 280 * __GFP_RETRY_MAYFAIL to avoid such situations. 281 */ 282 283 const gfp_t flags = __GFP_NOWARN | __GFP_ZERO; 284 void *area; 285 286 if (size >= SIZE_MAX) 287 return NULL; 288 289 /* kmalloc()'ed memory can't be mmap()'ed */ 290 if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 291 area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, 292 numa_node); 293 if (area != NULL) 294 return area; 295 } 296 if (mmapable) { 297 BUG_ON(!PAGE_ALIGNED(size)); 298 return vmalloc_user_node_flags(size, numa_node, GFP_KERNEL | 299 __GFP_RETRY_MAYFAIL | flags); 300 } 301 return __vmalloc_node_flags_caller(size, numa_node, 302 GFP_KERNEL | __GFP_RETRY_MAYFAIL | 303 flags, __builtin_return_address(0)); 304 } 305 306 void *bpf_map_area_alloc(u64 size, int numa_node) 307 { 308 return __bpf_map_area_alloc(size, numa_node, false); 309 } 310 311 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node) 312 { 313 return __bpf_map_area_alloc(size, numa_node, true); 314 } 315 316 void bpf_map_area_free(void *area) 317 { 318 kvfree(area); 319 } 320 321 static u32 bpf_map_flags_retain_permanent(u32 flags) 322 { 323 /* Some map creation flags are not tied to the map object but 324 * rather to the map fd instead, so they have no meaning upon 325 * map object inspection since multiple file descriptors with 326 * different (access) properties can exist here. Thus, given 327 * this has zero meaning for the map itself, lets clear these 328 * from here. 329 */ 330 return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY); 331 } 332 333 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) 334 { 335 map->map_type = attr->map_type; 336 map->key_size = attr->key_size; 337 map->value_size = attr->value_size; 338 map->max_entries = attr->max_entries; 339 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); 340 map->numa_node = bpf_map_attr_numa_node(attr); 341 } 342 343 static int bpf_charge_memlock(struct user_struct *user, u32 pages) 344 { 345 unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 346 347 if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) { 348 atomic_long_sub(pages, &user->locked_vm); 349 return -EPERM; 350 } 351 return 0; 352 } 353 354 static void bpf_uncharge_memlock(struct user_struct *user, u32 pages) 355 { 356 if (user) 357 atomic_long_sub(pages, &user->locked_vm); 358 } 359 360 int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size) 361 { 362 u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT; 363 struct user_struct *user; 364 int ret; 365 366 if (size >= U32_MAX - PAGE_SIZE) 367 return -E2BIG; 368 369 user = get_current_user(); 370 ret = bpf_charge_memlock(user, pages); 371 if (ret) { 372 free_uid(user); 373 return ret; 374 } 375 376 mem->pages = pages; 377 mem->user = user; 378 379 return 0; 380 } 381 382 void bpf_map_charge_finish(struct bpf_map_memory *mem) 383 { 384 bpf_uncharge_memlock(mem->user, mem->pages); 385 free_uid(mem->user); 386 } 387 388 void bpf_map_charge_move(struct bpf_map_memory *dst, 389 struct bpf_map_memory *src) 390 { 391 *dst = *src; 392 393 /* Make sure src will not be used for the redundant uncharging. */ 394 memset(src, 0, sizeof(struct bpf_map_memory)); 395 } 396 397 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages) 398 { 399 int ret; 400 401 ret = bpf_charge_memlock(map->memory.user, pages); 402 if (ret) 403 return ret; 404 map->memory.pages += pages; 405 return ret; 406 } 407 408 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages) 409 { 410 bpf_uncharge_memlock(map->memory.user, pages); 411 map->memory.pages -= pages; 412 } 413 414 static int bpf_map_alloc_id(struct bpf_map *map) 415 { 416 int id; 417 418 idr_preload(GFP_KERNEL); 419 spin_lock_bh(&map_idr_lock); 420 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); 421 if (id > 0) 422 map->id = id; 423 spin_unlock_bh(&map_idr_lock); 424 idr_preload_end(); 425 426 if (WARN_ON_ONCE(!id)) 427 return -ENOSPC; 428 429 return id > 0 ? 0 : id; 430 } 431 432 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) 433 { 434 unsigned long flags; 435 436 /* Offloaded maps are removed from the IDR store when their device 437 * disappears - even if someone holds an fd to them they are unusable, 438 * the memory is gone, all ops will fail; they are simply waiting for 439 * refcnt to drop to be freed. 440 */ 441 if (!map->id) 442 return; 443 444 if (do_idr_lock) 445 spin_lock_irqsave(&map_idr_lock, flags); 446 else 447 __acquire(&map_idr_lock); 448 449 idr_remove(&map_idr, map->id); 450 map->id = 0; 451 452 if (do_idr_lock) 453 spin_unlock_irqrestore(&map_idr_lock, flags); 454 else 455 __release(&map_idr_lock); 456 } 457 458 /* called from workqueue */ 459 static void bpf_map_free_deferred(struct work_struct *work) 460 { 461 struct bpf_map *map = container_of(work, struct bpf_map, work); 462 struct bpf_map_memory mem; 463 464 bpf_map_charge_move(&mem, &map->memory); 465 security_bpf_map_free(map); 466 /* implementation dependent freeing */ 467 map->ops->map_free(map); 468 bpf_map_charge_finish(&mem); 469 } 470 471 static void bpf_map_put_uref(struct bpf_map *map) 472 { 473 if (atomic64_dec_and_test(&map->usercnt)) { 474 if (map->ops->map_release_uref) 475 map->ops->map_release_uref(map); 476 } 477 } 478 479 /* decrement map refcnt and schedule it for freeing via workqueue 480 * (unrelying map implementation ops->map_free() might sleep) 481 */ 482 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) 483 { 484 if (atomic64_dec_and_test(&map->refcnt)) { 485 /* bpf_map_free_id() must be called first */ 486 bpf_map_free_id(map, do_idr_lock); 487 btf_put(map->btf); 488 INIT_WORK(&map->work, bpf_map_free_deferred); 489 schedule_work(&map->work); 490 } 491 } 492 493 void bpf_map_put(struct bpf_map *map) 494 { 495 __bpf_map_put(map, true); 496 } 497 EXPORT_SYMBOL_GPL(bpf_map_put); 498 499 void bpf_map_put_with_uref(struct bpf_map *map) 500 { 501 bpf_map_put_uref(map); 502 bpf_map_put(map); 503 } 504 505 static int bpf_map_release(struct inode *inode, struct file *filp) 506 { 507 struct bpf_map *map = filp->private_data; 508 509 if (map->ops->map_release) 510 map->ops->map_release(map, filp); 511 512 bpf_map_put_with_uref(map); 513 return 0; 514 } 515 516 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) 517 { 518 fmode_t mode = f.file->f_mode; 519 520 /* Our file permissions may have been overridden by global 521 * map permissions facing syscall side. 522 */ 523 if (READ_ONCE(map->frozen)) 524 mode &= ~FMODE_CAN_WRITE; 525 return mode; 526 } 527 528 #ifdef CONFIG_PROC_FS 529 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) 530 { 531 const struct bpf_map *map = filp->private_data; 532 const struct bpf_array *array; 533 u32 type = 0, jited = 0; 534 535 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { 536 array = container_of(map, struct bpf_array, map); 537 type = array->aux->type; 538 jited = array->aux->jited; 539 } 540 541 seq_printf(m, 542 "map_type:\t%u\n" 543 "key_size:\t%u\n" 544 "value_size:\t%u\n" 545 "max_entries:\t%u\n" 546 "map_flags:\t%#x\n" 547 "memlock:\t%llu\n" 548 "map_id:\t%u\n" 549 "frozen:\t%u\n", 550 map->map_type, 551 map->key_size, 552 map->value_size, 553 map->max_entries, 554 map->map_flags, 555 map->memory.pages * 1ULL << PAGE_SHIFT, 556 map->id, 557 READ_ONCE(map->frozen)); 558 if (type) { 559 seq_printf(m, "owner_prog_type:\t%u\n", type); 560 seq_printf(m, "owner_jited:\t%u\n", jited); 561 } 562 } 563 #endif 564 565 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz, 566 loff_t *ppos) 567 { 568 /* We need this handler such that alloc_file() enables 569 * f_mode with FMODE_CAN_READ. 570 */ 571 return -EINVAL; 572 } 573 574 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf, 575 size_t siz, loff_t *ppos) 576 { 577 /* We need this handler such that alloc_file() enables 578 * f_mode with FMODE_CAN_WRITE. 579 */ 580 return -EINVAL; 581 } 582 583 /* called for any extra memory-mapped regions (except initial) */ 584 static void bpf_map_mmap_open(struct vm_area_struct *vma) 585 { 586 struct bpf_map *map = vma->vm_file->private_data; 587 588 bpf_map_inc_with_uref(map); 589 590 if (vma->vm_flags & VM_WRITE) { 591 mutex_lock(&map->freeze_mutex); 592 map->writecnt++; 593 mutex_unlock(&map->freeze_mutex); 594 } 595 } 596 597 /* called for all unmapped memory region (including initial) */ 598 static void bpf_map_mmap_close(struct vm_area_struct *vma) 599 { 600 struct bpf_map *map = vma->vm_file->private_data; 601 602 if (vma->vm_flags & VM_WRITE) { 603 mutex_lock(&map->freeze_mutex); 604 map->writecnt--; 605 mutex_unlock(&map->freeze_mutex); 606 } 607 608 bpf_map_put_with_uref(map); 609 } 610 611 static const struct vm_operations_struct bpf_map_default_vmops = { 612 .open = bpf_map_mmap_open, 613 .close = bpf_map_mmap_close, 614 }; 615 616 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) 617 { 618 struct bpf_map *map = filp->private_data; 619 int err; 620 621 if (!map->ops->map_mmap || map_value_has_spin_lock(map)) 622 return -ENOTSUPP; 623 624 if (!(vma->vm_flags & VM_SHARED)) 625 return -EINVAL; 626 627 mutex_lock(&map->freeze_mutex); 628 629 if ((vma->vm_flags & VM_WRITE) && map->frozen) { 630 err = -EPERM; 631 goto out; 632 } 633 634 /* set default open/close callbacks */ 635 vma->vm_ops = &bpf_map_default_vmops; 636 vma->vm_private_data = map; 637 638 err = map->ops->map_mmap(map, vma); 639 if (err) 640 goto out; 641 642 bpf_map_inc_with_uref(map); 643 644 if (vma->vm_flags & VM_WRITE) 645 map->writecnt++; 646 out: 647 mutex_unlock(&map->freeze_mutex); 648 return err; 649 } 650 651 const struct file_operations bpf_map_fops = { 652 #ifdef CONFIG_PROC_FS 653 .show_fdinfo = bpf_map_show_fdinfo, 654 #endif 655 .release = bpf_map_release, 656 .read = bpf_dummy_read, 657 .write = bpf_dummy_write, 658 .mmap = bpf_map_mmap, 659 }; 660 661 int bpf_map_new_fd(struct bpf_map *map, int flags) 662 { 663 int ret; 664 665 ret = security_bpf_map(map, OPEN_FMODE(flags)); 666 if (ret < 0) 667 return ret; 668 669 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, 670 flags | O_CLOEXEC); 671 } 672 673 int bpf_get_file_flag(int flags) 674 { 675 if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY)) 676 return -EINVAL; 677 if (flags & BPF_F_RDONLY) 678 return O_RDONLY; 679 if (flags & BPF_F_WRONLY) 680 return O_WRONLY; 681 return O_RDWR; 682 } 683 684 /* helper macro to check that unused fields 'union bpf_attr' are zero */ 685 #define CHECK_ATTR(CMD) \ 686 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \ 687 sizeof(attr->CMD##_LAST_FIELD), 0, \ 688 sizeof(*attr) - \ 689 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ 690 sizeof(attr->CMD##_LAST_FIELD)) != NULL 691 692 /* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes. 693 * Return 0 on success and < 0 on error. 694 */ 695 static int bpf_obj_name_cpy(char *dst, const char *src) 696 { 697 const char *end = src + BPF_OBJ_NAME_LEN; 698 699 memset(dst, 0, BPF_OBJ_NAME_LEN); 700 /* Copy all isalnum(), '_' and '.' chars. */ 701 while (src < end && *src) { 702 if (!isalnum(*src) && 703 *src != '_' && *src != '.') 704 return -EINVAL; 705 *dst++ = *src++; 706 } 707 708 /* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */ 709 if (src == end) 710 return -EINVAL; 711 712 return 0; 713 } 714 715 int map_check_no_btf(const struct bpf_map *map, 716 const struct btf *btf, 717 const struct btf_type *key_type, 718 const struct btf_type *value_type) 719 { 720 return -ENOTSUPP; 721 } 722 723 static int map_check_btf(struct bpf_map *map, const struct btf *btf, 724 u32 btf_key_id, u32 btf_value_id) 725 { 726 const struct btf_type *key_type, *value_type; 727 u32 key_size, value_size; 728 int ret = 0; 729 730 /* Some maps allow key to be unspecified. */ 731 if (btf_key_id) { 732 key_type = btf_type_id_size(btf, &btf_key_id, &key_size); 733 if (!key_type || key_size != map->key_size) 734 return -EINVAL; 735 } else { 736 key_type = btf_type_by_id(btf, 0); 737 if (!map->ops->map_check_btf) 738 return -EINVAL; 739 } 740 741 value_type = btf_type_id_size(btf, &btf_value_id, &value_size); 742 if (!value_type || value_size != map->value_size) 743 return -EINVAL; 744 745 map->spin_lock_off = btf_find_spin_lock(btf, value_type); 746 747 if (map_value_has_spin_lock(map)) { 748 if (map->map_flags & BPF_F_RDONLY_PROG) 749 return -EACCES; 750 if (map->map_type != BPF_MAP_TYPE_HASH && 751 map->map_type != BPF_MAP_TYPE_ARRAY && 752 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 753 map->map_type != BPF_MAP_TYPE_SK_STORAGE) 754 return -ENOTSUPP; 755 if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > 756 map->value_size) { 757 WARN_ONCE(1, 758 "verifier bug spin_lock_off %d value_size %d\n", 759 map->spin_lock_off, map->value_size); 760 return -EFAULT; 761 } 762 } 763 764 if (map->ops->map_check_btf) 765 ret = map->ops->map_check_btf(map, btf, key_type, value_type); 766 767 return ret; 768 } 769 770 #define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id 771 /* called via syscall */ 772 static int map_create(union bpf_attr *attr) 773 { 774 int numa_node = bpf_map_attr_numa_node(attr); 775 struct bpf_map_memory mem; 776 struct bpf_map *map; 777 int f_flags; 778 int err; 779 780 err = CHECK_ATTR(BPF_MAP_CREATE); 781 if (err) 782 return -EINVAL; 783 784 if (attr->btf_vmlinux_value_type_id) { 785 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS || 786 attr->btf_key_type_id || attr->btf_value_type_id) 787 return -EINVAL; 788 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) { 789 return -EINVAL; 790 } 791 792 f_flags = bpf_get_file_flag(attr->map_flags); 793 if (f_flags < 0) 794 return f_flags; 795 796 if (numa_node != NUMA_NO_NODE && 797 ((unsigned int)numa_node >= nr_node_ids || 798 !node_online(numa_node))) 799 return -EINVAL; 800 801 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */ 802 map = find_and_alloc_map(attr); 803 if (IS_ERR(map)) 804 return PTR_ERR(map); 805 806 err = bpf_obj_name_cpy(map->name, attr->map_name); 807 if (err) 808 goto free_map; 809 810 atomic64_set(&map->refcnt, 1); 811 atomic64_set(&map->usercnt, 1); 812 mutex_init(&map->freeze_mutex); 813 814 map->spin_lock_off = -EINVAL; 815 if (attr->btf_key_type_id || attr->btf_value_type_id || 816 /* Even the map's value is a kernel's struct, 817 * the bpf_prog.o must have BTF to begin with 818 * to figure out the corresponding kernel's 819 * counter part. Thus, attr->btf_fd has 820 * to be valid also. 821 */ 822 attr->btf_vmlinux_value_type_id) { 823 struct btf *btf; 824 825 btf = btf_get_by_fd(attr->btf_fd); 826 if (IS_ERR(btf)) { 827 err = PTR_ERR(btf); 828 goto free_map; 829 } 830 map->btf = btf; 831 832 if (attr->btf_value_type_id) { 833 err = map_check_btf(map, btf, attr->btf_key_type_id, 834 attr->btf_value_type_id); 835 if (err) 836 goto free_map; 837 } 838 839 map->btf_key_type_id = attr->btf_key_type_id; 840 map->btf_value_type_id = attr->btf_value_type_id; 841 map->btf_vmlinux_value_type_id = 842 attr->btf_vmlinux_value_type_id; 843 } 844 845 err = security_bpf_map_alloc(map); 846 if (err) 847 goto free_map; 848 849 err = bpf_map_alloc_id(map); 850 if (err) 851 goto free_map_sec; 852 853 err = bpf_map_new_fd(map, f_flags); 854 if (err < 0) { 855 /* failed to allocate fd. 856 * bpf_map_put_with_uref() is needed because the above 857 * bpf_map_alloc_id() has published the map 858 * to the userspace and the userspace may 859 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. 860 */ 861 bpf_map_put_with_uref(map); 862 return err; 863 } 864 865 return err; 866 867 free_map_sec: 868 security_bpf_map_free(map); 869 free_map: 870 btf_put(map->btf); 871 bpf_map_charge_move(&mem, &map->memory); 872 map->ops->map_free(map); 873 bpf_map_charge_finish(&mem); 874 return err; 875 } 876 877 /* if error is returned, fd is released. 878 * On success caller should complete fd access with matching fdput() 879 */ 880 struct bpf_map *__bpf_map_get(struct fd f) 881 { 882 if (!f.file) 883 return ERR_PTR(-EBADF); 884 if (f.file->f_op != &bpf_map_fops) { 885 fdput(f); 886 return ERR_PTR(-EINVAL); 887 } 888 889 return f.file->private_data; 890 } 891 892 void bpf_map_inc(struct bpf_map *map) 893 { 894 atomic64_inc(&map->refcnt); 895 } 896 EXPORT_SYMBOL_GPL(bpf_map_inc); 897 898 void bpf_map_inc_with_uref(struct bpf_map *map) 899 { 900 atomic64_inc(&map->refcnt); 901 atomic64_inc(&map->usercnt); 902 } 903 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref); 904 905 struct bpf_map *bpf_map_get_with_uref(u32 ufd) 906 { 907 struct fd f = fdget(ufd); 908 struct bpf_map *map; 909 910 map = __bpf_map_get(f); 911 if (IS_ERR(map)) 912 return map; 913 914 bpf_map_inc_with_uref(map); 915 fdput(f); 916 917 return map; 918 } 919 920 /* map_idr_lock should have been held */ 921 static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref) 922 { 923 int refold; 924 925 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); 926 if (!refold) 927 return ERR_PTR(-ENOENT); 928 if (uref) 929 atomic64_inc(&map->usercnt); 930 931 return map; 932 } 933 934 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map) 935 { 936 spin_lock_bh(&map_idr_lock); 937 map = __bpf_map_inc_not_zero(map, false); 938 spin_unlock_bh(&map_idr_lock); 939 940 return map; 941 } 942 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero); 943 944 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) 945 { 946 return -ENOTSUPP; 947 } 948 949 static void *__bpf_copy_key(void __user *ukey, u64 key_size) 950 { 951 if (key_size) 952 return memdup_user(ukey, key_size); 953 954 if (ukey) 955 return ERR_PTR(-EINVAL); 956 957 return NULL; 958 } 959 960 /* last field in 'union bpf_attr' used by this command */ 961 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags 962 963 static int map_lookup_elem(union bpf_attr *attr) 964 { 965 void __user *ukey = u64_to_user_ptr(attr->key); 966 void __user *uvalue = u64_to_user_ptr(attr->value); 967 int ufd = attr->map_fd; 968 struct bpf_map *map; 969 void *key, *value; 970 u32 value_size; 971 struct fd f; 972 int err; 973 974 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) 975 return -EINVAL; 976 977 if (attr->flags & ~BPF_F_LOCK) 978 return -EINVAL; 979 980 f = fdget(ufd); 981 map = __bpf_map_get(f); 982 if (IS_ERR(map)) 983 return PTR_ERR(map); 984 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 985 err = -EPERM; 986 goto err_put; 987 } 988 989 if ((attr->flags & BPF_F_LOCK) && 990 !map_value_has_spin_lock(map)) { 991 err = -EINVAL; 992 goto err_put; 993 } 994 995 key = __bpf_copy_key(ukey, map->key_size); 996 if (IS_ERR(key)) { 997 err = PTR_ERR(key); 998 goto err_put; 999 } 1000 1001 value_size = bpf_map_value_size(map); 1002 1003 err = -ENOMEM; 1004 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 1005 if (!value) 1006 goto free_key; 1007 1008 err = bpf_map_copy_value(map, key, value, attr->flags); 1009 if (err) 1010 goto free_value; 1011 1012 err = -EFAULT; 1013 if (copy_to_user(uvalue, value, value_size) != 0) 1014 goto free_value; 1015 1016 err = 0; 1017 1018 free_value: 1019 kfree(value); 1020 free_key: 1021 kfree(key); 1022 err_put: 1023 fdput(f); 1024 return err; 1025 } 1026 1027 1028 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags 1029 1030 static int map_update_elem(union bpf_attr *attr) 1031 { 1032 void __user *ukey = u64_to_user_ptr(attr->key); 1033 void __user *uvalue = u64_to_user_ptr(attr->value); 1034 int ufd = attr->map_fd; 1035 struct bpf_map *map; 1036 void *key, *value; 1037 u32 value_size; 1038 struct fd f; 1039 int err; 1040 1041 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM)) 1042 return -EINVAL; 1043 1044 f = fdget(ufd); 1045 map = __bpf_map_get(f); 1046 if (IS_ERR(map)) 1047 return PTR_ERR(map); 1048 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1049 err = -EPERM; 1050 goto err_put; 1051 } 1052 1053 if ((attr->flags & BPF_F_LOCK) && 1054 !map_value_has_spin_lock(map)) { 1055 err = -EINVAL; 1056 goto err_put; 1057 } 1058 1059 key = __bpf_copy_key(ukey, map->key_size); 1060 if (IS_ERR(key)) { 1061 err = PTR_ERR(key); 1062 goto err_put; 1063 } 1064 1065 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 1066 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || 1067 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || 1068 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 1069 value_size = round_up(map->value_size, 8) * num_possible_cpus(); 1070 else 1071 value_size = map->value_size; 1072 1073 err = -ENOMEM; 1074 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 1075 if (!value) 1076 goto free_key; 1077 1078 err = -EFAULT; 1079 if (copy_from_user(value, uvalue, value_size) != 0) 1080 goto free_value; 1081 1082 err = bpf_map_update_value(map, f, key, value, attr->flags); 1083 1084 free_value: 1085 kfree(value); 1086 free_key: 1087 kfree(key); 1088 err_put: 1089 fdput(f); 1090 return err; 1091 } 1092 1093 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key 1094 1095 static int map_delete_elem(union bpf_attr *attr) 1096 { 1097 void __user *ukey = u64_to_user_ptr(attr->key); 1098 int ufd = attr->map_fd; 1099 struct bpf_map *map; 1100 struct fd f; 1101 void *key; 1102 int err; 1103 1104 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM)) 1105 return -EINVAL; 1106 1107 f = fdget(ufd); 1108 map = __bpf_map_get(f); 1109 if (IS_ERR(map)) 1110 return PTR_ERR(map); 1111 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1112 err = -EPERM; 1113 goto err_put; 1114 } 1115 1116 key = __bpf_copy_key(ukey, map->key_size); 1117 if (IS_ERR(key)) { 1118 err = PTR_ERR(key); 1119 goto err_put; 1120 } 1121 1122 if (bpf_map_is_dev_bound(map)) { 1123 err = bpf_map_offload_delete_elem(map, key); 1124 goto out; 1125 } else if (IS_FD_PROG_ARRAY(map) || 1126 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 1127 /* These maps require sleepable context */ 1128 err = map->ops->map_delete_elem(map, key); 1129 goto out; 1130 } 1131 1132 bpf_disable_instrumentation(); 1133 rcu_read_lock(); 1134 err = map->ops->map_delete_elem(map, key); 1135 rcu_read_unlock(); 1136 bpf_enable_instrumentation(); 1137 maybe_wait_bpf_programs(map); 1138 out: 1139 kfree(key); 1140 err_put: 1141 fdput(f); 1142 return err; 1143 } 1144 1145 /* last field in 'union bpf_attr' used by this command */ 1146 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key 1147 1148 static int map_get_next_key(union bpf_attr *attr) 1149 { 1150 void __user *ukey = u64_to_user_ptr(attr->key); 1151 void __user *unext_key = u64_to_user_ptr(attr->next_key); 1152 int ufd = attr->map_fd; 1153 struct bpf_map *map; 1154 void *key, *next_key; 1155 struct fd f; 1156 int err; 1157 1158 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY)) 1159 return -EINVAL; 1160 1161 f = fdget(ufd); 1162 map = __bpf_map_get(f); 1163 if (IS_ERR(map)) 1164 return PTR_ERR(map); 1165 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 1166 err = -EPERM; 1167 goto err_put; 1168 } 1169 1170 if (ukey) { 1171 key = __bpf_copy_key(ukey, map->key_size); 1172 if (IS_ERR(key)) { 1173 err = PTR_ERR(key); 1174 goto err_put; 1175 } 1176 } else { 1177 key = NULL; 1178 } 1179 1180 err = -ENOMEM; 1181 next_key = kmalloc(map->key_size, GFP_USER); 1182 if (!next_key) 1183 goto free_key; 1184 1185 if (bpf_map_is_dev_bound(map)) { 1186 err = bpf_map_offload_get_next_key(map, key, next_key); 1187 goto out; 1188 } 1189 1190 rcu_read_lock(); 1191 err = map->ops->map_get_next_key(map, key, next_key); 1192 rcu_read_unlock(); 1193 out: 1194 if (err) 1195 goto free_next_key; 1196 1197 err = -EFAULT; 1198 if (copy_to_user(unext_key, next_key, map->key_size) != 0) 1199 goto free_next_key; 1200 1201 err = 0; 1202 1203 free_next_key: 1204 kfree(next_key); 1205 free_key: 1206 kfree(key); 1207 err_put: 1208 fdput(f); 1209 return err; 1210 } 1211 1212 int generic_map_delete_batch(struct bpf_map *map, 1213 const union bpf_attr *attr, 1214 union bpf_attr __user *uattr) 1215 { 1216 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1217 u32 cp, max_count; 1218 int err = 0; 1219 void *key; 1220 1221 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1222 return -EINVAL; 1223 1224 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1225 !map_value_has_spin_lock(map)) { 1226 return -EINVAL; 1227 } 1228 1229 max_count = attr->batch.count; 1230 if (!max_count) 1231 return 0; 1232 1233 key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1234 if (!key) 1235 return -ENOMEM; 1236 1237 for (cp = 0; cp < max_count; cp++) { 1238 err = -EFAULT; 1239 if (copy_from_user(key, keys + cp * map->key_size, 1240 map->key_size)) 1241 break; 1242 1243 if (bpf_map_is_dev_bound(map)) { 1244 err = bpf_map_offload_delete_elem(map, key); 1245 break; 1246 } 1247 1248 bpf_disable_instrumentation(); 1249 rcu_read_lock(); 1250 err = map->ops->map_delete_elem(map, key); 1251 rcu_read_unlock(); 1252 bpf_enable_instrumentation(); 1253 maybe_wait_bpf_programs(map); 1254 if (err) 1255 break; 1256 } 1257 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) 1258 err = -EFAULT; 1259 1260 kfree(key); 1261 return err; 1262 } 1263 1264 int generic_map_update_batch(struct bpf_map *map, 1265 const union bpf_attr *attr, 1266 union bpf_attr __user *uattr) 1267 { 1268 void __user *values = u64_to_user_ptr(attr->batch.values); 1269 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1270 u32 value_size, cp, max_count; 1271 int ufd = attr->map_fd; 1272 void *key, *value; 1273 struct fd f; 1274 int err = 0; 1275 1276 f = fdget(ufd); 1277 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1278 return -EINVAL; 1279 1280 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1281 !map_value_has_spin_lock(map)) { 1282 return -EINVAL; 1283 } 1284 1285 value_size = bpf_map_value_size(map); 1286 1287 max_count = attr->batch.count; 1288 if (!max_count) 1289 return 0; 1290 1291 key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1292 if (!key) 1293 return -ENOMEM; 1294 1295 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 1296 if (!value) { 1297 kfree(key); 1298 return -ENOMEM; 1299 } 1300 1301 for (cp = 0; cp < max_count; cp++) { 1302 err = -EFAULT; 1303 if (copy_from_user(key, keys + cp * map->key_size, 1304 map->key_size) || 1305 copy_from_user(value, values + cp * value_size, value_size)) 1306 break; 1307 1308 err = bpf_map_update_value(map, f, key, value, 1309 attr->batch.elem_flags); 1310 1311 if (err) 1312 break; 1313 } 1314 1315 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) 1316 err = -EFAULT; 1317 1318 kfree(value); 1319 kfree(key); 1320 return err; 1321 } 1322 1323 #define MAP_LOOKUP_RETRIES 3 1324 1325 int generic_map_lookup_batch(struct bpf_map *map, 1326 const union bpf_attr *attr, 1327 union bpf_attr __user *uattr) 1328 { 1329 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); 1330 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); 1331 void __user *values = u64_to_user_ptr(attr->batch.values); 1332 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1333 void *buf, *buf_prevkey, *prev_key, *key, *value; 1334 int err, retry = MAP_LOOKUP_RETRIES; 1335 u32 value_size, cp, max_count; 1336 1337 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1338 return -EINVAL; 1339 1340 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1341 !map_value_has_spin_lock(map)) 1342 return -EINVAL; 1343 1344 value_size = bpf_map_value_size(map); 1345 1346 max_count = attr->batch.count; 1347 if (!max_count) 1348 return 0; 1349 1350 if (put_user(0, &uattr->batch.count)) 1351 return -EFAULT; 1352 1353 buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1354 if (!buf_prevkey) 1355 return -ENOMEM; 1356 1357 buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); 1358 if (!buf) { 1359 kvfree(buf_prevkey); 1360 return -ENOMEM; 1361 } 1362 1363 err = -EFAULT; 1364 prev_key = NULL; 1365 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) 1366 goto free_buf; 1367 key = buf; 1368 value = key + map->key_size; 1369 if (ubatch) 1370 prev_key = buf_prevkey; 1371 1372 for (cp = 0; cp < max_count;) { 1373 rcu_read_lock(); 1374 err = map->ops->map_get_next_key(map, prev_key, key); 1375 rcu_read_unlock(); 1376 if (err) 1377 break; 1378 err = bpf_map_copy_value(map, key, value, 1379 attr->batch.elem_flags); 1380 1381 if (err == -ENOENT) { 1382 if (retry) { 1383 retry--; 1384 continue; 1385 } 1386 err = -EINTR; 1387 break; 1388 } 1389 1390 if (err) 1391 goto free_buf; 1392 1393 if (copy_to_user(keys + cp * map->key_size, key, 1394 map->key_size)) { 1395 err = -EFAULT; 1396 goto free_buf; 1397 } 1398 if (copy_to_user(values + cp * value_size, value, value_size)) { 1399 err = -EFAULT; 1400 goto free_buf; 1401 } 1402 1403 if (!prev_key) 1404 prev_key = buf_prevkey; 1405 1406 swap(prev_key, key); 1407 retry = MAP_LOOKUP_RETRIES; 1408 cp++; 1409 } 1410 1411 if (err == -EFAULT) 1412 goto free_buf; 1413 1414 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) || 1415 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) 1416 err = -EFAULT; 1417 1418 free_buf: 1419 kfree(buf_prevkey); 1420 kfree(buf); 1421 return err; 1422 } 1423 1424 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value 1425 1426 static int map_lookup_and_delete_elem(union bpf_attr *attr) 1427 { 1428 void __user *ukey = u64_to_user_ptr(attr->key); 1429 void __user *uvalue = u64_to_user_ptr(attr->value); 1430 int ufd = attr->map_fd; 1431 struct bpf_map *map; 1432 void *key, *value; 1433 u32 value_size; 1434 struct fd f; 1435 int err; 1436 1437 if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM)) 1438 return -EINVAL; 1439 1440 f = fdget(ufd); 1441 map = __bpf_map_get(f); 1442 if (IS_ERR(map)) 1443 return PTR_ERR(map); 1444 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1445 err = -EPERM; 1446 goto err_put; 1447 } 1448 1449 key = __bpf_copy_key(ukey, map->key_size); 1450 if (IS_ERR(key)) { 1451 err = PTR_ERR(key); 1452 goto err_put; 1453 } 1454 1455 value_size = map->value_size; 1456 1457 err = -ENOMEM; 1458 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 1459 if (!value) 1460 goto free_key; 1461 1462 if (map->map_type == BPF_MAP_TYPE_QUEUE || 1463 map->map_type == BPF_MAP_TYPE_STACK) { 1464 err = map->ops->map_pop_elem(map, value); 1465 } else { 1466 err = -ENOTSUPP; 1467 } 1468 1469 if (err) 1470 goto free_value; 1471 1472 if (copy_to_user(uvalue, value, value_size) != 0) 1473 goto free_value; 1474 1475 err = 0; 1476 1477 free_value: 1478 kfree(value); 1479 free_key: 1480 kfree(key); 1481 err_put: 1482 fdput(f); 1483 return err; 1484 } 1485 1486 #define BPF_MAP_FREEZE_LAST_FIELD map_fd 1487 1488 static int map_freeze(const union bpf_attr *attr) 1489 { 1490 int err = 0, ufd = attr->map_fd; 1491 struct bpf_map *map; 1492 struct fd f; 1493 1494 if (CHECK_ATTR(BPF_MAP_FREEZE)) 1495 return -EINVAL; 1496 1497 f = fdget(ufd); 1498 map = __bpf_map_get(f); 1499 if (IS_ERR(map)) 1500 return PTR_ERR(map); 1501 1502 mutex_lock(&map->freeze_mutex); 1503 1504 if (map->writecnt) { 1505 err = -EBUSY; 1506 goto err_put; 1507 } 1508 if (READ_ONCE(map->frozen)) { 1509 err = -EBUSY; 1510 goto err_put; 1511 } 1512 if (!capable(CAP_SYS_ADMIN)) { 1513 err = -EPERM; 1514 goto err_put; 1515 } 1516 1517 WRITE_ONCE(map->frozen, true); 1518 err_put: 1519 mutex_unlock(&map->freeze_mutex); 1520 fdput(f); 1521 return err; 1522 } 1523 1524 static const struct bpf_prog_ops * const bpf_prog_types[] = { 1525 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 1526 [_id] = & _name ## _prog_ops, 1527 #define BPF_MAP_TYPE(_id, _ops) 1528 #include <linux/bpf_types.h> 1529 #undef BPF_PROG_TYPE 1530 #undef BPF_MAP_TYPE 1531 }; 1532 1533 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) 1534 { 1535 const struct bpf_prog_ops *ops; 1536 1537 if (type >= ARRAY_SIZE(bpf_prog_types)) 1538 return -EINVAL; 1539 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); 1540 ops = bpf_prog_types[type]; 1541 if (!ops) 1542 return -EINVAL; 1543 1544 if (!bpf_prog_is_dev_bound(prog->aux)) 1545 prog->aux->ops = ops; 1546 else 1547 prog->aux->ops = &bpf_offload_prog_ops; 1548 prog->type = type; 1549 return 0; 1550 } 1551 1552 enum bpf_audit { 1553 BPF_AUDIT_LOAD, 1554 BPF_AUDIT_UNLOAD, 1555 BPF_AUDIT_MAX, 1556 }; 1557 1558 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = { 1559 [BPF_AUDIT_LOAD] = "LOAD", 1560 [BPF_AUDIT_UNLOAD] = "UNLOAD", 1561 }; 1562 1563 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op) 1564 { 1565 struct audit_context *ctx = NULL; 1566 struct audit_buffer *ab; 1567 1568 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX)) 1569 return; 1570 if (audit_enabled == AUDIT_OFF) 1571 return; 1572 if (op == BPF_AUDIT_LOAD) 1573 ctx = audit_context(); 1574 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); 1575 if (unlikely(!ab)) 1576 return; 1577 audit_log_format(ab, "prog-id=%u op=%s", 1578 prog->aux->id, bpf_audit_str[op]); 1579 audit_log_end(ab); 1580 } 1581 1582 int __bpf_prog_charge(struct user_struct *user, u32 pages) 1583 { 1584 unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 1585 unsigned long user_bufs; 1586 1587 if (user) { 1588 user_bufs = atomic_long_add_return(pages, &user->locked_vm); 1589 if (user_bufs > memlock_limit) { 1590 atomic_long_sub(pages, &user->locked_vm); 1591 return -EPERM; 1592 } 1593 } 1594 1595 return 0; 1596 } 1597 1598 void __bpf_prog_uncharge(struct user_struct *user, u32 pages) 1599 { 1600 if (user) 1601 atomic_long_sub(pages, &user->locked_vm); 1602 } 1603 1604 static int bpf_prog_charge_memlock(struct bpf_prog *prog) 1605 { 1606 struct user_struct *user = get_current_user(); 1607 int ret; 1608 1609 ret = __bpf_prog_charge(user, prog->pages); 1610 if (ret) { 1611 free_uid(user); 1612 return ret; 1613 } 1614 1615 prog->aux->user = user; 1616 return 0; 1617 } 1618 1619 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog) 1620 { 1621 struct user_struct *user = prog->aux->user; 1622 1623 __bpf_prog_uncharge(user, prog->pages); 1624 free_uid(user); 1625 } 1626 1627 static int bpf_prog_alloc_id(struct bpf_prog *prog) 1628 { 1629 int id; 1630 1631 idr_preload(GFP_KERNEL); 1632 spin_lock_bh(&prog_idr_lock); 1633 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); 1634 if (id > 0) 1635 prog->aux->id = id; 1636 spin_unlock_bh(&prog_idr_lock); 1637 idr_preload_end(); 1638 1639 /* id is in [1, INT_MAX) */ 1640 if (WARN_ON_ONCE(!id)) 1641 return -ENOSPC; 1642 1643 return id > 0 ? 0 : id; 1644 } 1645 1646 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) 1647 { 1648 /* cBPF to eBPF migrations are currently not in the idr store. 1649 * Offloaded programs are removed from the store when their device 1650 * disappears - even if someone grabs an fd to them they are unusable, 1651 * simply waiting for refcnt to drop to be freed. 1652 */ 1653 if (!prog->aux->id) 1654 return; 1655 1656 if (do_idr_lock) 1657 spin_lock_bh(&prog_idr_lock); 1658 else 1659 __acquire(&prog_idr_lock); 1660 1661 idr_remove(&prog_idr, prog->aux->id); 1662 prog->aux->id = 0; 1663 1664 if (do_idr_lock) 1665 spin_unlock_bh(&prog_idr_lock); 1666 else 1667 __release(&prog_idr_lock); 1668 } 1669 1670 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 1671 { 1672 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 1673 1674 kvfree(aux->func_info); 1675 kfree(aux->func_info_aux); 1676 bpf_prog_uncharge_memlock(aux->prog); 1677 security_bpf_prog_free(aux); 1678 bpf_prog_free(aux->prog); 1679 } 1680 1681 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 1682 { 1683 bpf_prog_kallsyms_del_all(prog); 1684 btf_put(prog->aux->btf); 1685 bpf_prog_free_linfo(prog); 1686 1687 if (deferred) 1688 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 1689 else 1690 __bpf_prog_put_rcu(&prog->aux->rcu); 1691 } 1692 1693 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) 1694 { 1695 if (atomic64_dec_and_test(&prog->aux->refcnt)) { 1696 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 1697 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 1698 /* bpf_prog_free_id() must be called first */ 1699 bpf_prog_free_id(prog, do_idr_lock); 1700 __bpf_prog_put_noref(prog, true); 1701 } 1702 } 1703 1704 void bpf_prog_put(struct bpf_prog *prog) 1705 { 1706 __bpf_prog_put(prog, true); 1707 } 1708 EXPORT_SYMBOL_GPL(bpf_prog_put); 1709 1710 static int bpf_prog_release(struct inode *inode, struct file *filp) 1711 { 1712 struct bpf_prog *prog = filp->private_data; 1713 1714 bpf_prog_put(prog); 1715 return 0; 1716 } 1717 1718 static void bpf_prog_get_stats(const struct bpf_prog *prog, 1719 struct bpf_prog_stats *stats) 1720 { 1721 u64 nsecs = 0, cnt = 0; 1722 int cpu; 1723 1724 for_each_possible_cpu(cpu) { 1725 const struct bpf_prog_stats *st; 1726 unsigned int start; 1727 u64 tnsecs, tcnt; 1728 1729 st = per_cpu_ptr(prog->aux->stats, cpu); 1730 do { 1731 start = u64_stats_fetch_begin_irq(&st->syncp); 1732 tnsecs = st->nsecs; 1733 tcnt = st->cnt; 1734 } while (u64_stats_fetch_retry_irq(&st->syncp, start)); 1735 nsecs += tnsecs; 1736 cnt += tcnt; 1737 } 1738 stats->nsecs = nsecs; 1739 stats->cnt = cnt; 1740 } 1741 1742 #ifdef CONFIG_PROC_FS 1743 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 1744 { 1745 const struct bpf_prog *prog = filp->private_data; 1746 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 1747 struct bpf_prog_stats stats; 1748 1749 bpf_prog_get_stats(prog, &stats); 1750 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 1751 seq_printf(m, 1752 "prog_type:\t%u\n" 1753 "prog_jited:\t%u\n" 1754 "prog_tag:\t%s\n" 1755 "memlock:\t%llu\n" 1756 "prog_id:\t%u\n" 1757 "run_time_ns:\t%llu\n" 1758 "run_cnt:\t%llu\n", 1759 prog->type, 1760 prog->jited, 1761 prog_tag, 1762 prog->pages * 1ULL << PAGE_SHIFT, 1763 prog->aux->id, 1764 stats.nsecs, 1765 stats.cnt); 1766 } 1767 #endif 1768 1769 const struct file_operations bpf_prog_fops = { 1770 #ifdef CONFIG_PROC_FS 1771 .show_fdinfo = bpf_prog_show_fdinfo, 1772 #endif 1773 .release = bpf_prog_release, 1774 .read = bpf_dummy_read, 1775 .write = bpf_dummy_write, 1776 }; 1777 1778 int bpf_prog_new_fd(struct bpf_prog *prog) 1779 { 1780 int ret; 1781 1782 ret = security_bpf_prog(prog); 1783 if (ret < 0) 1784 return ret; 1785 1786 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 1787 O_RDWR | O_CLOEXEC); 1788 } 1789 1790 static struct bpf_prog *____bpf_prog_get(struct fd f) 1791 { 1792 if (!f.file) 1793 return ERR_PTR(-EBADF); 1794 if (f.file->f_op != &bpf_prog_fops) { 1795 fdput(f); 1796 return ERR_PTR(-EINVAL); 1797 } 1798 1799 return f.file->private_data; 1800 } 1801 1802 void bpf_prog_add(struct bpf_prog *prog, int i) 1803 { 1804 atomic64_add(i, &prog->aux->refcnt); 1805 } 1806 EXPORT_SYMBOL_GPL(bpf_prog_add); 1807 1808 void bpf_prog_sub(struct bpf_prog *prog, int i) 1809 { 1810 /* Only to be used for undoing previous bpf_prog_add() in some 1811 * error path. We still know that another entity in our call 1812 * path holds a reference to the program, thus atomic_sub() can 1813 * be safely used in such cases! 1814 */ 1815 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 1816 } 1817 EXPORT_SYMBOL_GPL(bpf_prog_sub); 1818 1819 void bpf_prog_inc(struct bpf_prog *prog) 1820 { 1821 atomic64_inc(&prog->aux->refcnt); 1822 } 1823 EXPORT_SYMBOL_GPL(bpf_prog_inc); 1824 1825 /* prog_idr_lock should have been held */ 1826 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 1827 { 1828 int refold; 1829 1830 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 1831 1832 if (!refold) 1833 return ERR_PTR(-ENOENT); 1834 1835 return prog; 1836 } 1837 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1838 1839 bool bpf_prog_get_ok(struct bpf_prog *prog, 1840 enum bpf_prog_type *attach_type, bool attach_drv) 1841 { 1842 /* not an attachment, just a refcount inc, always allow */ 1843 if (!attach_type) 1844 return true; 1845 1846 if (prog->type != *attach_type) 1847 return false; 1848 if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) 1849 return false; 1850 1851 return true; 1852 } 1853 1854 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 1855 bool attach_drv) 1856 { 1857 struct fd f = fdget(ufd); 1858 struct bpf_prog *prog; 1859 1860 prog = ____bpf_prog_get(f); 1861 if (IS_ERR(prog)) 1862 return prog; 1863 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 1864 prog = ERR_PTR(-EINVAL); 1865 goto out; 1866 } 1867 1868 bpf_prog_inc(prog); 1869 out: 1870 fdput(f); 1871 return prog; 1872 } 1873 1874 struct bpf_prog *bpf_prog_get(u32 ufd) 1875 { 1876 return __bpf_prog_get(ufd, NULL, false); 1877 } 1878 1879 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1880 bool attach_drv) 1881 { 1882 return __bpf_prog_get(ufd, &type, attach_drv); 1883 } 1884 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 1885 1886 /* Initially all BPF programs could be loaded w/o specifying 1887 * expected_attach_type. Later for some of them specifying expected_attach_type 1888 * at load time became required so that program could be validated properly. 1889 * Programs of types that are allowed to be loaded both w/ and w/o (for 1890 * backward compatibility) expected_attach_type, should have the default attach 1891 * type assigned to expected_attach_type for the latter case, so that it can be 1892 * validated later at attach time. 1893 * 1894 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 1895 * prog type requires it but has some attach types that have to be backward 1896 * compatible. 1897 */ 1898 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 1899 { 1900 switch (attr->prog_type) { 1901 case BPF_PROG_TYPE_CGROUP_SOCK: 1902 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 1903 * exist so checking for non-zero is the way to go here. 1904 */ 1905 if (!attr->expected_attach_type) 1906 attr->expected_attach_type = 1907 BPF_CGROUP_INET_SOCK_CREATE; 1908 break; 1909 } 1910 } 1911 1912 static int 1913 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 1914 enum bpf_attach_type expected_attach_type, 1915 u32 btf_id, u32 prog_fd) 1916 { 1917 if (btf_id) { 1918 if (btf_id > BTF_MAX_TYPE) 1919 return -EINVAL; 1920 1921 switch (prog_type) { 1922 case BPF_PROG_TYPE_TRACING: 1923 case BPF_PROG_TYPE_STRUCT_OPS: 1924 case BPF_PROG_TYPE_EXT: 1925 break; 1926 default: 1927 return -EINVAL; 1928 } 1929 } 1930 1931 if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING && 1932 prog_type != BPF_PROG_TYPE_EXT) 1933 return -EINVAL; 1934 1935 switch (prog_type) { 1936 case BPF_PROG_TYPE_CGROUP_SOCK: 1937 switch (expected_attach_type) { 1938 case BPF_CGROUP_INET_SOCK_CREATE: 1939 case BPF_CGROUP_INET4_POST_BIND: 1940 case BPF_CGROUP_INET6_POST_BIND: 1941 return 0; 1942 default: 1943 return -EINVAL; 1944 } 1945 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 1946 switch (expected_attach_type) { 1947 case BPF_CGROUP_INET4_BIND: 1948 case BPF_CGROUP_INET6_BIND: 1949 case BPF_CGROUP_INET4_CONNECT: 1950 case BPF_CGROUP_INET6_CONNECT: 1951 case BPF_CGROUP_UDP4_SENDMSG: 1952 case BPF_CGROUP_UDP6_SENDMSG: 1953 case BPF_CGROUP_UDP4_RECVMSG: 1954 case BPF_CGROUP_UDP6_RECVMSG: 1955 return 0; 1956 default: 1957 return -EINVAL; 1958 } 1959 case BPF_PROG_TYPE_CGROUP_SKB: 1960 switch (expected_attach_type) { 1961 case BPF_CGROUP_INET_INGRESS: 1962 case BPF_CGROUP_INET_EGRESS: 1963 return 0; 1964 default: 1965 return -EINVAL; 1966 } 1967 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 1968 switch (expected_attach_type) { 1969 case BPF_CGROUP_SETSOCKOPT: 1970 case BPF_CGROUP_GETSOCKOPT: 1971 return 0; 1972 default: 1973 return -EINVAL; 1974 } 1975 case BPF_PROG_TYPE_EXT: 1976 if (expected_attach_type) 1977 return -EINVAL; 1978 /* fallthrough */ 1979 default: 1980 return 0; 1981 } 1982 } 1983 1984 /* last field in 'union bpf_attr' used by this command */ 1985 #define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd 1986 1987 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) 1988 { 1989 enum bpf_prog_type type = attr->prog_type; 1990 struct bpf_prog *prog; 1991 int err; 1992 char license[128]; 1993 bool is_gpl; 1994 1995 if (CHECK_ATTR(BPF_PROG_LOAD)) 1996 return -EINVAL; 1997 1998 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 1999 BPF_F_ANY_ALIGNMENT | 2000 BPF_F_TEST_STATE_FREQ | 2001 BPF_F_TEST_RND_HI32)) 2002 return -EINVAL; 2003 2004 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2005 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2006 !capable(CAP_SYS_ADMIN)) 2007 return -EPERM; 2008 2009 /* copy eBPF program license from user space */ 2010 if (strncpy_from_user(license, u64_to_user_ptr(attr->license), 2011 sizeof(license) - 1) < 0) 2012 return -EFAULT; 2013 license[sizeof(license) - 1] = 0; 2014 2015 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2016 is_gpl = license_is_gpl_compatible(license); 2017 2018 if (attr->insn_cnt == 0 || 2019 attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) 2020 return -E2BIG; 2021 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2022 type != BPF_PROG_TYPE_CGROUP_SKB && 2023 !capable(CAP_SYS_ADMIN)) 2024 return -EPERM; 2025 2026 bpf_prog_load_fixup_attach_type(attr); 2027 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2028 attr->attach_btf_id, 2029 attr->attach_prog_fd)) 2030 return -EINVAL; 2031 2032 /* plain bpf_prog allocation */ 2033 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2034 if (!prog) 2035 return -ENOMEM; 2036 2037 prog->expected_attach_type = attr->expected_attach_type; 2038 prog->aux->attach_btf_id = attr->attach_btf_id; 2039 if (attr->attach_prog_fd) { 2040 struct bpf_prog *tgt_prog; 2041 2042 tgt_prog = bpf_prog_get(attr->attach_prog_fd); 2043 if (IS_ERR(tgt_prog)) { 2044 err = PTR_ERR(tgt_prog); 2045 goto free_prog_nouncharge; 2046 } 2047 prog->aux->linked_prog = tgt_prog; 2048 } 2049 2050 prog->aux->offload_requested = !!attr->prog_ifindex; 2051 2052 err = security_bpf_prog_alloc(prog->aux); 2053 if (err) 2054 goto free_prog_nouncharge; 2055 2056 err = bpf_prog_charge_memlock(prog); 2057 if (err) 2058 goto free_prog_sec; 2059 2060 prog->len = attr->insn_cnt; 2061 2062 err = -EFAULT; 2063 if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns), 2064 bpf_prog_insn_size(prog)) != 0) 2065 goto free_prog; 2066 2067 prog->orig_prog = NULL; 2068 prog->jited = 0; 2069 2070 atomic64_set(&prog->aux->refcnt, 1); 2071 prog->gpl_compatible = is_gpl ? 1 : 0; 2072 2073 if (bpf_prog_is_dev_bound(prog->aux)) { 2074 err = bpf_prog_offload_init(prog, attr); 2075 if (err) 2076 goto free_prog; 2077 } 2078 2079 /* find program type: socket_filter vs tracing_filter */ 2080 err = find_prog_type(type, prog); 2081 if (err < 0) 2082 goto free_prog; 2083 2084 prog->aux->load_time = ktime_get_boottime_ns(); 2085 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name); 2086 if (err) 2087 goto free_prog; 2088 2089 /* run eBPF verifier */ 2090 err = bpf_check(&prog, attr, uattr); 2091 if (err < 0) 2092 goto free_used_maps; 2093 2094 prog = bpf_prog_select_runtime(prog, &err); 2095 if (err < 0) 2096 goto free_used_maps; 2097 2098 err = bpf_prog_alloc_id(prog); 2099 if (err) 2100 goto free_used_maps; 2101 2102 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2103 * effectively publicly exposed. However, retrieving via 2104 * bpf_prog_get_fd_by_id() will take another reference, 2105 * therefore it cannot be gone underneath us. 2106 * 2107 * Only for the time /after/ successful bpf_prog_new_fd() 2108 * and before returning to userspace, we might just hold 2109 * one reference and any parallel close on that fd could 2110 * rip everything out. Hence, below notifications must 2111 * happen before bpf_prog_new_fd(). 2112 * 2113 * Also, any failure handling from this point onwards must 2114 * be using bpf_prog_put() given the program is exposed. 2115 */ 2116 bpf_prog_kallsyms_add(prog); 2117 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 2118 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 2119 2120 err = bpf_prog_new_fd(prog); 2121 if (err < 0) 2122 bpf_prog_put(prog); 2123 return err; 2124 2125 free_used_maps: 2126 /* In case we have subprogs, we need to wait for a grace 2127 * period before we can tear down JIT memory since symbols 2128 * are already exposed under kallsyms. 2129 */ 2130 __bpf_prog_put_noref(prog, prog->aux->func_cnt); 2131 return err; 2132 free_prog: 2133 bpf_prog_uncharge_memlock(prog); 2134 free_prog_sec: 2135 security_bpf_prog_free(prog->aux); 2136 free_prog_nouncharge: 2137 bpf_prog_free(prog); 2138 return err; 2139 } 2140 2141 #define BPF_OBJ_LAST_FIELD file_flags 2142 2143 static int bpf_obj_pin(const union bpf_attr *attr) 2144 { 2145 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0) 2146 return -EINVAL; 2147 2148 return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); 2149 } 2150 2151 static int bpf_obj_get(const union bpf_attr *attr) 2152 { 2153 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 2154 attr->file_flags & ~BPF_OBJ_FLAG_MASK) 2155 return -EINVAL; 2156 2157 return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), 2158 attr->file_flags); 2159 } 2160 2161 static int bpf_tracing_prog_release(struct inode *inode, struct file *filp) 2162 { 2163 struct bpf_prog *prog = filp->private_data; 2164 2165 WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog)); 2166 bpf_prog_put(prog); 2167 return 0; 2168 } 2169 2170 static const struct file_operations bpf_tracing_prog_fops = { 2171 .release = bpf_tracing_prog_release, 2172 .read = bpf_dummy_read, 2173 .write = bpf_dummy_write, 2174 }; 2175 2176 static int bpf_tracing_prog_attach(struct bpf_prog *prog) 2177 { 2178 int tr_fd, err; 2179 2180 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 2181 prog->expected_attach_type != BPF_TRACE_FEXIT && 2182 prog->type != BPF_PROG_TYPE_EXT) { 2183 err = -EINVAL; 2184 goto out_put_prog; 2185 } 2186 2187 err = bpf_trampoline_link_prog(prog); 2188 if (err) 2189 goto out_put_prog; 2190 2191 tr_fd = anon_inode_getfd("bpf-tracing-prog", &bpf_tracing_prog_fops, 2192 prog, O_CLOEXEC); 2193 if (tr_fd < 0) { 2194 WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog)); 2195 err = tr_fd; 2196 goto out_put_prog; 2197 } 2198 return tr_fd; 2199 2200 out_put_prog: 2201 bpf_prog_put(prog); 2202 return err; 2203 } 2204 2205 struct bpf_raw_tracepoint { 2206 struct bpf_raw_event_map *btp; 2207 struct bpf_prog *prog; 2208 }; 2209 2210 static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp) 2211 { 2212 struct bpf_raw_tracepoint *raw_tp = filp->private_data; 2213 2214 if (raw_tp->prog) { 2215 bpf_probe_unregister(raw_tp->btp, raw_tp->prog); 2216 bpf_prog_put(raw_tp->prog); 2217 } 2218 bpf_put_raw_tracepoint(raw_tp->btp); 2219 kfree(raw_tp); 2220 return 0; 2221 } 2222 2223 static const struct file_operations bpf_raw_tp_fops = { 2224 .release = bpf_raw_tracepoint_release, 2225 .read = bpf_dummy_read, 2226 .write = bpf_dummy_write, 2227 }; 2228 2229 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 2230 2231 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 2232 { 2233 struct bpf_raw_tracepoint *raw_tp; 2234 struct bpf_raw_event_map *btp; 2235 struct bpf_prog *prog; 2236 const char *tp_name; 2237 char buf[128]; 2238 int tp_fd, err; 2239 2240 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 2241 return -EINVAL; 2242 2243 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 2244 if (IS_ERR(prog)) 2245 return PTR_ERR(prog); 2246 2247 if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT && 2248 prog->type != BPF_PROG_TYPE_TRACING && 2249 prog->type != BPF_PROG_TYPE_EXT && 2250 prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) { 2251 err = -EINVAL; 2252 goto out_put_prog; 2253 } 2254 2255 if (prog->type == BPF_PROG_TYPE_TRACING || 2256 prog->type == BPF_PROG_TYPE_EXT) { 2257 if (attr->raw_tracepoint.name) { 2258 /* The attach point for this category of programs 2259 * should be specified via btf_id during program load. 2260 */ 2261 err = -EINVAL; 2262 goto out_put_prog; 2263 } 2264 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 2265 tp_name = prog->aux->attach_func_name; 2266 else 2267 return bpf_tracing_prog_attach(prog); 2268 } else { 2269 if (strncpy_from_user(buf, 2270 u64_to_user_ptr(attr->raw_tracepoint.name), 2271 sizeof(buf) - 1) < 0) { 2272 err = -EFAULT; 2273 goto out_put_prog; 2274 } 2275 buf[sizeof(buf) - 1] = 0; 2276 tp_name = buf; 2277 } 2278 2279 btp = bpf_get_raw_tracepoint(tp_name); 2280 if (!btp) { 2281 err = -ENOENT; 2282 goto out_put_prog; 2283 } 2284 2285 raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER); 2286 if (!raw_tp) { 2287 err = -ENOMEM; 2288 goto out_put_btp; 2289 } 2290 raw_tp->btp = btp; 2291 raw_tp->prog = prog; 2292 2293 err = bpf_probe_register(raw_tp->btp, prog); 2294 if (err) 2295 goto out_free_tp; 2296 2297 tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp, 2298 O_CLOEXEC); 2299 if (tp_fd < 0) { 2300 bpf_probe_unregister(raw_tp->btp, prog); 2301 err = tp_fd; 2302 goto out_free_tp; 2303 } 2304 return tp_fd; 2305 2306 out_free_tp: 2307 kfree(raw_tp); 2308 out_put_btp: 2309 bpf_put_raw_tracepoint(btp); 2310 out_put_prog: 2311 bpf_prog_put(prog); 2312 return err; 2313 } 2314 2315 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 2316 enum bpf_attach_type attach_type) 2317 { 2318 switch (prog->type) { 2319 case BPF_PROG_TYPE_CGROUP_SOCK: 2320 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2321 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2322 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 2323 case BPF_PROG_TYPE_CGROUP_SKB: 2324 return prog->enforce_expected_attach_type && 2325 prog->expected_attach_type != attach_type ? 2326 -EINVAL : 0; 2327 default: 2328 return 0; 2329 } 2330 } 2331 2332 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd 2333 2334 #define BPF_F_ATTACH_MASK \ 2335 (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE) 2336 2337 static int bpf_prog_attach(const union bpf_attr *attr) 2338 { 2339 enum bpf_prog_type ptype; 2340 struct bpf_prog *prog; 2341 int ret; 2342 2343 if (!capable(CAP_NET_ADMIN)) 2344 return -EPERM; 2345 2346 if (CHECK_ATTR(BPF_PROG_ATTACH)) 2347 return -EINVAL; 2348 2349 if (attr->attach_flags & ~BPF_F_ATTACH_MASK) 2350 return -EINVAL; 2351 2352 switch (attr->attach_type) { 2353 case BPF_CGROUP_INET_INGRESS: 2354 case BPF_CGROUP_INET_EGRESS: 2355 ptype = BPF_PROG_TYPE_CGROUP_SKB; 2356 break; 2357 case BPF_CGROUP_INET_SOCK_CREATE: 2358 case BPF_CGROUP_INET4_POST_BIND: 2359 case BPF_CGROUP_INET6_POST_BIND: 2360 ptype = BPF_PROG_TYPE_CGROUP_SOCK; 2361 break; 2362 case BPF_CGROUP_INET4_BIND: 2363 case BPF_CGROUP_INET6_BIND: 2364 case BPF_CGROUP_INET4_CONNECT: 2365 case BPF_CGROUP_INET6_CONNECT: 2366 case BPF_CGROUP_UDP4_SENDMSG: 2367 case BPF_CGROUP_UDP6_SENDMSG: 2368 case BPF_CGROUP_UDP4_RECVMSG: 2369 case BPF_CGROUP_UDP6_RECVMSG: 2370 ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 2371 break; 2372 case BPF_CGROUP_SOCK_OPS: 2373 ptype = BPF_PROG_TYPE_SOCK_OPS; 2374 break; 2375 case BPF_CGROUP_DEVICE: 2376 ptype = BPF_PROG_TYPE_CGROUP_DEVICE; 2377 break; 2378 case BPF_SK_MSG_VERDICT: 2379 ptype = BPF_PROG_TYPE_SK_MSG; 2380 break; 2381 case BPF_SK_SKB_STREAM_PARSER: 2382 case BPF_SK_SKB_STREAM_VERDICT: 2383 ptype = BPF_PROG_TYPE_SK_SKB; 2384 break; 2385 case BPF_LIRC_MODE2: 2386 ptype = BPF_PROG_TYPE_LIRC_MODE2; 2387 break; 2388 case BPF_FLOW_DISSECTOR: 2389 ptype = BPF_PROG_TYPE_FLOW_DISSECTOR; 2390 break; 2391 case BPF_CGROUP_SYSCTL: 2392 ptype = BPF_PROG_TYPE_CGROUP_SYSCTL; 2393 break; 2394 case BPF_CGROUP_GETSOCKOPT: 2395 case BPF_CGROUP_SETSOCKOPT: 2396 ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT; 2397 break; 2398 default: 2399 return -EINVAL; 2400 } 2401 2402 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 2403 if (IS_ERR(prog)) 2404 return PTR_ERR(prog); 2405 2406 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 2407 bpf_prog_put(prog); 2408 return -EINVAL; 2409 } 2410 2411 switch (ptype) { 2412 case BPF_PROG_TYPE_SK_SKB: 2413 case BPF_PROG_TYPE_SK_MSG: 2414 ret = sock_map_get_from_fd(attr, prog); 2415 break; 2416 case BPF_PROG_TYPE_LIRC_MODE2: 2417 ret = lirc_prog_attach(attr, prog); 2418 break; 2419 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2420 ret = skb_flow_dissector_bpf_prog_attach(attr, prog); 2421 break; 2422 default: 2423 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 2424 } 2425 2426 if (ret) 2427 bpf_prog_put(prog); 2428 return ret; 2429 } 2430 2431 #define BPF_PROG_DETACH_LAST_FIELD attach_type 2432 2433 static int bpf_prog_detach(const union bpf_attr *attr) 2434 { 2435 enum bpf_prog_type ptype; 2436 2437 if (!capable(CAP_NET_ADMIN)) 2438 return -EPERM; 2439 2440 if (CHECK_ATTR(BPF_PROG_DETACH)) 2441 return -EINVAL; 2442 2443 switch (attr->attach_type) { 2444 case BPF_CGROUP_INET_INGRESS: 2445 case BPF_CGROUP_INET_EGRESS: 2446 ptype = BPF_PROG_TYPE_CGROUP_SKB; 2447 break; 2448 case BPF_CGROUP_INET_SOCK_CREATE: 2449 case BPF_CGROUP_INET4_POST_BIND: 2450 case BPF_CGROUP_INET6_POST_BIND: 2451 ptype = BPF_PROG_TYPE_CGROUP_SOCK; 2452 break; 2453 case BPF_CGROUP_INET4_BIND: 2454 case BPF_CGROUP_INET6_BIND: 2455 case BPF_CGROUP_INET4_CONNECT: 2456 case BPF_CGROUP_INET6_CONNECT: 2457 case BPF_CGROUP_UDP4_SENDMSG: 2458 case BPF_CGROUP_UDP6_SENDMSG: 2459 case BPF_CGROUP_UDP4_RECVMSG: 2460 case BPF_CGROUP_UDP6_RECVMSG: 2461 ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 2462 break; 2463 case BPF_CGROUP_SOCK_OPS: 2464 ptype = BPF_PROG_TYPE_SOCK_OPS; 2465 break; 2466 case BPF_CGROUP_DEVICE: 2467 ptype = BPF_PROG_TYPE_CGROUP_DEVICE; 2468 break; 2469 case BPF_SK_MSG_VERDICT: 2470 return sock_map_get_from_fd(attr, NULL); 2471 case BPF_SK_SKB_STREAM_PARSER: 2472 case BPF_SK_SKB_STREAM_VERDICT: 2473 return sock_map_get_from_fd(attr, NULL); 2474 case BPF_LIRC_MODE2: 2475 return lirc_prog_detach(attr); 2476 case BPF_FLOW_DISSECTOR: 2477 return skb_flow_dissector_bpf_prog_detach(attr); 2478 case BPF_CGROUP_SYSCTL: 2479 ptype = BPF_PROG_TYPE_CGROUP_SYSCTL; 2480 break; 2481 case BPF_CGROUP_GETSOCKOPT: 2482 case BPF_CGROUP_SETSOCKOPT: 2483 ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT; 2484 break; 2485 default: 2486 return -EINVAL; 2487 } 2488 2489 return cgroup_bpf_prog_detach(attr, ptype); 2490 } 2491 2492 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt 2493 2494 static int bpf_prog_query(const union bpf_attr *attr, 2495 union bpf_attr __user *uattr) 2496 { 2497 if (!capable(CAP_NET_ADMIN)) 2498 return -EPERM; 2499 if (CHECK_ATTR(BPF_PROG_QUERY)) 2500 return -EINVAL; 2501 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 2502 return -EINVAL; 2503 2504 switch (attr->query.attach_type) { 2505 case BPF_CGROUP_INET_INGRESS: 2506 case BPF_CGROUP_INET_EGRESS: 2507 case BPF_CGROUP_INET_SOCK_CREATE: 2508 case BPF_CGROUP_INET4_BIND: 2509 case BPF_CGROUP_INET6_BIND: 2510 case BPF_CGROUP_INET4_POST_BIND: 2511 case BPF_CGROUP_INET6_POST_BIND: 2512 case BPF_CGROUP_INET4_CONNECT: 2513 case BPF_CGROUP_INET6_CONNECT: 2514 case BPF_CGROUP_UDP4_SENDMSG: 2515 case BPF_CGROUP_UDP6_SENDMSG: 2516 case BPF_CGROUP_UDP4_RECVMSG: 2517 case BPF_CGROUP_UDP6_RECVMSG: 2518 case BPF_CGROUP_SOCK_OPS: 2519 case BPF_CGROUP_DEVICE: 2520 case BPF_CGROUP_SYSCTL: 2521 case BPF_CGROUP_GETSOCKOPT: 2522 case BPF_CGROUP_SETSOCKOPT: 2523 break; 2524 case BPF_LIRC_MODE2: 2525 return lirc_prog_query(attr, uattr); 2526 case BPF_FLOW_DISSECTOR: 2527 return skb_flow_dissector_prog_query(attr, uattr); 2528 default: 2529 return -EINVAL; 2530 } 2531 2532 return cgroup_bpf_prog_query(attr, uattr); 2533 } 2534 2535 #define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out 2536 2537 static int bpf_prog_test_run(const union bpf_attr *attr, 2538 union bpf_attr __user *uattr) 2539 { 2540 struct bpf_prog *prog; 2541 int ret = -ENOTSUPP; 2542 2543 if (!capable(CAP_SYS_ADMIN)) 2544 return -EPERM; 2545 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 2546 return -EINVAL; 2547 2548 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 2549 (!attr->test.ctx_size_in && attr->test.ctx_in)) 2550 return -EINVAL; 2551 2552 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 2553 (!attr->test.ctx_size_out && attr->test.ctx_out)) 2554 return -EINVAL; 2555 2556 prog = bpf_prog_get(attr->test.prog_fd); 2557 if (IS_ERR(prog)) 2558 return PTR_ERR(prog); 2559 2560 if (prog->aux->ops->test_run) 2561 ret = prog->aux->ops->test_run(prog, attr, uattr); 2562 2563 bpf_prog_put(prog); 2564 return ret; 2565 } 2566 2567 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 2568 2569 static int bpf_obj_get_next_id(const union bpf_attr *attr, 2570 union bpf_attr __user *uattr, 2571 struct idr *idr, 2572 spinlock_t *lock) 2573 { 2574 u32 next_id = attr->start_id; 2575 int err = 0; 2576 2577 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 2578 return -EINVAL; 2579 2580 if (!capable(CAP_SYS_ADMIN)) 2581 return -EPERM; 2582 2583 next_id++; 2584 spin_lock_bh(lock); 2585 if (!idr_get_next(idr, &next_id)) 2586 err = -ENOENT; 2587 spin_unlock_bh(lock); 2588 2589 if (!err) 2590 err = put_user(next_id, &uattr->next_id); 2591 2592 return err; 2593 } 2594 2595 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 2596 2597 struct bpf_prog *bpf_prog_by_id(u32 id) 2598 { 2599 struct bpf_prog *prog; 2600 2601 if (!id) 2602 return ERR_PTR(-ENOENT); 2603 2604 spin_lock_bh(&prog_idr_lock); 2605 prog = idr_find(&prog_idr, id); 2606 if (prog) 2607 prog = bpf_prog_inc_not_zero(prog); 2608 else 2609 prog = ERR_PTR(-ENOENT); 2610 spin_unlock_bh(&prog_idr_lock); 2611 return prog; 2612 } 2613 2614 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 2615 { 2616 struct bpf_prog *prog; 2617 u32 id = attr->prog_id; 2618 int fd; 2619 2620 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 2621 return -EINVAL; 2622 2623 if (!capable(CAP_SYS_ADMIN)) 2624 return -EPERM; 2625 2626 prog = bpf_prog_by_id(id); 2627 if (IS_ERR(prog)) 2628 return PTR_ERR(prog); 2629 2630 fd = bpf_prog_new_fd(prog); 2631 if (fd < 0) 2632 bpf_prog_put(prog); 2633 2634 return fd; 2635 } 2636 2637 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 2638 2639 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 2640 { 2641 struct bpf_map *map; 2642 u32 id = attr->map_id; 2643 int f_flags; 2644 int fd; 2645 2646 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 2647 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 2648 return -EINVAL; 2649 2650 if (!capable(CAP_SYS_ADMIN)) 2651 return -EPERM; 2652 2653 f_flags = bpf_get_file_flag(attr->open_flags); 2654 if (f_flags < 0) 2655 return f_flags; 2656 2657 spin_lock_bh(&map_idr_lock); 2658 map = idr_find(&map_idr, id); 2659 if (map) 2660 map = __bpf_map_inc_not_zero(map, true); 2661 else 2662 map = ERR_PTR(-ENOENT); 2663 spin_unlock_bh(&map_idr_lock); 2664 2665 if (IS_ERR(map)) 2666 return PTR_ERR(map); 2667 2668 fd = bpf_map_new_fd(map, f_flags); 2669 if (fd < 0) 2670 bpf_map_put_with_uref(map); 2671 2672 return fd; 2673 } 2674 2675 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 2676 unsigned long addr, u32 *off, 2677 u32 *type) 2678 { 2679 const struct bpf_map *map; 2680 int i; 2681 2682 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 2683 map = prog->aux->used_maps[i]; 2684 if (map == (void *)addr) { 2685 *type = BPF_PSEUDO_MAP_FD; 2686 return map; 2687 } 2688 if (!map->ops->map_direct_value_meta) 2689 continue; 2690 if (!map->ops->map_direct_value_meta(map, addr, off)) { 2691 *type = BPF_PSEUDO_MAP_VALUE; 2692 return map; 2693 } 2694 } 2695 2696 return NULL; 2697 } 2698 2699 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) 2700 { 2701 const struct bpf_map *map; 2702 struct bpf_insn *insns; 2703 u32 off, type; 2704 u64 imm; 2705 int i; 2706 2707 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 2708 GFP_USER); 2709 if (!insns) 2710 return insns; 2711 2712 for (i = 0; i < prog->len; i++) { 2713 if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) { 2714 insns[i].code = BPF_JMP | BPF_CALL; 2715 insns[i].imm = BPF_FUNC_tail_call; 2716 /* fall-through */ 2717 } 2718 if (insns[i].code == (BPF_JMP | BPF_CALL) || 2719 insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) { 2720 if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) 2721 insns[i].code = BPF_JMP | BPF_CALL; 2722 if (!bpf_dump_raw_ok()) 2723 insns[i].imm = 0; 2724 continue; 2725 } 2726 2727 if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW)) 2728 continue; 2729 2730 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 2731 map = bpf_map_from_imm(prog, imm, &off, &type); 2732 if (map) { 2733 insns[i].src_reg = type; 2734 insns[i].imm = map->id; 2735 insns[i + 1].imm = off; 2736 continue; 2737 } 2738 } 2739 2740 return insns; 2741 } 2742 2743 static int set_info_rec_size(struct bpf_prog_info *info) 2744 { 2745 /* 2746 * Ensure info.*_rec_size is the same as kernel expected size 2747 * 2748 * or 2749 * 2750 * Only allow zero *_rec_size if both _rec_size and _cnt are 2751 * zero. In this case, the kernel will set the expected 2752 * _rec_size back to the info. 2753 */ 2754 2755 if ((info->nr_func_info || info->func_info_rec_size) && 2756 info->func_info_rec_size != sizeof(struct bpf_func_info)) 2757 return -EINVAL; 2758 2759 if ((info->nr_line_info || info->line_info_rec_size) && 2760 info->line_info_rec_size != sizeof(struct bpf_line_info)) 2761 return -EINVAL; 2762 2763 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 2764 info->jited_line_info_rec_size != sizeof(__u64)) 2765 return -EINVAL; 2766 2767 info->func_info_rec_size = sizeof(struct bpf_func_info); 2768 info->line_info_rec_size = sizeof(struct bpf_line_info); 2769 info->jited_line_info_rec_size = sizeof(__u64); 2770 2771 return 0; 2772 } 2773 2774 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, 2775 const union bpf_attr *attr, 2776 union bpf_attr __user *uattr) 2777 { 2778 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 2779 struct bpf_prog_info info = {}; 2780 u32 info_len = attr->info.info_len; 2781 struct bpf_prog_stats stats; 2782 char __user *uinsns; 2783 u32 ulen; 2784 int err; 2785 2786 err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); 2787 if (err) 2788 return err; 2789 info_len = min_t(u32, sizeof(info), info_len); 2790 2791 if (copy_from_user(&info, uinfo, info_len)) 2792 return -EFAULT; 2793 2794 info.type = prog->type; 2795 info.id = prog->aux->id; 2796 info.load_time = prog->aux->load_time; 2797 info.created_by_uid = from_kuid_munged(current_user_ns(), 2798 prog->aux->user->uid); 2799 info.gpl_compatible = prog->gpl_compatible; 2800 2801 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 2802 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 2803 2804 ulen = info.nr_map_ids; 2805 info.nr_map_ids = prog->aux->used_map_cnt; 2806 ulen = min_t(u32, info.nr_map_ids, ulen); 2807 if (ulen) { 2808 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 2809 u32 i; 2810 2811 for (i = 0; i < ulen; i++) 2812 if (put_user(prog->aux->used_maps[i]->id, 2813 &user_map_ids[i])) 2814 return -EFAULT; 2815 } 2816 2817 err = set_info_rec_size(&info); 2818 if (err) 2819 return err; 2820 2821 bpf_prog_get_stats(prog, &stats); 2822 info.run_time_ns = stats.nsecs; 2823 info.run_cnt = stats.cnt; 2824 2825 if (!capable(CAP_SYS_ADMIN)) { 2826 info.jited_prog_len = 0; 2827 info.xlated_prog_len = 0; 2828 info.nr_jited_ksyms = 0; 2829 info.nr_jited_func_lens = 0; 2830 info.nr_func_info = 0; 2831 info.nr_line_info = 0; 2832 info.nr_jited_line_info = 0; 2833 goto done; 2834 } 2835 2836 ulen = info.xlated_prog_len; 2837 info.xlated_prog_len = bpf_prog_insn_size(prog); 2838 if (info.xlated_prog_len && ulen) { 2839 struct bpf_insn *insns_sanitized; 2840 bool fault; 2841 2842 if (prog->blinded && !bpf_dump_raw_ok()) { 2843 info.xlated_prog_insns = 0; 2844 goto done; 2845 } 2846 insns_sanitized = bpf_insn_prepare_dump(prog); 2847 if (!insns_sanitized) 2848 return -ENOMEM; 2849 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 2850 ulen = min_t(u32, info.xlated_prog_len, ulen); 2851 fault = copy_to_user(uinsns, insns_sanitized, ulen); 2852 kfree(insns_sanitized); 2853 if (fault) 2854 return -EFAULT; 2855 } 2856 2857 if (bpf_prog_is_dev_bound(prog->aux)) { 2858 err = bpf_prog_offload_info_fill(&info, prog); 2859 if (err) 2860 return err; 2861 goto done; 2862 } 2863 2864 /* NOTE: the following code is supposed to be skipped for offload. 2865 * bpf_prog_offload_info_fill() is the place to fill similar fields 2866 * for offload. 2867 */ 2868 ulen = info.jited_prog_len; 2869 if (prog->aux->func_cnt) { 2870 u32 i; 2871 2872 info.jited_prog_len = 0; 2873 for (i = 0; i < prog->aux->func_cnt; i++) 2874 info.jited_prog_len += prog->aux->func[i]->jited_len; 2875 } else { 2876 info.jited_prog_len = prog->jited_len; 2877 } 2878 2879 if (info.jited_prog_len && ulen) { 2880 if (bpf_dump_raw_ok()) { 2881 uinsns = u64_to_user_ptr(info.jited_prog_insns); 2882 ulen = min_t(u32, info.jited_prog_len, ulen); 2883 2884 /* for multi-function programs, copy the JITed 2885 * instructions for all the functions 2886 */ 2887 if (prog->aux->func_cnt) { 2888 u32 len, free, i; 2889 u8 *img; 2890 2891 free = ulen; 2892 for (i = 0; i < prog->aux->func_cnt; i++) { 2893 len = prog->aux->func[i]->jited_len; 2894 len = min_t(u32, len, free); 2895 img = (u8 *) prog->aux->func[i]->bpf_func; 2896 if (copy_to_user(uinsns, img, len)) 2897 return -EFAULT; 2898 uinsns += len; 2899 free -= len; 2900 if (!free) 2901 break; 2902 } 2903 } else { 2904 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 2905 return -EFAULT; 2906 } 2907 } else { 2908 info.jited_prog_insns = 0; 2909 } 2910 } 2911 2912 ulen = info.nr_jited_ksyms; 2913 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 2914 if (ulen) { 2915 if (bpf_dump_raw_ok()) { 2916 unsigned long ksym_addr; 2917 u64 __user *user_ksyms; 2918 u32 i; 2919 2920 /* copy the address of the kernel symbol 2921 * corresponding to each function 2922 */ 2923 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 2924 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 2925 if (prog->aux->func_cnt) { 2926 for (i = 0; i < ulen; i++) { 2927 ksym_addr = (unsigned long) 2928 prog->aux->func[i]->bpf_func; 2929 if (put_user((u64) ksym_addr, 2930 &user_ksyms[i])) 2931 return -EFAULT; 2932 } 2933 } else { 2934 ksym_addr = (unsigned long) prog->bpf_func; 2935 if (put_user((u64) ksym_addr, &user_ksyms[0])) 2936 return -EFAULT; 2937 } 2938 } else { 2939 info.jited_ksyms = 0; 2940 } 2941 } 2942 2943 ulen = info.nr_jited_func_lens; 2944 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1; 2945 if (ulen) { 2946 if (bpf_dump_raw_ok()) { 2947 u32 __user *user_lens; 2948 u32 func_len, i; 2949 2950 /* copy the JITed image lengths for each function */ 2951 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 2952 user_lens = u64_to_user_ptr(info.jited_func_lens); 2953 if (prog->aux->func_cnt) { 2954 for (i = 0; i < ulen; i++) { 2955 func_len = 2956 prog->aux->func[i]->jited_len; 2957 if (put_user(func_len, &user_lens[i])) 2958 return -EFAULT; 2959 } 2960 } else { 2961 func_len = prog->jited_len; 2962 if (put_user(func_len, &user_lens[0])) 2963 return -EFAULT; 2964 } 2965 } else { 2966 info.jited_func_lens = 0; 2967 } 2968 } 2969 2970 if (prog->aux->btf) 2971 info.btf_id = btf_id(prog->aux->btf); 2972 2973 ulen = info.nr_func_info; 2974 info.nr_func_info = prog->aux->func_info_cnt; 2975 if (info.nr_func_info && ulen) { 2976 char __user *user_finfo; 2977 2978 user_finfo = u64_to_user_ptr(info.func_info); 2979 ulen = min_t(u32, info.nr_func_info, ulen); 2980 if (copy_to_user(user_finfo, prog->aux->func_info, 2981 info.func_info_rec_size * ulen)) 2982 return -EFAULT; 2983 } 2984 2985 ulen = info.nr_line_info; 2986 info.nr_line_info = prog->aux->nr_linfo; 2987 if (info.nr_line_info && ulen) { 2988 __u8 __user *user_linfo; 2989 2990 user_linfo = u64_to_user_ptr(info.line_info); 2991 ulen = min_t(u32, info.nr_line_info, ulen); 2992 if (copy_to_user(user_linfo, prog->aux->linfo, 2993 info.line_info_rec_size * ulen)) 2994 return -EFAULT; 2995 } 2996 2997 ulen = info.nr_jited_line_info; 2998 if (prog->aux->jited_linfo) 2999 info.nr_jited_line_info = prog->aux->nr_linfo; 3000 else 3001 info.nr_jited_line_info = 0; 3002 if (info.nr_jited_line_info && ulen) { 3003 if (bpf_dump_raw_ok()) { 3004 __u64 __user *user_linfo; 3005 u32 i; 3006 3007 user_linfo = u64_to_user_ptr(info.jited_line_info); 3008 ulen = min_t(u32, info.nr_jited_line_info, ulen); 3009 for (i = 0; i < ulen; i++) { 3010 if (put_user((__u64)(long)prog->aux->jited_linfo[i], 3011 &user_linfo[i])) 3012 return -EFAULT; 3013 } 3014 } else { 3015 info.jited_line_info = 0; 3016 } 3017 } 3018 3019 ulen = info.nr_prog_tags; 3020 info.nr_prog_tags = prog->aux->func_cnt ? : 1; 3021 if (ulen) { 3022 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 3023 u32 i; 3024 3025 user_prog_tags = u64_to_user_ptr(info.prog_tags); 3026 ulen = min_t(u32, info.nr_prog_tags, ulen); 3027 if (prog->aux->func_cnt) { 3028 for (i = 0; i < ulen; i++) { 3029 if (copy_to_user(user_prog_tags[i], 3030 prog->aux->func[i]->tag, 3031 BPF_TAG_SIZE)) 3032 return -EFAULT; 3033 } 3034 } else { 3035 if (copy_to_user(user_prog_tags[0], 3036 prog->tag, BPF_TAG_SIZE)) 3037 return -EFAULT; 3038 } 3039 } 3040 3041 done: 3042 if (copy_to_user(uinfo, &info, info_len) || 3043 put_user(info_len, &uattr->info.info_len)) 3044 return -EFAULT; 3045 3046 return 0; 3047 } 3048 3049 static int bpf_map_get_info_by_fd(struct bpf_map *map, 3050 const union bpf_attr *attr, 3051 union bpf_attr __user *uattr) 3052 { 3053 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3054 struct bpf_map_info info = {}; 3055 u32 info_len = attr->info.info_len; 3056 int err; 3057 3058 err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); 3059 if (err) 3060 return err; 3061 info_len = min_t(u32, sizeof(info), info_len); 3062 3063 info.type = map->map_type; 3064 info.id = map->id; 3065 info.key_size = map->key_size; 3066 info.value_size = map->value_size; 3067 info.max_entries = map->max_entries; 3068 info.map_flags = map->map_flags; 3069 memcpy(info.name, map->name, sizeof(map->name)); 3070 3071 if (map->btf) { 3072 info.btf_id = btf_id(map->btf); 3073 info.btf_key_type_id = map->btf_key_type_id; 3074 info.btf_value_type_id = map->btf_value_type_id; 3075 } 3076 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 3077 3078 if (bpf_map_is_dev_bound(map)) { 3079 err = bpf_map_offload_info_fill(&info, map); 3080 if (err) 3081 return err; 3082 } 3083 3084 if (copy_to_user(uinfo, &info, info_len) || 3085 put_user(info_len, &uattr->info.info_len)) 3086 return -EFAULT; 3087 3088 return 0; 3089 } 3090 3091 static int bpf_btf_get_info_by_fd(struct btf *btf, 3092 const union bpf_attr *attr, 3093 union bpf_attr __user *uattr) 3094 { 3095 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3096 u32 info_len = attr->info.info_len; 3097 int err; 3098 3099 err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len); 3100 if (err) 3101 return err; 3102 3103 return btf_get_info_by_fd(btf, attr, uattr); 3104 } 3105 3106 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 3107 3108 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 3109 union bpf_attr __user *uattr) 3110 { 3111 int ufd = attr->info.bpf_fd; 3112 struct fd f; 3113 int err; 3114 3115 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 3116 return -EINVAL; 3117 3118 f = fdget(ufd); 3119 if (!f.file) 3120 return -EBADFD; 3121 3122 if (f.file->f_op == &bpf_prog_fops) 3123 err = bpf_prog_get_info_by_fd(f.file->private_data, attr, 3124 uattr); 3125 else if (f.file->f_op == &bpf_map_fops) 3126 err = bpf_map_get_info_by_fd(f.file->private_data, attr, 3127 uattr); 3128 else if (f.file->f_op == &btf_fops) 3129 err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr); 3130 else 3131 err = -EINVAL; 3132 3133 fdput(f); 3134 return err; 3135 } 3136 3137 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level 3138 3139 static int bpf_btf_load(const union bpf_attr *attr) 3140 { 3141 if (CHECK_ATTR(BPF_BTF_LOAD)) 3142 return -EINVAL; 3143 3144 if (!capable(CAP_SYS_ADMIN)) 3145 return -EPERM; 3146 3147 return btf_new_fd(attr); 3148 } 3149 3150 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id 3151 3152 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) 3153 { 3154 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 3155 return -EINVAL; 3156 3157 if (!capable(CAP_SYS_ADMIN)) 3158 return -EPERM; 3159 3160 return btf_get_fd_by_id(attr->btf_id); 3161 } 3162 3163 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 3164 union bpf_attr __user *uattr, 3165 u32 prog_id, u32 fd_type, 3166 const char *buf, u64 probe_offset, 3167 u64 probe_addr) 3168 { 3169 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 3170 u32 len = buf ? strlen(buf) : 0, input_len; 3171 int err = 0; 3172 3173 if (put_user(len, &uattr->task_fd_query.buf_len)) 3174 return -EFAULT; 3175 input_len = attr->task_fd_query.buf_len; 3176 if (input_len && ubuf) { 3177 if (!len) { 3178 /* nothing to copy, just make ubuf NULL terminated */ 3179 char zero = '\0'; 3180 3181 if (put_user(zero, ubuf)) 3182 return -EFAULT; 3183 } else if (input_len >= len + 1) { 3184 /* ubuf can hold the string with NULL terminator */ 3185 if (copy_to_user(ubuf, buf, len + 1)) 3186 return -EFAULT; 3187 } else { 3188 /* ubuf cannot hold the string with NULL terminator, 3189 * do a partial copy with NULL terminator. 3190 */ 3191 char zero = '\0'; 3192 3193 err = -ENOSPC; 3194 if (copy_to_user(ubuf, buf, input_len - 1)) 3195 return -EFAULT; 3196 if (put_user(zero, ubuf + input_len - 1)) 3197 return -EFAULT; 3198 } 3199 } 3200 3201 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 3202 put_user(fd_type, &uattr->task_fd_query.fd_type) || 3203 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 3204 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 3205 return -EFAULT; 3206 3207 return err; 3208 } 3209 3210 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 3211 3212 static int bpf_task_fd_query(const union bpf_attr *attr, 3213 union bpf_attr __user *uattr) 3214 { 3215 pid_t pid = attr->task_fd_query.pid; 3216 u32 fd = attr->task_fd_query.fd; 3217 const struct perf_event *event; 3218 struct files_struct *files; 3219 struct task_struct *task; 3220 struct file *file; 3221 int err; 3222 3223 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 3224 return -EINVAL; 3225 3226 if (!capable(CAP_SYS_ADMIN)) 3227 return -EPERM; 3228 3229 if (attr->task_fd_query.flags != 0) 3230 return -EINVAL; 3231 3232 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 3233 if (!task) 3234 return -ENOENT; 3235 3236 files = get_files_struct(task); 3237 put_task_struct(task); 3238 if (!files) 3239 return -ENOENT; 3240 3241 err = 0; 3242 spin_lock(&files->file_lock); 3243 file = fcheck_files(files, fd); 3244 if (!file) 3245 err = -EBADF; 3246 else 3247 get_file(file); 3248 spin_unlock(&files->file_lock); 3249 put_files_struct(files); 3250 3251 if (err) 3252 goto out; 3253 3254 if (file->f_op == &bpf_raw_tp_fops) { 3255 struct bpf_raw_tracepoint *raw_tp = file->private_data; 3256 struct bpf_raw_event_map *btp = raw_tp->btp; 3257 3258 err = bpf_task_fd_query_copy(attr, uattr, 3259 raw_tp->prog->aux->id, 3260 BPF_FD_TYPE_RAW_TRACEPOINT, 3261 btp->tp->name, 0, 0); 3262 goto put_file; 3263 } 3264 3265 event = perf_get_event(file); 3266 if (!IS_ERR(event)) { 3267 u64 probe_offset, probe_addr; 3268 u32 prog_id, fd_type; 3269 const char *buf; 3270 3271 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 3272 &buf, &probe_offset, 3273 &probe_addr); 3274 if (!err) 3275 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 3276 fd_type, buf, 3277 probe_offset, 3278 probe_addr); 3279 goto put_file; 3280 } 3281 3282 err = -ENOTSUPP; 3283 put_file: 3284 fput(file); 3285 out: 3286 return err; 3287 } 3288 3289 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 3290 3291 #define BPF_DO_BATCH(fn) \ 3292 do { \ 3293 if (!fn) { \ 3294 err = -ENOTSUPP; \ 3295 goto err_put; \ 3296 } \ 3297 err = fn(map, attr, uattr); \ 3298 } while (0) 3299 3300 static int bpf_map_do_batch(const union bpf_attr *attr, 3301 union bpf_attr __user *uattr, 3302 int cmd) 3303 { 3304 struct bpf_map *map; 3305 int err, ufd; 3306 struct fd f; 3307 3308 if (CHECK_ATTR(BPF_MAP_BATCH)) 3309 return -EINVAL; 3310 3311 ufd = attr->batch.map_fd; 3312 f = fdget(ufd); 3313 map = __bpf_map_get(f); 3314 if (IS_ERR(map)) 3315 return PTR_ERR(map); 3316 3317 if ((cmd == BPF_MAP_LOOKUP_BATCH || 3318 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) && 3319 !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 3320 err = -EPERM; 3321 goto err_put; 3322 } 3323 3324 if (cmd != BPF_MAP_LOOKUP_BATCH && 3325 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 3326 err = -EPERM; 3327 goto err_put; 3328 } 3329 3330 if (cmd == BPF_MAP_LOOKUP_BATCH) 3331 BPF_DO_BATCH(map->ops->map_lookup_batch); 3332 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 3333 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch); 3334 else if (cmd == BPF_MAP_UPDATE_BATCH) 3335 BPF_DO_BATCH(map->ops->map_update_batch); 3336 else 3337 BPF_DO_BATCH(map->ops->map_delete_batch); 3338 3339 err_put: 3340 fdput(f); 3341 return err; 3342 } 3343 3344 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 3345 { 3346 union bpf_attr attr = {}; 3347 int err; 3348 3349 if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) 3350 return -EPERM; 3351 3352 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 3353 if (err) 3354 return err; 3355 size = min_t(u32, size, sizeof(attr)); 3356 3357 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 3358 if (copy_from_user(&attr, uattr, size) != 0) 3359 return -EFAULT; 3360 3361 err = security_bpf(cmd, &attr, size); 3362 if (err < 0) 3363 return err; 3364 3365 switch (cmd) { 3366 case BPF_MAP_CREATE: 3367 err = map_create(&attr); 3368 break; 3369 case BPF_MAP_LOOKUP_ELEM: 3370 err = map_lookup_elem(&attr); 3371 break; 3372 case BPF_MAP_UPDATE_ELEM: 3373 err = map_update_elem(&attr); 3374 break; 3375 case BPF_MAP_DELETE_ELEM: 3376 err = map_delete_elem(&attr); 3377 break; 3378 case BPF_MAP_GET_NEXT_KEY: 3379 err = map_get_next_key(&attr); 3380 break; 3381 case BPF_MAP_FREEZE: 3382 err = map_freeze(&attr); 3383 break; 3384 case BPF_PROG_LOAD: 3385 err = bpf_prog_load(&attr, uattr); 3386 break; 3387 case BPF_OBJ_PIN: 3388 err = bpf_obj_pin(&attr); 3389 break; 3390 case BPF_OBJ_GET: 3391 err = bpf_obj_get(&attr); 3392 break; 3393 case BPF_PROG_ATTACH: 3394 err = bpf_prog_attach(&attr); 3395 break; 3396 case BPF_PROG_DETACH: 3397 err = bpf_prog_detach(&attr); 3398 break; 3399 case BPF_PROG_QUERY: 3400 err = bpf_prog_query(&attr, uattr); 3401 break; 3402 case BPF_PROG_TEST_RUN: 3403 err = bpf_prog_test_run(&attr, uattr); 3404 break; 3405 case BPF_PROG_GET_NEXT_ID: 3406 err = bpf_obj_get_next_id(&attr, uattr, 3407 &prog_idr, &prog_idr_lock); 3408 break; 3409 case BPF_MAP_GET_NEXT_ID: 3410 err = bpf_obj_get_next_id(&attr, uattr, 3411 &map_idr, &map_idr_lock); 3412 break; 3413 case BPF_BTF_GET_NEXT_ID: 3414 err = bpf_obj_get_next_id(&attr, uattr, 3415 &btf_idr, &btf_idr_lock); 3416 break; 3417 case BPF_PROG_GET_FD_BY_ID: 3418 err = bpf_prog_get_fd_by_id(&attr); 3419 break; 3420 case BPF_MAP_GET_FD_BY_ID: 3421 err = bpf_map_get_fd_by_id(&attr); 3422 break; 3423 case BPF_OBJ_GET_INFO_BY_FD: 3424 err = bpf_obj_get_info_by_fd(&attr, uattr); 3425 break; 3426 case BPF_RAW_TRACEPOINT_OPEN: 3427 err = bpf_raw_tracepoint_open(&attr); 3428 break; 3429 case BPF_BTF_LOAD: 3430 err = bpf_btf_load(&attr); 3431 break; 3432 case BPF_BTF_GET_FD_BY_ID: 3433 err = bpf_btf_get_fd_by_id(&attr); 3434 break; 3435 case BPF_TASK_FD_QUERY: 3436 err = bpf_task_fd_query(&attr, uattr); 3437 break; 3438 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 3439 err = map_lookup_and_delete_elem(&attr); 3440 break; 3441 case BPF_MAP_LOOKUP_BATCH: 3442 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH); 3443 break; 3444 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 3445 err = bpf_map_do_batch(&attr, uattr, 3446 BPF_MAP_LOOKUP_AND_DELETE_BATCH); 3447 break; 3448 case BPF_MAP_UPDATE_BATCH: 3449 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH); 3450 break; 3451 case BPF_MAP_DELETE_BATCH: 3452 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH); 3453 break; 3454 default: 3455 err = -EINVAL; 3456 break; 3457 } 3458 3459 return err; 3460 } 3461