// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	unsigned char __user *addr = uaddr + expected_size;
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	res = check_zeroed_user(addr, actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}

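/* Illustrative sketch (not part of this file, names hypothetical): how a
 * sized-attr command handler typically pairs bpf_check_uarg_tail_zero()
 * with a truncated copy_from_user(), so that both older (shorter) and
 * newer (larger but zero-tailed) userspace structs are accepted:
 *
 *	static int demo_copy_attr(union bpf_attr *attr, void __user *uattr,
 *				  unsigned int size)
 *	{
 *		int err;
 *
 *		err = bpf_check_uarg_tail_zero(uattr, sizeof(*attr), size);
 *		if (err)
 *			return err;
 *		size = min_t(u32, size, sizeof(*attr));
 *
 *		memset(attr, 0, sizeof(*attr));
 *		if (copy_from_user(attr, uattr, size))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
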
const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

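/* Illustrative note: for the per-cpu map types handled by
 * bpf_map_value_size() above, the syscall-side value buffer holds one
 * 8-byte-aligned value per possible CPU, so a hypothetical userspace
 * caller would size its buffer roughly like:
 *
 *	size_t per_cpu = (value_size + 7) & ~7;	// round_up(value_size, 8)
 *	size_t buf_len = per_cpu * ncpus;	// num_possible_cpus() kernel-side
 *
 * where ncpus would come from e.g. libbpf's libbpf_num_possible_cpus().
 */
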
static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock, since value wasn't zero inited */
			check_and_init_map_lock(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

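/* Illustrative userspace sketch (hypothetical names): looking up an
 * element under BPF_F_LOCK, which is only accepted when the value type
 * embeds a struct bpf_spin_lock; the lock field itself is masked to zero
 * in the copied-out value, as done above:
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *		.flags  = BPF_F_LOCK,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */
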
/* Please, do not use this function outside of the map creation path
 * (e.g. in map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, lets clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

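/* Illustrative sketch of how a map implementation's ->map_alloc() tends
 * to pair bpf_map_area_alloc() with bpf_map_init_from_attr() (names and
 * struct demo_map are hypothetical and simplified; real implementations
 * live in e.g. arraymap.c):
 *
 *	struct demo_map {
 *		struct bpf_map map;
 *		// implementation-specific storage would follow
 *	};
 *
 *	static struct bpf_map *demo_map_alloc(union bpf_attr *attr)
 *	{
 *		struct demo_map *dmap;
 *
 *		dmap = bpf_map_area_alloc(sizeof(*dmap),
 *					  bpf_map_attr_numa_node(attr));
 *		if (!dmap)
 *			return ERR_PTR(-ENOMEM);
 *		bpf_map_init_from_attr(&dmap->map, attr);
 *		return &dmap->map;
 *	}
 */
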
#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	map->memcg = get_mem_cgroup_from_mm(current->mm);
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	mem_cgroup_put(map->memcg);
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void __percpu *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Provides an approximation of the map's memory footprint.
 * Used only to provide backward compatibility and display
 * a reasonable "memlock" info.
 */
static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
{
	unsigned long size;

	size = round_up(map->key_size + bpf_map_value_size(map), 8);

	return round_up(map->max_entries * size, PAGE_SIZE);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 type = 0, jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		type  = array->aux->type;
		jited = array->aux->jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%lu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   bpf_map_memory_footprint(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

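/* Example of the resulting fdinfo text for a hash map (values purely
 * illustrative), as seen via /proc/<pid>/fdinfo/<map-fd>:
 *
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	1024
 *	map_flags:	0x0
 *	memlock:	16384
 *	map_id:	42
 *	frozen:	0
 *
 * memlock here is round_up(1024 * round_up(4 + 8, 8), PAGE_SIZE) per
 * bpf_map_memory_footprint() above.
 */
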
/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE) {
		mutex_lock(&map->freeze_mutex);
		map->writecnt++;
		mutex_unlock(&map->freeze_mutex);
	}
}

/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE) {
		mutex_lock(&map->freeze_mutex);
		map->writecnt--;
		mutex_unlock(&map->freeze_mutex);
	}
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open	= bpf_map_mmap_open,
	.close	= bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allows user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		map->writecnt++;
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

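/* Illustrative expansion: for BPF_MAP_CREATE, whose last used field is
 * btf_vmlinux_value_type_id (see BPF_MAP_CREATE_LAST_FIELD below),
 * CHECK_ATTR(BPF_MAP_CREATE) evaluates to true - i.e. the attr is
 * rejected - iff any byte of 'union bpf_attr' past that field is non-zero:
 *
 *	memchr_inv((void *) &attr->btf_vmlinux_value_type_id +
 *		   sizeof(attr->btf_vmlinux_value_type_id), 0,
 *		   sizeof(*attr) -
 *		   offsetof(union bpf_attr, btf_vmlinux_value_type_id) -
 *		   sizeof(attr->btf_vmlinux_value_type_id)) != NULL
 */
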
/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	if (map->ops->map_check_btf)
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);

	return ret;
}

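/* Per the bpf_obj_name_cpy() rules above, "my_map.v2" and "ringbuf_1" are
 * valid object names, while "bad-name" (contains '-') or any name without
 * a terminating NUL within "size" bytes is rejected with -EINVAL.
 */
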
#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);

	map->spin_lock_off = -EINVAL;
	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel's
	     * counterpart. Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}

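/* Illustrative userspace sketch (not part of this file) of the syscall
 * this function services; error handling and map naming elided:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 64,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the returned fd owns both a refcnt and a usercnt reference,
 * as initialized above.
 */
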
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return memdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

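/* Illustrative note on attr->flags for BPF_MAP_UPDATE_ELEM as passed to
 * bpf_map_update_value() above (the semantics are enforced by the
 * individual map implementations): BPF_ANY creates or replaces,
 * BPF_NOEXIST only creates (fails with -EEXIST if the key is present),
 * BPF_EXIST only replaces (fails with -ENOENT if it is not). A
 * hypothetical userspace call:
 *
 *	attr.flags = BPF_NOEXIST;
 *	err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */
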
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_dev_bound(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		maybe_wait_bpf_programs(map);
		if (err)
			break;
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kfree(key);
	return err;
}

int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	int ufd = attr->map_fd;
	void *key, *value;
	struct fd f;
	int err = 0;

	f = fdget(ufd);
	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kfree(key);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, f, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kfree(value);
	kfree(key);
	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kfree(buf_prevkey);
	kfree(buf);
	return err;
}

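/* Illustrative userspace sketch of paginating with BPF_MAP_LOOKUP_BATCH:
 * in_batch is NULL (0) on the first call, and each call writes a
 * key-sized resume token (the last key visited) to out_batch to be passed
 * as the next in_batch. The syscall fails with ENOENT once the map is
 * exhausted. Names hypothetical, error handling simplified:
 *
 *	__u32 in, out;	// batch tokens for a map with __u32 keys
 *	attr.batch.in_batch  = 0;	// start from the beginning
 *	attr.batch.out_batch = (__u64)(unsigned long)&out;
 *	for (;;) {
 *		attr.batch.count = 32;
 *		err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr,
 *			      sizeof(attr));
 *		// attr.batch.count now holds the number of entries returned
 *		if (err)	// errno == ENOENT: traversal complete
 *			break;
 *		in = out;
 *		attr.batch.in_batch = (__u64)(unsigned long)&in;
 *	}
 */
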
#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else {
		err = -ENOTSUPP;
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		fdput(f);
		return -ENOTSUPP;
	}

	mutex_lock(&map->freeze_mutex);

	if (map->writecnt) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}
	if (!bpf_capable()) {
		err = -EPERM;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}

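/* Illustrative userspace sketch: making a map read-only on the syscall
 * side once it has been populated. After BPF_MAP_FREEZE succeeds,
 * syscall-side writes fail with EPERM (see map_get_sys_perms() above),
 * while programs can still write unless the map was also created with
 * BPF_F_RDONLY_PROG:
 *
 *	union bpf_attr attr = { .map_fd = map_fd };
 *	err = syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
 */
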
static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

enum bpf_audit {
	BPF_AUDIT_LOAD,
	BPF_AUDIT_UNLOAD,
	BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
	[BPF_AUDIT_LOAD]   = "LOAD",
	[BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
	struct audit_context *ctx = NULL;
	struct audit_buffer *ab;

	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
		return;
	if (audit_enabled == AUDIT_OFF)
		return;
	if (op == BPF_AUDIT_LOAD)
		ctx = audit_context();
	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "prog-id=%u op=%s",
			 prog->aux->id, bpf_audit_str[op]);
	audit_log_end(ab);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	kvfree(aux->func_info);
	kfree(aux->func_info_aux);
	free_uid(aux->user);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
{
	bpf_prog_kallsyms_del_all(prog);
	btf_put(prog->aux->btf);
	bpf_prog_free_linfo(prog);
	if (prog->aux->attach_btf)
		btf_put(prog->aux->attach_btf);

	if (deferred) {
		if (prog->aux->sleepable)
			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
		else
			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	} else {
		__bpf_prog_put_rcu(&prog->aux->rcu);
	}
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&prog->aux->refcnt)) {
		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
		bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		__bpf_prog_put_noref(prog, true);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static void bpf_prog_get_stats(const struct bpf_prog *prog,
			       struct bpf_prog_stats *stats)
{
	u64 nsecs = 0, cnt = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st;
		unsigned int start;
		u64 tnsecs, tcnt;

		st = per_cpu_ptr(prog->aux->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&st->syncp);
			tnsecs = st->nsecs;
			tcnt = st->cnt;
		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
		nsecs += tnsecs;
		cnt += tcnt;
	}
	stats->nsecs = nsecs;
	stats->cnt = cnt;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
	struct bpf_prog_stats stats;

	bpf_prog_get_stats(prog, &stats);
	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n"
		   "prog_id:\t%u\n"
		   "run_time_ns:\t%llu\n"
		   "run_cnt:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT,
		   prog->aux->id,
		   stats.nsecs,
		   stats.cnt);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_prog_add(struct bpf_prog *prog, int i)
{
	atomic64_add(i, &prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

void bpf_prog_inc(struct bpf_prog *prog)
{
	atomic64_inc(&prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

bool bpf_prog_get_ok(struct bpf_prog *prog,
		     enum bpf_prog_type *attach_type, bool attach_drv)
{
	/* not an attachment, just a refcount inc, always allow */
	if (!attach_type)
		return true;

	if (prog->type != *attach_type)
		return false;
	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
		return false;

	return true;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	return __bpf_prog_get(ufd, &type, attach_drv);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);

/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later for some of them specifying expected_attach_type
 * at load time became required so that program could be validated properly.
 * Programs of types that are allowed to be loaded both w/ and w/o (for
 * backward compatibility) expected_attach_type, should have the default attach
 * type assigned to expected_attach_type for the latter case, so that it can be
 * validated later at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * prog type requires it but has some attach types that have to be backward
 * compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
		 * exist so checking for non-zero is the way to go here.
		 */
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_CGROUP_INET_SOCK_CREATE;
		break;
	}
}

static int
bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
			   enum bpf_attach_type expected_attach_type,
			   struct btf *attach_btf, u32 btf_id,
			   struct bpf_prog *dst_prog)
{
	if (btf_id) {
		if (btf_id > BTF_MAX_TYPE)
			return -EINVAL;

		if (!attach_btf && !dst_prog)
			return -EINVAL;

		switch (prog_type) {
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
		case BPF_PROG_TYPE_STRUCT_OPS:
		case BPF_PROG_TYPE_EXT:
			break;
		default:
			return -EINVAL;
		}
	}

	if (attach_btf && (!btf_id || dst_prog))
		return -EINVAL;

	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
	    prog_type != BPF_PROG_TYPE_EXT)
		return -EINVAL;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET_SOCK_RELEASE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SKB:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		switch (expected_attach_type) {
		case BPF_CGROUP_SETSOCKOPT:
		case BPF_CGROUP_GETSOCKOPT:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_SK_LOOKUP:
		if (expected_attach_type == BPF_SK_LOOKUP)
			return 0;
		return -EINVAL;
	case BPF_PROG_TYPE_EXT:
		if (expected_attach_type)
			return -EINVAL;
		fallthrough;
	default:
		return 0;
	}
}

static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_EXT: /* extends any prog */
		return true;
	case BPF_PROG_TYPE_CGROUP_SKB:
		/* always unpriv */
	case BPF_PROG_TYPE_SK_REUSEPORT:
		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
	default:
		return false;
	}
}

static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
	case BPF_PROG_TYPE_EXT: /* extends any prog */
		return true;
	default:
		return false;
	}
}

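/* Taken together with the bpf_capable()/capable() checks in
 * bpf_prog_load() below, the two predicates above yield, informally:
 *
 *	SOCKET_FILTER, CGROUP_SKB: no capability required (subject to
 *		sysctl_unprivileged_bpf_disabled, enforced elsewhere)
 *	is_net_admin_prog_type() (SCHED_CLS, XDP, SK_SKB, ...):
 *		CAP_NET_ADMIN (or CAP_SYS_ADMIN), plus CAP_BPF
 *	is_perfmon_prog_type() (KPROBE, TRACING, LSM, ...):
 *		CAP_PERFMON (or CAP_SYS_ADMIN), plus CAP_BPF
 *	everything else: CAP_BPF
 *
 * where "CAP_BPF" stands for bpf_capable(), which also accepts
 * CAP_SYS_ADMIN.
 */
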
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) 2102 return -E2BIG; 2103 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2104 type != BPF_PROG_TYPE_CGROUP_SKB && 2105 !bpf_capable()) 2106 return -EPERM; 2107 2108 if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN)) 2109 return -EPERM; 2110 if (is_perfmon_prog_type(type) && !perfmon_capable()) 2111 return -EPERM; 2112 2113 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog 2114 * or btf, we need to check which one it is 2115 */ 2116 if (attr->attach_prog_fd) { 2117 dst_prog = bpf_prog_get(attr->attach_prog_fd); 2118 if (IS_ERR(dst_prog)) { 2119 dst_prog = NULL; 2120 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); 2121 if (IS_ERR(attach_btf)) 2122 return -EINVAL; 2123 if (!btf_is_kernel(attach_btf)) { 2124 btf_put(attach_btf); 2125 return -EINVAL; 2126 } 2127 } 2128 } else if (attr->attach_btf_id) { 2129 /* fall back to vmlinux BTF, if BTF type ID is specified */ 2130 attach_btf = bpf_get_btf_vmlinux(); 2131 if (IS_ERR(attach_btf)) 2132 return PTR_ERR(attach_btf); 2133 if (!attach_btf) 2134 return -EINVAL; 2135 btf_get(attach_btf); 2136 } 2137 2138 bpf_prog_load_fixup_attach_type(attr); 2139 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2140 attach_btf, attr->attach_btf_id, 2141 dst_prog)) { 2142 if (dst_prog) 2143 bpf_prog_put(dst_prog); 2144 if (attach_btf) 2145 btf_put(attach_btf); 2146 return -EINVAL; 2147 } 2148 2149 /* plain bpf_prog allocation */ 2150 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2151 if (!prog) { 2152 if (dst_prog) 2153 bpf_prog_put(dst_prog); 2154 if (attach_btf) 2155 btf_put(attach_btf); 2156 return -ENOMEM; 2157 } 2158 2159 prog->expected_attach_type = attr->expected_attach_type; 2160 prog->aux->attach_btf = attach_btf; 2161 prog->aux->attach_btf_id = attr->attach_btf_id; 2162 prog->aux->dst_prog = dst_prog; 2163 prog->aux->offload_requested = !!attr->prog_ifindex; 2164 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; 2165 2166 err = security_bpf_prog_alloc(prog->aux); 2167 if (err) 2168 goto free_prog; 2169 2170 prog->aux->user = get_current_user(); 2171 prog->len = attr->insn_cnt; 2172 2173 err = -EFAULT; 2174 if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns), 2175 bpf_prog_insn_size(prog)) != 0) 2176 goto free_prog_sec; 2177 2178 prog->orig_prog = NULL; 2179 prog->jited = 0; 2180 2181 atomic64_set(&prog->aux->refcnt, 1); 2182 prog->gpl_compatible = is_gpl ? 1 : 0; 2183 2184 if (bpf_prog_is_dev_bound(prog->aux)) { 2185 err = bpf_prog_offload_init(prog, attr); 2186 if (err) 2187 goto free_prog_sec; 2188 } 2189 2190 /* find program type: socket_filter vs tracing_filter */ 2191 err = find_prog_type(type, prog); 2192 if (err < 0) 2193 goto free_prog_sec; 2194 2195 prog->aux->load_time = ktime_get_boottime_ns(); 2196 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2197 sizeof(attr->prog_name)); 2198 if (err < 0) 2199 goto free_prog_sec; 2200 2201 /* run eBPF verifier */ 2202 err = bpf_check(&prog, attr, uattr); 2203 if (err < 0) 2204 goto free_used_maps; 2205 2206 prog = bpf_prog_select_runtime(prog, &err); 2207 if (err < 0) 2208 goto free_used_maps; 2209 2210 err = bpf_prog_alloc_id(prog); 2211 if (err) 2212 goto free_used_maps; 2213 2214 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2215 * effectively publicly exposed. However, retrieving via 2216 * bpf_prog_get_fd_by_id() will take another reference, 2217 * therefore it cannot be gone underneath us. 
2218 * 2219 * Only for the time /after/ successful bpf_prog_new_fd() 2220 * and before returning to userspace, we might just hold 2221 * one reference and any parallel close on that fd could 2222 * rip everything out. Hence, below notifications must 2223 * happen before bpf_prog_new_fd(). 2224 * 2225 * Also, any failure handling from this point onwards must 2226 * be using bpf_prog_put() given the program is exposed. 2227 */ 2228 bpf_prog_kallsyms_add(prog); 2229 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 2230 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 2231 2232 err = bpf_prog_new_fd(prog); 2233 if (err < 0) 2234 bpf_prog_put(prog); 2235 return err; 2236 2237 free_used_maps: 2238 /* In case we have subprogs, we need to wait for a grace 2239 * period before we can tear down JIT memory since symbols 2240 * are already exposed under kallsyms. 2241 */ 2242 __bpf_prog_put_noref(prog, prog->aux->func_cnt); 2243 return err; 2244 free_prog_sec: 2245 free_uid(prog->aux->user); 2246 security_bpf_prog_free(prog->aux); 2247 free_prog: 2248 if (prog->aux->attach_btf) 2249 btf_put(prog->aux->attach_btf); 2250 bpf_prog_free(prog); 2251 return err; 2252 } 2253 2254 #define BPF_OBJ_LAST_FIELD file_flags 2255 2256 static int bpf_obj_pin(const union bpf_attr *attr) 2257 { 2258 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0) 2259 return -EINVAL; 2260 2261 return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); 2262 } 2263 2264 static int bpf_obj_get(const union bpf_attr *attr) 2265 { 2266 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 2267 attr->file_flags & ~BPF_OBJ_FLAG_MASK) 2268 return -EINVAL; 2269 2270 return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), 2271 attr->file_flags); 2272 } 2273 2274 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2275 const struct bpf_link_ops *ops, struct bpf_prog *prog) 2276 { 2277 atomic64_set(&link->refcnt, 1); 2278 link->type = type; 2279 link->id = 0; 2280 link->ops = ops; 2281 link->prog = prog; 2282 } 2283 2284 static void bpf_link_free_id(int id) 2285 { 2286 if (!id) 2287 return; 2288 2289 spin_lock_bh(&link_idr_lock); 2290 idr_remove(&link_idr, id); 2291 spin_unlock_bh(&link_idr_lock); 2292 } 2293 2294 /* Clean up bpf_link and corresponding anon_inode file and FD. After 2295 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 2296 * anon_inode's release() call. This helper marks bpf_link as 2297 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt 2298 * is not decremented, it is the responsibility of the calling code that failed 2299 * to complete bpf_link initialization.
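 * The success-path counterpart is bpf_link_settle(), which instead installs
 * the prepared FD and ID (a usage sketch accompanies bpf_link_prime() below).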
2300 */ 2301 void bpf_link_cleanup(struct bpf_link_primer *primer) 2302 { 2303 primer->link->prog = NULL; 2304 bpf_link_free_id(primer->id); 2305 fput(primer->file); 2306 put_unused_fd(primer->fd); 2307 } 2308 2309 void bpf_link_inc(struct bpf_link *link) 2310 { 2311 atomic64_inc(&link->refcnt); 2312 } 2313 2314 /* bpf_link_free is guaranteed to be called from process context */ 2315 static void bpf_link_free(struct bpf_link *link) 2316 { 2317 bpf_link_free_id(link->id); 2318 if (link->prog) { 2319 /* detach BPF program, clean up used resources */ 2320 link->ops->release(link); 2321 bpf_prog_put(link->prog); 2322 } 2323 /* free bpf_link and its containing memory */ 2324 link->ops->dealloc(link); 2325 } 2326 2327 static void bpf_link_put_deferred(struct work_struct *work) 2328 { 2329 struct bpf_link *link = container_of(work, struct bpf_link, work); 2330 2331 bpf_link_free(link); 2332 } 2333 2334 /* bpf_link_put can be called from atomic context, but ensures that resources 2335 * are freed from process context 2336 */ 2337 void bpf_link_put(struct bpf_link *link) 2338 { 2339 if (!atomic64_dec_and_test(&link->refcnt)) 2340 return; 2341 2342 if (in_atomic()) { 2343 INIT_WORK(&link->work, bpf_link_put_deferred); 2344 schedule_work(&link->work); 2345 } else { 2346 bpf_link_free(link); 2347 } 2348 } 2349 2350 static int bpf_link_release(struct inode *inode, struct file *filp) 2351 { 2352 struct bpf_link *link = filp->private_data; 2353 2354 bpf_link_put(link); 2355 return 0; 2356 } 2357 2358 #ifdef CONFIG_PROC_FS 2359 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 2360 #define BPF_MAP_TYPE(_id, _ops) 2361 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 2362 static const char *bpf_link_type_strs[] = { 2363 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 2364 #include <linux/bpf_types.h> 2365 }; 2366 #undef BPF_PROG_TYPE 2367 #undef BPF_MAP_TYPE 2368 #undef BPF_LINK_TYPE 2369 2370 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 2371 { 2372 const struct bpf_link *link = filp->private_data; 2373 const struct bpf_prog *prog = link->prog; 2374 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2375 2376 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2377 seq_printf(m, 2378 "link_type:\t%s\n" 2379 "link_id:\t%u\n" 2380 "prog_tag:\t%s\n" 2381 "prog_id:\t%u\n", 2382 bpf_link_type_strs[link->type], 2383 link->id, 2384 prog_tag, 2385 prog->aux->id); 2386 if (link->ops->show_fdinfo) 2387 link->ops->show_fdinfo(link, m); 2388 } 2389 #endif 2390 2391 static const struct file_operations bpf_link_fops = { 2392 #ifdef CONFIG_PROC_FS 2393 .show_fdinfo = bpf_link_show_fdinfo, 2394 #endif 2395 .release = bpf_link_release, 2396 .read = bpf_dummy_read, 2397 .write = bpf_dummy_write, 2398 }; 2399 2400 static int bpf_link_alloc_id(struct bpf_link *link) 2401 { 2402 int id; 2403 2404 idr_preload(GFP_KERNEL); 2405 spin_lock_bh(&link_idr_lock); 2406 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 2407 spin_unlock_bh(&link_idr_lock); 2408 idr_preload_end(); 2409 2410 return id; 2411 } 2412 2413 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, 2414 * reserving unused FD and allocating ID from link_idr. This is to be paired 2415 * with bpf_link_settle() to install FD and ID and expose bpf_link to 2416 * user-space, if bpf_link is successfully attached. If not, bpf_link and 2417 * pre-allocated resources are to be freed with bpf_link_cleanup() call. All the 2418 * transient state is passed around in struct bpf_link_primer.
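 *
 * A minimal usage sketch (added for illustration, declarations omitted;
 * my_attach() stands in for whatever hook-specific attach step a caller
 * performs, mirroring callers in this file such as bpf_raw_tracepoint_open()):
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = my_attach(link);			// hook-specific, hypothetical
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// drops file, FD and ID
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// expose FD and ID to user-space
 *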
2419 * This is the preferred way to create and initialize bpf_link, especially when 2420 * there are complicated and expensive operations in between creating bpf_link 2421 * itself and attaching it to BPF hook. By using bpf_link_prime() and 2422 * bpf_link_settle() kernel code using bpf_link doesn't have to perform 2423 * expensive (and potentially failing) roll back operations in the rare case 2424 * that file, FD, or ID can't be allocated. 2425 */ 2426 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 2427 { 2428 struct file *file; 2429 int fd, id; 2430 2431 fd = get_unused_fd_flags(O_CLOEXEC); 2432 if (fd < 0) 2433 return fd; 2434 2435 2436 id = bpf_link_alloc_id(link); 2437 if (id < 0) { 2438 put_unused_fd(fd); 2439 return id; 2440 } 2441 2442 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC); 2443 if (IS_ERR(file)) { 2444 bpf_link_free_id(id); 2445 put_unused_fd(fd); 2446 return PTR_ERR(file); 2447 } 2448 2449 primer->link = link; 2450 primer->file = file; 2451 primer->fd = fd; 2452 primer->id = id; 2453 return 0; 2454 } 2455 2456 int bpf_link_settle(struct bpf_link_primer *primer) 2457 { 2458 /* make bpf_link fetchable by ID */ 2459 spin_lock_bh(&link_idr_lock); 2460 primer->link->id = primer->id; 2461 spin_unlock_bh(&link_idr_lock); 2462 /* make bpf_link fetchable by FD */ 2463 fd_install(primer->fd, primer->file); 2464 /* pass through installed FD */ 2465 return primer->fd; 2466 } 2467 2468 int bpf_link_new_fd(struct bpf_link *link) 2469 { 2470 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); 2471 } 2472 2473 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 2474 { 2475 struct fd f = fdget(ufd); 2476 struct bpf_link *link; 2477 2478 if (!f.file) 2479 return ERR_PTR(-EBADF); 2480 if (f.file->f_op != &bpf_link_fops) { 2481 fdput(f); 2482 return ERR_PTR(-EINVAL); 2483 } 2484 2485 link = f.file->private_data; 2486 bpf_link_inc(link); 2487 fdput(f); 2488 2489 return link; 2490 } 2491 2492 struct bpf_tracing_link { 2493 struct bpf_link link; 2494 enum bpf_attach_type attach_type; 2495 struct bpf_trampoline *trampoline; 2496 struct bpf_prog *tgt_prog; 2497 }; 2498 2499 static void bpf_tracing_link_release(struct bpf_link *link) 2500 { 2501 struct bpf_tracing_link *tr_link = 2502 container_of(link, struct bpf_tracing_link, link); 2503 2504 WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog, 2505 tr_link->trampoline)); 2506 2507 bpf_trampoline_put(tr_link->trampoline); 2508 2509 /* tgt_prog is NULL if target is a kernel function */ 2510 if (tr_link->tgt_prog) 2511 bpf_prog_put(tr_link->tgt_prog); 2512 } 2513 2514 static void bpf_tracing_link_dealloc(struct bpf_link *link) 2515 { 2516 struct bpf_tracing_link *tr_link = 2517 container_of(link, struct bpf_tracing_link, link); 2518 2519 kfree(tr_link); 2520 } 2521 2522 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 2523 struct seq_file *seq) 2524 { 2525 struct bpf_tracing_link *tr_link = 2526 container_of(link, struct bpf_tracing_link, link); 2527 2528 seq_printf(seq, 2529 "attach_type:\t%d\n", 2530 tr_link->attach_type); 2531 } 2532 2533 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 2534 struct bpf_link_info *info) 2535 { 2536 struct bpf_tracing_link *tr_link = 2537 container_of(link, struct bpf_tracing_link, link); 2538 2539 info->tracing.attach_type = tr_link->attach_type; 2540 2541 return 0; 2542 } 2543 2544 static const struct bpf_link_ops bpf_tracing_link_lops = { 2545 .release = bpf_tracing_link_release, 2546 .dealloc = 
bpf_tracing_link_dealloc, 2547 .show_fdinfo = bpf_tracing_link_show_fdinfo, 2548 .fill_link_info = bpf_tracing_link_fill_link_info, 2549 }; 2550 2551 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 2552 int tgt_prog_fd, 2553 u32 btf_id) 2554 { 2555 struct bpf_link_primer link_primer; 2556 struct bpf_prog *tgt_prog = NULL; 2557 struct bpf_trampoline *tr = NULL; 2558 struct bpf_tracing_link *link; 2559 u64 key = 0; 2560 int err; 2561 2562 switch (prog->type) { 2563 case BPF_PROG_TYPE_TRACING: 2564 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 2565 prog->expected_attach_type != BPF_TRACE_FEXIT && 2566 prog->expected_attach_type != BPF_MODIFY_RETURN) { 2567 err = -EINVAL; 2568 goto out_put_prog; 2569 } 2570 break; 2571 case BPF_PROG_TYPE_EXT: 2572 if (prog->expected_attach_type != 0) { 2573 err = -EINVAL; 2574 goto out_put_prog; 2575 } 2576 break; 2577 case BPF_PROG_TYPE_LSM: 2578 if (prog->expected_attach_type != BPF_LSM_MAC) { 2579 err = -EINVAL; 2580 goto out_put_prog; 2581 } 2582 break; 2583 default: 2584 err = -EINVAL; 2585 goto out_put_prog; 2586 } 2587 2588 if (!!tgt_prog_fd != !!btf_id) { 2589 err = -EINVAL; 2590 goto out_put_prog; 2591 } 2592 2593 if (tgt_prog_fd) { 2594 /* For now we only allow new targets for BPF_PROG_TYPE_EXT */ 2595 if (prog->type != BPF_PROG_TYPE_EXT) { 2596 err = -EINVAL; 2597 goto out_put_prog; 2598 } 2599 2600 tgt_prog = bpf_prog_get(tgt_prog_fd); 2601 if (IS_ERR(tgt_prog)) { 2602 err = PTR_ERR(tgt_prog); 2603 tgt_prog = NULL; 2604 goto out_put_prog; 2605 } 2606 2607 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 2608 } 2609 2610 link = kzalloc(sizeof(*link), GFP_USER); 2611 if (!link) { 2612 err = -ENOMEM; 2613 goto out_put_prog; 2614 } 2615 bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING, 2616 &bpf_tracing_link_lops, prog); 2617 link->attach_type = prog->expected_attach_type; 2618 2619 mutex_lock(&prog->aux->dst_mutex); 2620 2621 /* There are a few possible cases here: 2622 * 2623 * - if prog->aux->dst_trampoline is set, the program was just loaded 2624 * and not yet attached to anything, so we can use the values stored 2625 * in prog->aux 2626 * 2627 * - if prog->aux->dst_trampoline is NULL, the program has already been 2628 * attached to a target and its initial target was cleared (below) 2629 * 2630 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 2631 * target_btf_id using the link_create API. 2632 * 2633 * - if tgt_prog == NULL when this function was called using the old 2634 * raw_tracepoint_open API, and we need a target from prog->aux 2635 * 2636 * The combination of no saved target in prog->aux, and no target 2637 * specified on load is illegal, and we reject that here. 2638 */ 2639 if (!prog->aux->dst_trampoline && !tgt_prog) { 2640 err = -ENOENT; 2641 goto out_unlock; 2642 } 2643 2644 if (!prog->aux->dst_trampoline || 2645 (key && key != prog->aux->dst_trampoline->key)) { 2646 /* If there is no saved target, or the specified target is 2647 * different from the destination specified at load time, we 2648 * need a new trampoline and a check for compatibility 2649 */ 2650 struct bpf_attach_target_info tgt_info = {}; 2651 2652 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 2653 &tgt_info); 2654 if (err) 2655 goto out_unlock; 2656 2657 tr = bpf_trampoline_get(key, &tgt_info); 2658 if (!tr) { 2659 err = -ENOMEM; 2660 goto out_unlock; 2661 } 2662 } else { 2663 /* The caller didn't specify a target, or the target was the 2664 * same as the destination supplied during program load. 
This 2665 * means we can reuse the trampoline and reference from program 2666 * load time, and there is no need to allocate a new one. This 2667 * can only happen once for any program, as the saved values in 2668 * prog->aux are cleared below. 2669 */ 2670 tr = prog->aux->dst_trampoline; 2671 tgt_prog = prog->aux->dst_prog; 2672 } 2673 2674 err = bpf_link_prime(&link->link, &link_primer); 2675 if (err) 2676 goto out_unlock; 2677 2678 err = bpf_trampoline_link_prog(prog, tr); 2679 if (err) { 2680 bpf_link_cleanup(&link_primer); 2681 link = NULL; 2682 goto out_unlock; 2683 } 2684 2685 link->tgt_prog = tgt_prog; 2686 link->trampoline = tr; 2687 2688 /* Always clear the trampoline and target prog from prog->aux to make 2689 * sure the original attach destination is not kept alive after a 2690 * program is (re-)attached to another target. 2691 */ 2692 if (prog->aux->dst_prog && 2693 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 2694 /* got extra prog ref from syscall, or attaching to different prog */ 2695 bpf_prog_put(prog->aux->dst_prog); 2696 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 2697 /* we allocated a new trampoline, so free the old one */ 2698 bpf_trampoline_put(prog->aux->dst_trampoline); 2699 2700 prog->aux->dst_prog = NULL; 2701 prog->aux->dst_trampoline = NULL; 2702 mutex_unlock(&prog->aux->dst_mutex); 2703 2704 return bpf_link_settle(&link_primer); 2705 out_unlock: 2706 if (tr && tr != prog->aux->dst_trampoline) 2707 bpf_trampoline_put(tr); 2708 mutex_unlock(&prog->aux->dst_mutex); 2709 kfree(link); 2710 out_put_prog: 2711 if (tgt_prog_fd && tgt_prog) 2712 bpf_prog_put(tgt_prog); 2713 bpf_prog_put(prog); 2714 return err; 2715 } 2716 2717 struct bpf_raw_tp_link { 2718 struct bpf_link link; 2719 struct bpf_raw_event_map *btp; 2720 }; 2721 2722 static void bpf_raw_tp_link_release(struct bpf_link *link) 2723 { 2724 struct bpf_raw_tp_link *raw_tp = 2725 container_of(link, struct bpf_raw_tp_link, link); 2726 2727 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); 2728 bpf_put_raw_tracepoint(raw_tp->btp); 2729 } 2730 2731 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 2732 { 2733 struct bpf_raw_tp_link *raw_tp = 2734 container_of(link, struct bpf_raw_tp_link, link); 2735 2736 kfree(raw_tp); 2737 } 2738 2739 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 2740 struct seq_file *seq) 2741 { 2742 struct bpf_raw_tp_link *raw_tp_link = 2743 container_of(link, struct bpf_raw_tp_link, link); 2744 2745 seq_printf(seq, 2746 "tp_name:\t%s\n", 2747 raw_tp_link->btp->tp->name); 2748 } 2749 2750 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 2751 struct bpf_link_info *info) 2752 { 2753 struct bpf_raw_tp_link *raw_tp_link = 2754 container_of(link, struct bpf_raw_tp_link, link); 2755 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 2756 const char *tp_name = raw_tp_link->btp->tp->name; 2757 u32 ulen = info->raw_tracepoint.tp_name_len; 2758 size_t tp_len = strlen(tp_name); 2759 2760 if (!ulen ^ !ubuf) 2761 return -EINVAL; 2762 2763 info->raw_tracepoint.tp_name_len = tp_len + 1; 2764 2765 if (!ubuf) 2766 return 0; 2767 2768 if (ulen >= tp_len + 1) { 2769 if (copy_to_user(ubuf, tp_name, tp_len + 1)) 2770 return -EFAULT; 2771 } else { 2772 char zero = '\0'; 2773 2774 if (copy_to_user(ubuf, tp_name, ulen - 1)) 2775 return -EFAULT; 2776 if (put_user(zero, ubuf + ulen - 1)) 2777 return -EFAULT; 2778 return -ENOSPC; 2779 } 2780 2781 return 0; 2782 } 2783 2784 static const struct bpf_link_ops 
bpf_raw_tp_link_lops = { 2785 .release = bpf_raw_tp_link_release, 2786 .dealloc = bpf_raw_tp_link_dealloc, 2787 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 2788 .fill_link_info = bpf_raw_tp_link_fill_link_info, 2789 }; 2790 2791 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 2792 2793 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 2794 { 2795 struct bpf_link_primer link_primer; 2796 struct bpf_raw_tp_link *link; 2797 struct bpf_raw_event_map *btp; 2798 struct bpf_prog *prog; 2799 const char *tp_name; 2800 char buf[128]; 2801 int err; 2802 2803 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 2804 return -EINVAL; 2805 2806 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 2807 if (IS_ERR(prog)) 2808 return PTR_ERR(prog); 2809 2810 switch (prog->type) { 2811 case BPF_PROG_TYPE_TRACING: 2812 case BPF_PROG_TYPE_EXT: 2813 case BPF_PROG_TYPE_LSM: 2814 if (attr->raw_tracepoint.name) { 2815 /* The attach point for this category of programs 2816 * should be specified via btf_id during program load. 2817 */ 2818 err = -EINVAL; 2819 goto out_put_prog; 2820 } 2821 if (prog->type == BPF_PROG_TYPE_TRACING && 2822 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 2823 tp_name = prog->aux->attach_func_name; 2824 break; 2825 } 2826 return bpf_tracing_prog_attach(prog, 0, 0); 2827 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2828 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2829 if (strncpy_from_user(buf, 2830 u64_to_user_ptr(attr->raw_tracepoint.name), 2831 sizeof(buf) - 1) < 0) { 2832 err = -EFAULT; 2833 goto out_put_prog; 2834 } 2835 buf[sizeof(buf) - 1] = 0; 2836 tp_name = buf; 2837 break; 2838 default: 2839 err = -EINVAL; 2840 goto out_put_prog; 2841 } 2842 2843 btp = bpf_get_raw_tracepoint(tp_name); 2844 if (!btp) { 2845 err = -ENOENT; 2846 goto out_put_prog; 2847 } 2848 2849 link = kzalloc(sizeof(*link), GFP_USER); 2850 if (!link) { 2851 err = -ENOMEM; 2852 goto out_put_btp; 2853 } 2854 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 2855 &bpf_raw_tp_link_lops, prog); 2856 link->btp = btp; 2857 2858 err = bpf_link_prime(&link->link, &link_primer); 2859 if (err) { 2860 kfree(link); 2861 goto out_put_btp; 2862 } 2863 2864 err = bpf_probe_register(link->btp, prog); 2865 if (err) { 2866 bpf_link_cleanup(&link_primer); 2867 goto out_put_btp; 2868 } 2869 2870 return bpf_link_settle(&link_primer); 2871 2872 out_put_btp: 2873 bpf_put_raw_tracepoint(btp); 2874 out_put_prog: 2875 bpf_prog_put(prog); 2876 return err; 2877 } 2878 2879 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 2880 enum bpf_attach_type attach_type) 2881 { 2882 switch (prog->type) { 2883 case BPF_PROG_TYPE_CGROUP_SOCK: 2884 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2885 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2886 case BPF_PROG_TYPE_SK_LOOKUP: 2887 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 2888 case BPF_PROG_TYPE_CGROUP_SKB: 2889 if (!capable(CAP_NET_ADMIN)) 2890 /* cg-skb progs can be loaded by unpriv user. 2891 * check permissions at attach time. 2892 */ 2893 return -EPERM; 2894 return prog->enforce_expected_attach_type && 2895 prog->expected_attach_type != attach_type ? 
2896 -EINVAL : 0; 2897 default: 2898 return 0; 2899 } 2900 } 2901 2902 static enum bpf_prog_type 2903 attach_type_to_prog_type(enum bpf_attach_type attach_type) 2904 { 2905 switch (attach_type) { 2906 case BPF_CGROUP_INET_INGRESS: 2907 case BPF_CGROUP_INET_EGRESS: 2908 return BPF_PROG_TYPE_CGROUP_SKB; 2909 case BPF_CGROUP_INET_SOCK_CREATE: 2910 case BPF_CGROUP_INET_SOCK_RELEASE: 2911 case BPF_CGROUP_INET4_POST_BIND: 2912 case BPF_CGROUP_INET6_POST_BIND: 2913 return BPF_PROG_TYPE_CGROUP_SOCK; 2914 case BPF_CGROUP_INET4_BIND: 2915 case BPF_CGROUP_INET6_BIND: 2916 case BPF_CGROUP_INET4_CONNECT: 2917 case BPF_CGROUP_INET6_CONNECT: 2918 case BPF_CGROUP_INET4_GETPEERNAME: 2919 case BPF_CGROUP_INET6_GETPEERNAME: 2920 case BPF_CGROUP_INET4_GETSOCKNAME: 2921 case BPF_CGROUP_INET6_GETSOCKNAME: 2922 case BPF_CGROUP_UDP4_SENDMSG: 2923 case BPF_CGROUP_UDP6_SENDMSG: 2924 case BPF_CGROUP_UDP4_RECVMSG: 2925 case BPF_CGROUP_UDP6_RECVMSG: 2926 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 2927 case BPF_CGROUP_SOCK_OPS: 2928 return BPF_PROG_TYPE_SOCK_OPS; 2929 case BPF_CGROUP_DEVICE: 2930 return BPF_PROG_TYPE_CGROUP_DEVICE; 2931 case BPF_SK_MSG_VERDICT: 2932 return BPF_PROG_TYPE_SK_MSG; 2933 case BPF_SK_SKB_STREAM_PARSER: 2934 case BPF_SK_SKB_STREAM_VERDICT: 2935 return BPF_PROG_TYPE_SK_SKB; 2936 case BPF_LIRC_MODE2: 2937 return BPF_PROG_TYPE_LIRC_MODE2; 2938 case BPF_FLOW_DISSECTOR: 2939 return BPF_PROG_TYPE_FLOW_DISSECTOR; 2940 case BPF_CGROUP_SYSCTL: 2941 return BPF_PROG_TYPE_CGROUP_SYSCTL; 2942 case BPF_CGROUP_GETSOCKOPT: 2943 case BPF_CGROUP_SETSOCKOPT: 2944 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 2945 case BPF_TRACE_ITER: 2946 return BPF_PROG_TYPE_TRACING; 2947 case BPF_SK_LOOKUP: 2948 return BPF_PROG_TYPE_SK_LOOKUP; 2949 case BPF_XDP: 2950 return BPF_PROG_TYPE_XDP; 2951 default: 2952 return BPF_PROG_TYPE_UNSPEC; 2953 } 2954 } 2955 2956 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd 2957 2958 #define BPF_F_ATTACH_MASK \ 2959 (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE) 2960 2961 static int bpf_prog_attach(const union bpf_attr *attr) 2962 { 2963 enum bpf_prog_type ptype; 2964 struct bpf_prog *prog; 2965 int ret; 2966 2967 if (CHECK_ATTR(BPF_PROG_ATTACH)) 2968 return -EINVAL; 2969 2970 if (attr->attach_flags & ~BPF_F_ATTACH_MASK) 2971 return -EINVAL; 2972 2973 ptype = attach_type_to_prog_type(attr->attach_type); 2974 if (ptype == BPF_PROG_TYPE_UNSPEC) 2975 return -EINVAL; 2976 2977 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 2978 if (IS_ERR(prog)) 2979 return PTR_ERR(prog); 2980 2981 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 2982 bpf_prog_put(prog); 2983 return -EINVAL; 2984 } 2985 2986 switch (ptype) { 2987 case BPF_PROG_TYPE_SK_SKB: 2988 case BPF_PROG_TYPE_SK_MSG: 2989 ret = sock_map_get_from_fd(attr, prog); 2990 break; 2991 case BPF_PROG_TYPE_LIRC_MODE2: 2992 ret = lirc_prog_attach(attr, prog); 2993 break; 2994 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2995 ret = netns_bpf_prog_attach(attr, prog); 2996 break; 2997 case BPF_PROG_TYPE_CGROUP_DEVICE: 2998 case BPF_PROG_TYPE_CGROUP_SKB: 2999 case BPF_PROG_TYPE_CGROUP_SOCK: 3000 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3001 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3002 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3003 case BPF_PROG_TYPE_SOCK_OPS: 3004 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 3005 break; 3006 default: 3007 ret = -EINVAL; 3008 } 3009 3010 if (ret) 3011 bpf_prog_put(prog); 3012 return ret; 3013 } 3014 3015 #define BPF_PROG_DETACH_LAST_FIELD attach_type 3016 3017 static int bpf_prog_detach(const union bpf_attr *attr) 3018 
{ 3019 enum bpf_prog_type ptype; 3020 3021 if (CHECK_ATTR(BPF_PROG_DETACH)) 3022 return -EINVAL; 3023 3024 ptype = attach_type_to_prog_type(attr->attach_type); 3025 3026 switch (ptype) { 3027 case BPF_PROG_TYPE_SK_MSG: 3028 case BPF_PROG_TYPE_SK_SKB: 3029 return sock_map_prog_detach(attr, ptype); 3030 case BPF_PROG_TYPE_LIRC_MODE2: 3031 return lirc_prog_detach(attr); 3032 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3033 return netns_bpf_prog_detach(attr, ptype); 3034 case BPF_PROG_TYPE_CGROUP_DEVICE: 3035 case BPF_PROG_TYPE_CGROUP_SKB: 3036 case BPF_PROG_TYPE_CGROUP_SOCK: 3037 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3038 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3039 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3040 case BPF_PROG_TYPE_SOCK_OPS: 3041 return cgroup_bpf_prog_detach(attr, ptype); 3042 default: 3043 return -EINVAL; 3044 } 3045 } 3046 3047 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt 3048 3049 static int bpf_prog_query(const union bpf_attr *attr, 3050 union bpf_attr __user *uattr) 3051 { 3052 if (!capable(CAP_NET_ADMIN)) 3053 return -EPERM; 3054 if (CHECK_ATTR(BPF_PROG_QUERY)) 3055 return -EINVAL; 3056 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 3057 return -EINVAL; 3058 3059 switch (attr->query.attach_type) { 3060 case BPF_CGROUP_INET_INGRESS: 3061 case BPF_CGROUP_INET_EGRESS: 3062 case BPF_CGROUP_INET_SOCK_CREATE: 3063 case BPF_CGROUP_INET_SOCK_RELEASE: 3064 case BPF_CGROUP_INET4_BIND: 3065 case BPF_CGROUP_INET6_BIND: 3066 case BPF_CGROUP_INET4_POST_BIND: 3067 case BPF_CGROUP_INET6_POST_BIND: 3068 case BPF_CGROUP_INET4_CONNECT: 3069 case BPF_CGROUP_INET6_CONNECT: 3070 case BPF_CGROUP_INET4_GETPEERNAME: 3071 case BPF_CGROUP_INET6_GETPEERNAME: 3072 case BPF_CGROUP_INET4_GETSOCKNAME: 3073 case BPF_CGROUP_INET6_GETSOCKNAME: 3074 case BPF_CGROUP_UDP4_SENDMSG: 3075 case BPF_CGROUP_UDP6_SENDMSG: 3076 case BPF_CGROUP_UDP4_RECVMSG: 3077 case BPF_CGROUP_UDP6_RECVMSG: 3078 case BPF_CGROUP_SOCK_OPS: 3079 case BPF_CGROUP_DEVICE: 3080 case BPF_CGROUP_SYSCTL: 3081 case BPF_CGROUP_GETSOCKOPT: 3082 case BPF_CGROUP_SETSOCKOPT: 3083 return cgroup_bpf_prog_query(attr, uattr); 3084 case BPF_LIRC_MODE2: 3085 return lirc_prog_query(attr, uattr); 3086 case BPF_FLOW_DISSECTOR: 3087 case BPF_SK_LOOKUP: 3088 return netns_bpf_prog_query(attr, uattr); 3089 default: 3090 return -EINVAL; 3091 } 3092 } 3093 3094 #define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu 3095 3096 static int bpf_prog_test_run(const union bpf_attr *attr, 3097 union bpf_attr __user *uattr) 3098 { 3099 struct bpf_prog *prog; 3100 int ret = -ENOTSUPP; 3101 3102 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 3103 return -EINVAL; 3104 3105 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 3106 (!attr->test.ctx_size_in && attr->test.ctx_in)) 3107 return -EINVAL; 3108 3109 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 3110 (!attr->test.ctx_size_out && attr->test.ctx_out)) 3111 return -EINVAL; 3112 3113 prog = bpf_prog_get(attr->test.prog_fd); 3114 if (IS_ERR(prog)) 3115 return PTR_ERR(prog); 3116 3117 if (prog->aux->ops->test_run) 3118 ret = prog->aux->ops->test_run(prog, attr, uattr); 3119 3120 bpf_prog_put(prog); 3121 return ret; 3122 } 3123 3124 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 3125 3126 static int bpf_obj_get_next_id(const union bpf_attr *attr, 3127 union bpf_attr __user *uattr, 3128 struct idr *idr, 3129 spinlock_t *lock) 3130 { 3131 u32 next_id = attr->start_id; 3132 int err = 0; 3133 3134 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 3135 return -EINVAL; 3136 3137 if (!capable(CAP_SYS_ADMIN)) 3138 return -EPERM; 3139 3140 
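/* Userspace sketch (illustrative, not part of this file): all in-use IDs
 * can be enumerated by feeding each returned next_id back in as start_id
 * until the syscall fails (errno == ENOENT once every ID has been seen),
 * e.g. for programs:
 *
 *	union bpf_attr attr = {};
 *	__u32 id = 0;
 *
 *	for (;;) {
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *			    sizeof(attr)))
 *			break;		// errno == ENOENT: no more IDs
 *		id = attr.next_id;	// written via put_user() below
 *	}
 */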
next_id++; 3141 spin_lock_bh(lock); 3142 if (!idr_get_next(idr, &next_id)) 3143 err = -ENOENT; 3144 spin_unlock_bh(lock); 3145 3146 if (!err) 3147 err = put_user(next_id, &uattr->next_id); 3148 3149 return err; 3150 } 3151 3152 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 3153 { 3154 struct bpf_map *map; 3155 3156 spin_lock_bh(&map_idr_lock); 3157 again: 3158 map = idr_get_next(&map_idr, id); 3159 if (map) { 3160 map = __bpf_map_inc_not_zero(map, false); 3161 if (IS_ERR(map)) { 3162 (*id)++; 3163 goto again; 3164 } 3165 } 3166 spin_unlock_bh(&map_idr_lock); 3167 3168 return map; 3169 } 3170 3171 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 3172 { 3173 struct bpf_prog *prog; 3174 3175 spin_lock_bh(&prog_idr_lock); 3176 again: 3177 prog = idr_get_next(&prog_idr, id); 3178 if (prog) { 3179 prog = bpf_prog_inc_not_zero(prog); 3180 if (IS_ERR(prog)) { 3181 (*id)++; 3182 goto again; 3183 } 3184 } 3185 spin_unlock_bh(&prog_idr_lock); 3186 3187 return prog; 3188 } 3189 3190 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 3191 3192 struct bpf_prog *bpf_prog_by_id(u32 id) 3193 { 3194 struct bpf_prog *prog; 3195 3196 if (!id) 3197 return ERR_PTR(-ENOENT); 3198 3199 spin_lock_bh(&prog_idr_lock); 3200 prog = idr_find(&prog_idr, id); 3201 if (prog) 3202 prog = bpf_prog_inc_not_zero(prog); 3203 else 3204 prog = ERR_PTR(-ENOENT); 3205 spin_unlock_bh(&prog_idr_lock); 3206 return prog; 3207 } 3208 3209 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 3210 { 3211 struct bpf_prog *prog; 3212 u32 id = attr->prog_id; 3213 int fd; 3214 3215 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 3216 return -EINVAL; 3217 3218 if (!capable(CAP_SYS_ADMIN)) 3219 return -EPERM; 3220 3221 prog = bpf_prog_by_id(id); 3222 if (IS_ERR(prog)) 3223 return PTR_ERR(prog); 3224 3225 fd = bpf_prog_new_fd(prog); 3226 if (fd < 0) 3227 bpf_prog_put(prog); 3228 3229 return fd; 3230 } 3231 3232 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 3233 3234 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 3235 { 3236 struct bpf_map *map; 3237 u32 id = attr->map_id; 3238 int f_flags; 3239 int fd; 3240 3241 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 3242 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 3243 return -EINVAL; 3244 3245 if (!capable(CAP_SYS_ADMIN)) 3246 return -EPERM; 3247 3248 f_flags = bpf_get_file_flag(attr->open_flags); 3249 if (f_flags < 0) 3250 return f_flags; 3251 3252 spin_lock_bh(&map_idr_lock); 3253 map = idr_find(&map_idr, id); 3254 if (map) 3255 map = __bpf_map_inc_not_zero(map, true); 3256 else 3257 map = ERR_PTR(-ENOENT); 3258 spin_unlock_bh(&map_idr_lock); 3259 3260 if (IS_ERR(map)) 3261 return PTR_ERR(map); 3262 3263 fd = bpf_map_new_fd(map, f_flags); 3264 if (fd < 0) 3265 bpf_map_put_with_uref(map); 3266 3267 return fd; 3268 } 3269 3270 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 3271 unsigned long addr, u32 *off, 3272 u32 *type) 3273 { 3274 const struct bpf_map *map; 3275 int i; 3276 3277 mutex_lock(&prog->aux->used_maps_mutex); 3278 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 3279 map = prog->aux->used_maps[i]; 3280 if (map == (void *)addr) { 3281 *type = BPF_PSEUDO_MAP_FD; 3282 goto out; 3283 } 3284 if (!map->ops->map_direct_value_meta) 3285 continue; 3286 if (!map->ops->map_direct_value_meta(map, addr, off)) { 3287 *type = BPF_PSEUDO_MAP_VALUE; 3288 goto out; 3289 } 3290 } 3291 map = NULL; 3292 3293 out: 3294 mutex_unlock(&prog->aux->used_maps_mutex); 3295 return map; 3296 } 3297 3298 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog 
*prog, 3299 const struct cred *f_cred) 3300 { 3301 const struct bpf_map *map; 3302 struct bpf_insn *insns; 3303 u32 off, type; 3304 u64 imm; 3305 u8 code; 3306 int i; 3307 3308 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 3309 GFP_USER); 3310 if (!insns) 3311 return insns; 3312 3313 for (i = 0; i < prog->len; i++) { 3314 code = insns[i].code; 3315 3316 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 3317 insns[i].code = BPF_JMP | BPF_CALL; 3318 insns[i].imm = BPF_FUNC_tail_call; 3319 /* fall-through */ 3320 } 3321 if (code == (BPF_JMP | BPF_CALL) || 3322 code == (BPF_JMP | BPF_CALL_ARGS)) { 3323 if (code == (BPF_JMP | BPF_CALL_ARGS)) 3324 insns[i].code = BPF_JMP | BPF_CALL; 3325 if (!bpf_dump_raw_ok(f_cred)) 3326 insns[i].imm = 0; 3327 continue; 3328 } 3329 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 3330 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 3331 continue; 3332 } 3333 3334 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 3335 continue; 3336 3337 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 3338 map = bpf_map_from_imm(prog, imm, &off, &type); 3339 if (map) { 3340 insns[i].src_reg = type; 3341 insns[i].imm = map->id; 3342 insns[i + 1].imm = off; 3343 continue; 3344 } 3345 } 3346 3347 return insns; 3348 } 3349 3350 static int set_info_rec_size(struct bpf_prog_info *info) 3351 { 3352 /* 3353 * Ensure info.*_rec_size is the same as kernel expected size 3354 * 3355 * or 3356 * 3357 * Only allow zero *_rec_size if both _rec_size and _cnt are 3358 * zero. In this case, the kernel will set the expected 3359 * _rec_size back to the info. 3360 */ 3361 3362 if ((info->nr_func_info || info->func_info_rec_size) && 3363 info->func_info_rec_size != sizeof(struct bpf_func_info)) 3364 return -EINVAL; 3365 3366 if ((info->nr_line_info || info->line_info_rec_size) && 3367 info->line_info_rec_size != sizeof(struct bpf_line_info)) 3368 return -EINVAL; 3369 3370 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 3371 info->jited_line_info_rec_size != sizeof(__u64)) 3372 return -EINVAL; 3373 3374 info->func_info_rec_size = sizeof(struct bpf_func_info); 3375 info->line_info_rec_size = sizeof(struct bpf_line_info); 3376 info->jited_line_info_rec_size = sizeof(__u64); 3377 3378 return 0; 3379 } 3380 3381 static int bpf_prog_get_info_by_fd(struct file *file, 3382 struct bpf_prog *prog, 3383 const union bpf_attr *attr, 3384 union bpf_attr __user *uattr) 3385 { 3386 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3387 struct bpf_prog_info info; 3388 u32 info_len = attr->info.info_len; 3389 struct bpf_prog_stats stats; 3390 char __user *uinsns; 3391 u32 ulen; 3392 int err; 3393 3394 err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); 3395 if (err) 3396 return err; 3397 info_len = min_t(u32, sizeof(info), info_len); 3398 3399 memset(&info, 0, sizeof(info)); 3400 if (copy_from_user(&info, uinfo, info_len)) 3401 return -EFAULT; 3402 3403 info.type = prog->type; 3404 info.id = prog->aux->id; 3405 info.load_time = prog->aux->load_time; 3406 info.created_by_uid = from_kuid_munged(current_user_ns(), 3407 prog->aux->user->uid); 3408 info.gpl_compatible = prog->gpl_compatible; 3409 3410 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 3411 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 3412 3413 mutex_lock(&prog->aux->used_maps_mutex); 3414 ulen = info.nr_map_ids; 3415 info.nr_map_ids = prog->aux->used_map_cnt; 3416 ulen = min_t(u32, info.nr_map_ids, ulen); 3417 if (ulen) { 3418 u32 __user *user_map_ids = 
u64_to_user_ptr(info.map_ids); 3419 u32 i; 3420 3421 for (i = 0; i < ulen; i++) 3422 if (put_user(prog->aux->used_maps[i]->id, 3423 &user_map_ids[i])) { 3424 mutex_unlock(&prog->aux->used_maps_mutex); 3425 return -EFAULT; 3426 } 3427 } 3428 mutex_unlock(&prog->aux->used_maps_mutex); 3429 3430 err = set_info_rec_size(&info); 3431 if (err) 3432 return err; 3433 3434 bpf_prog_get_stats(prog, &stats); 3435 info.run_time_ns = stats.nsecs; 3436 info.run_cnt = stats.cnt; 3437 3438 if (!bpf_capable()) { 3439 info.jited_prog_len = 0; 3440 info.xlated_prog_len = 0; 3441 info.nr_jited_ksyms = 0; 3442 info.nr_jited_func_lens = 0; 3443 info.nr_func_info = 0; 3444 info.nr_line_info = 0; 3445 info.nr_jited_line_info = 0; 3446 goto done; 3447 } 3448 3449 ulen = info.xlated_prog_len; 3450 info.xlated_prog_len = bpf_prog_insn_size(prog); 3451 if (info.xlated_prog_len && ulen) { 3452 struct bpf_insn *insns_sanitized; 3453 bool fault; 3454 3455 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 3456 info.xlated_prog_insns = 0; 3457 goto done; 3458 } 3459 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 3460 if (!insns_sanitized) 3461 return -ENOMEM; 3462 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 3463 ulen = min_t(u32, info.xlated_prog_len, ulen); 3464 fault = copy_to_user(uinsns, insns_sanitized, ulen); 3465 kfree(insns_sanitized); 3466 if (fault) 3467 return -EFAULT; 3468 } 3469 3470 if (bpf_prog_is_dev_bound(prog->aux)) { 3471 err = bpf_prog_offload_info_fill(&info, prog); 3472 if (err) 3473 return err; 3474 goto done; 3475 } 3476 3477 /* NOTE: the following code is supposed to be skipped for offload. 3478 * bpf_prog_offload_info_fill() is the place to fill similar fields 3479 * for offload. 3480 */ 3481 ulen = info.jited_prog_len; 3482 if (prog->aux->func_cnt) { 3483 u32 i; 3484 3485 info.jited_prog_len = 0; 3486 for (i = 0; i < prog->aux->func_cnt; i++) 3487 info.jited_prog_len += prog->aux->func[i]->jited_len; 3488 } else { 3489 info.jited_prog_len = prog->jited_len; 3490 } 3491 3492 if (info.jited_prog_len && ulen) { 3493 if (bpf_dump_raw_ok(file->f_cred)) { 3494 uinsns = u64_to_user_ptr(info.jited_prog_insns); 3495 ulen = min_t(u32, info.jited_prog_len, ulen); 3496 3497 /* for multi-function programs, copy the JITed 3498 * instructions for all the functions 3499 */ 3500 if (prog->aux->func_cnt) { 3501 u32 len, free, i; 3502 u8 *img; 3503 3504 free = ulen; 3505 for (i = 0; i < prog->aux->func_cnt; i++) { 3506 len = prog->aux->func[i]->jited_len; 3507 len = min_t(u32, len, free); 3508 img = (u8 *) prog->aux->func[i]->bpf_func; 3509 if (copy_to_user(uinsns, img, len)) 3510 return -EFAULT; 3511 uinsns += len; 3512 free -= len; 3513 if (!free) 3514 break; 3515 } 3516 } else { 3517 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 3518 return -EFAULT; 3519 } 3520 } else { 3521 info.jited_prog_insns = 0; 3522 } 3523 } 3524 3525 ulen = info.nr_jited_ksyms; 3526 info.nr_jited_ksyms = prog->aux->func_cnt ? 
: 1; 3527 if (ulen) { 3528 if (bpf_dump_raw_ok(file->f_cred)) { 3529 unsigned long ksym_addr; 3530 u64 __user *user_ksyms; 3531 u32 i; 3532 3533 /* copy the address of the kernel symbol 3534 * corresponding to each function 3535 */ 3536 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 3537 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 3538 if (prog->aux->func_cnt) { 3539 for (i = 0; i < ulen; i++) { 3540 ksym_addr = (unsigned long) 3541 prog->aux->func[i]->bpf_func; 3542 if (put_user((u64) ksym_addr, 3543 &user_ksyms[i])) 3544 return -EFAULT; 3545 } 3546 } else { 3547 ksym_addr = (unsigned long) prog->bpf_func; 3548 if (put_user((u64) ksym_addr, &user_ksyms[0])) 3549 return -EFAULT; 3550 } 3551 } else { 3552 info.jited_ksyms = 0; 3553 } 3554 } 3555 3556 ulen = info.nr_jited_func_lens; 3557 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1; 3558 if (ulen) { 3559 if (bpf_dump_raw_ok(file->f_cred)) { 3560 u32 __user *user_lens; 3561 u32 func_len, i; 3562 3563 /* copy the JITed image lengths for each function */ 3564 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 3565 user_lens = u64_to_user_ptr(info.jited_func_lens); 3566 if (prog->aux->func_cnt) { 3567 for (i = 0; i < ulen; i++) { 3568 func_len = 3569 prog->aux->func[i]->jited_len; 3570 if (put_user(func_len, &user_lens[i])) 3571 return -EFAULT; 3572 } 3573 } else { 3574 func_len = prog->jited_len; 3575 if (put_user(func_len, &user_lens[0])) 3576 return -EFAULT; 3577 } 3578 } else { 3579 info.jited_func_lens = 0; 3580 } 3581 } 3582 3583 if (prog->aux->btf) 3584 info.btf_id = btf_obj_id(prog->aux->btf); 3585 3586 ulen = info.nr_func_info; 3587 info.nr_func_info = prog->aux->func_info_cnt; 3588 if (info.nr_func_info && ulen) { 3589 char __user *user_finfo; 3590 3591 user_finfo = u64_to_user_ptr(info.func_info); 3592 ulen = min_t(u32, info.nr_func_info, ulen); 3593 if (copy_to_user(user_finfo, prog->aux->func_info, 3594 info.func_info_rec_size * ulen)) 3595 return -EFAULT; 3596 } 3597 3598 ulen = info.nr_line_info; 3599 info.nr_line_info = prog->aux->nr_linfo; 3600 if (info.nr_line_info && ulen) { 3601 __u8 __user *user_linfo; 3602 3603 user_linfo = u64_to_user_ptr(info.line_info); 3604 ulen = min_t(u32, info.nr_line_info, ulen); 3605 if (copy_to_user(user_linfo, prog->aux->linfo, 3606 info.line_info_rec_size * ulen)) 3607 return -EFAULT; 3608 } 3609 3610 ulen = info.nr_jited_line_info; 3611 if (prog->aux->jited_linfo) 3612 info.nr_jited_line_info = prog->aux->nr_linfo; 3613 else 3614 info.nr_jited_line_info = 0; 3615 if (info.nr_jited_line_info && ulen) { 3616 if (bpf_dump_raw_ok(file->f_cred)) { 3617 __u64 __user *user_linfo; 3618 u32 i; 3619 3620 user_linfo = u64_to_user_ptr(info.jited_line_info); 3621 ulen = min_t(u32, info.nr_jited_line_info, ulen); 3622 for (i = 0; i < ulen; i++) { 3623 if (put_user((__u64)(long)prog->aux->jited_linfo[i], 3624 &user_linfo[i])) 3625 return -EFAULT; 3626 } 3627 } else { 3628 info.jited_line_info = 0; 3629 } 3630 } 3631 3632 ulen = info.nr_prog_tags; 3633 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 3634 if (ulen) { 3635 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 3636 u32 i; 3637 3638 user_prog_tags = u64_to_user_ptr(info.prog_tags); 3639 ulen = min_t(u32, info.nr_prog_tags, ulen); 3640 if (prog->aux->func_cnt) { 3641 for (i = 0; i < ulen; i++) { 3642 if (copy_to_user(user_prog_tags[i], 3643 prog->aux->func[i]->tag, 3644 BPF_TAG_SIZE)) 3645 return -EFAULT; 3646 } 3647 } else { 3648 if (copy_to_user(user_prog_tags[0], 3649 prog->tag, BPF_TAG_SIZE)) 3650 return -EFAULT; 3651 } 3652 } 3653 3654 done: 3655 if (copy_to_user(uinfo, &info, info_len) || 3656 put_user(info_len, &uattr->info.info_len)) 3657 return -EFAULT; 3658 3659 return 0; 3660 } 3661 3662 static int bpf_map_get_info_by_fd(struct file *file, 3663 struct bpf_map *map, 3664 const union bpf_attr *attr, 3665 union bpf_attr __user *uattr) 3666 { 3667 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3668 struct bpf_map_info info; 3669 u32 info_len = attr->info.info_len; 3670 int err; 3671 3672 err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); 3673 if (err) 3674 return err; 3675 info_len = min_t(u32, sizeof(info), info_len); 3676 3677 memset(&info, 0, sizeof(info)); 3678 info.type = map->map_type; 3679 info.id = map->id; 3680 info.key_size = map->key_size; 3681 info.value_size = map->value_size; 3682 info.max_entries = map->max_entries; 3683 info.map_flags = map->map_flags; 3684 memcpy(info.name, map->name, sizeof(map->name)); 3685 3686 if (map->btf) { 3687 info.btf_id = btf_obj_id(map->btf); 3688 info.btf_key_type_id = map->btf_key_type_id; 3689 info.btf_value_type_id = map->btf_value_type_id; 3690 } 3691 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 3692 3693 if (bpf_map_is_dev_bound(map)) { 3694 err = bpf_map_offload_info_fill(&info, map); 3695 if (err) 3696 return err; 3697 } 3698 3699 if (copy_to_user(uinfo, &info, info_len) || 3700 put_user(info_len, &uattr->info.info_len)) 3701 return -EFAULT; 3702 3703 return 0; 3704 } 3705 3706 static int bpf_btf_get_info_by_fd(struct file *file, 3707 struct btf *btf, 3708 const union bpf_attr *attr, 3709 union bpf_attr __user *uattr) 3710 { 3711 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3712 u32 info_len = attr->info.info_len; 3713 int err; 3714 3715 err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len); 3716 if (err) 3717 return err; 3718 3719 return btf_get_info_by_fd(btf, attr, uattr); 3720 } 3721 3722 static int bpf_link_get_info_by_fd(struct file *file, 3723 struct bpf_link *link, 3724 const union bpf_attr *attr, 3725 union bpf_attr __user *uattr) 3726 { 3727 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3728 struct bpf_link_info info; 3729 u32 info_len = attr->info.info_len; 3730 int err; 3731 3732 err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); 3733 if (err) 3734 return err; 3735 info_len = min_t(u32, sizeof(info), info_len); 3736 3737 memset(&info, 0, sizeof(info)); 3738 if (copy_from_user(&info, uinfo, info_len)) 3739 return -EFAULT; 3740 3741 info.type = link->type; 3742 info.id = link->id; 3743 info.prog_id = link->prog->aux->id; 3744 3745 if (link->ops->fill_link_info) { 3746 err = link->ops->fill_link_info(link, &info); 3747 if (err) 3748 return err; 3749 } 3750 3751 if (copy_to_user(uinfo, &info, info_len) || 3752 put_user(info_len, &uattr->info.info_len)) 3753 return -EFAULT; 3754 3755 return 0; 3756 } 3757 3758 3759 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 3760 3761 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 
3762 union bpf_attr __user *uattr) 3763 { 3764 int ufd = attr->info.bpf_fd; 3765 struct fd f; 3766 int err; 3767 3768 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 3769 return -EINVAL; 3770 3771 f = fdget(ufd); 3772 if (!f.file) 3773 return -EBADFD; 3774 3775 if (f.file->f_op == &bpf_prog_fops) 3776 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, 3777 uattr); 3778 else if (f.file->f_op == &bpf_map_fops) 3779 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, 3780 uattr); 3781 else if (f.file->f_op == &btf_fops) 3782 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); 3783 else if (f.file->f_op == &bpf_link_fops) 3784 err = bpf_link_get_info_by_fd(f.file, f.file->private_data, 3785 attr, uattr); 3786 else 3787 err = -EINVAL; 3788 3789 fdput(f); 3790 return err; 3791 } 3792 3793 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level 3794 3795 static int bpf_btf_load(const union bpf_attr *attr) 3796 { 3797 if (CHECK_ATTR(BPF_BTF_LOAD)) 3798 return -EINVAL; 3799 3800 if (!bpf_capable()) 3801 return -EPERM; 3802 3803 return btf_new_fd(attr); 3804 } 3805 3806 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id 3807 3808 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) 3809 { 3810 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 3811 return -EINVAL; 3812 3813 if (!capable(CAP_SYS_ADMIN)) 3814 return -EPERM; 3815 3816 return btf_get_fd_by_id(attr->btf_id); 3817 } 3818 3819 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 3820 union bpf_attr __user *uattr, 3821 u32 prog_id, u32 fd_type, 3822 const char *buf, u64 probe_offset, 3823 u64 probe_addr) 3824 { 3825 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 3826 u32 len = buf ? strlen(buf) : 0, input_len; 3827 int err = 0; 3828 3829 if (put_user(len, &uattr->task_fd_query.buf_len)) 3830 return -EFAULT; 3831 input_len = attr->task_fd_query.buf_len; 3832 if (input_len && ubuf) { 3833 if (!len) { 3834 /* nothing to copy, just make ubuf NULL terminated */ 3835 char zero = '\0'; 3836 3837 if (put_user(zero, ubuf)) 3838 return -EFAULT; 3839 } else if (input_len >= len + 1) { 3840 /* ubuf can hold the string with NULL terminator */ 3841 if (copy_to_user(ubuf, buf, len + 1)) 3842 return -EFAULT; 3843 } else { 3844 /* ubuf cannot hold the string with NULL terminator, 3845 * do a partial copy with NULL terminator. 
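 * For example (illustrative values): with buf == "sys_enter" (len == 9)
 * and input_len == 4, userspace receives "sys" plus the NUL terminator,
 * buf_len was already set to 9 above, and the call returns -ENOSPC.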
3846 */ 3847 char zero = '\0'; 3848 3849 err = -ENOSPC; 3850 if (copy_to_user(ubuf, buf, input_len - 1)) 3851 return -EFAULT; 3852 if (put_user(zero, ubuf + input_len - 1)) 3853 return -EFAULT; 3854 } 3855 } 3856 3857 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 3858 put_user(fd_type, &uattr->task_fd_query.fd_type) || 3859 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 3860 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 3861 return -EFAULT; 3862 3863 return err; 3864 } 3865 3866 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 3867 3868 static int bpf_task_fd_query(const union bpf_attr *attr, 3869 union bpf_attr __user *uattr) 3870 { 3871 pid_t pid = attr->task_fd_query.pid; 3872 u32 fd = attr->task_fd_query.fd; 3873 const struct perf_event *event; 3874 struct files_struct *files; 3875 struct task_struct *task; 3876 struct file *file; 3877 int err; 3878 3879 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 3880 return -EINVAL; 3881 3882 if (!capable(CAP_SYS_ADMIN)) 3883 return -EPERM; 3884 3885 if (attr->task_fd_query.flags != 0) 3886 return -EINVAL; 3887 3888 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 3889 if (!task) 3890 return -ENOENT; 3891 3892 files = get_files_struct(task); 3893 put_task_struct(task); 3894 if (!files) 3895 return -ENOENT; 3896 3897 err = 0; 3898 spin_lock(&files->file_lock); 3899 file = fcheck_files(files, fd); 3900 if (!file) 3901 err = -EBADF; 3902 else 3903 get_file(file); 3904 spin_unlock(&files->file_lock); 3905 put_files_struct(files); 3906 3907 if (err) 3908 goto out; 3909 3910 if (file->f_op == &bpf_link_fops) { 3911 struct bpf_link *link = file->private_data; 3912 3913 if (link->ops == &bpf_raw_tp_link_lops) { 3914 struct bpf_raw_tp_link *raw_tp = 3915 container_of(link, struct bpf_raw_tp_link, link); 3916 struct bpf_raw_event_map *btp = raw_tp->btp; 3917 3918 err = bpf_task_fd_query_copy(attr, uattr, 3919 raw_tp->link.prog->aux->id, 3920 BPF_FD_TYPE_RAW_TRACEPOINT, 3921 btp->tp->name, 0, 0); 3922 goto put_file; 3923 } 3924 goto out_not_supp; 3925 } 3926 3927 event = perf_get_event(file); 3928 if (!IS_ERR(event)) { 3929 u64 probe_offset, probe_addr; 3930 u32 prog_id, fd_type; 3931 const char *buf; 3932 3933 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 3934 &buf, &probe_offset, 3935 &probe_addr); 3936 if (!err) 3937 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 3938 fd_type, buf, 3939 probe_offset, 3940 probe_addr); 3941 goto put_file; 3942 } 3943 3944 out_not_supp: 3945 err = -ENOTSUPP; 3946 put_file: 3947 fput(file); 3948 out: 3949 return err; 3950 } 3951 3952 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 3953 3954 #define BPF_DO_BATCH(fn) \ 3955 do { \ 3956 if (!fn) { \ 3957 err = -ENOTSUPP; \ 3958 goto err_put; \ 3959 } \ 3960 err = fn(map, attr, uattr); \ 3961 } while (0) 3962 3963 static int bpf_map_do_batch(const union bpf_attr *attr, 3964 union bpf_attr __user *uattr, 3965 int cmd) 3966 { 3967 struct bpf_map *map; 3968 int err, ufd; 3969 struct fd f; 3970 3971 if (CHECK_ATTR(BPF_MAP_BATCH)) 3972 return -EINVAL; 3973 3974 ufd = attr->batch.map_fd; 3975 f = fdget(ufd); 3976 map = __bpf_map_get(f); 3977 if (IS_ERR(map)) 3978 return PTR_ERR(map); 3979 3980 if ((cmd == BPF_MAP_LOOKUP_BATCH || 3981 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) && 3982 !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 3983 err = -EPERM; 3984 goto err_put; 3985 } 3986 3987 if (cmd != BPF_MAP_LOOKUP_BATCH && 3988 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 3989 err = -EPERM; 3990 goto err_put; 3991 } 3992 3993 if (cmd == 
BPF_MAP_LOOKUP_BATCH) 3994 BPF_DO_BATCH(map->ops->map_lookup_batch); 3995 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 3996 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch); 3997 else if (cmd == BPF_MAP_UPDATE_BATCH) 3998 BPF_DO_BATCH(map->ops->map_update_batch); 3999 else 4000 BPF_DO_BATCH(map->ops->map_delete_batch); 4001 4002 err_put: 4003 fdput(f); 4004 return err; 4005 } 4006 4007 static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 4008 { 4009 if (attr->link_create.attach_type != prog->expected_attach_type) 4010 return -EINVAL; 4011 4012 if (prog->expected_attach_type == BPF_TRACE_ITER) 4013 return bpf_iter_link_attach(attr, prog); 4014 else if (prog->type == BPF_PROG_TYPE_EXT) 4015 return bpf_tracing_prog_attach(prog, 4016 attr->link_create.target_fd, 4017 attr->link_create.target_btf_id); 4018 return -EINVAL; 4019 } 4020 4021 #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len 4022 static int link_create(union bpf_attr *attr) 4023 { 4024 enum bpf_prog_type ptype; 4025 struct bpf_prog *prog; 4026 int ret; 4027 4028 if (CHECK_ATTR(BPF_LINK_CREATE)) 4029 return -EINVAL; 4030 4031 prog = bpf_prog_get(attr->link_create.prog_fd); 4032 if (IS_ERR(prog)) 4033 return PTR_ERR(prog); 4034 4035 ret = bpf_prog_attach_check_attach_type(prog, 4036 attr->link_create.attach_type); 4037 if (ret) 4038 goto out; 4039 4040 if (prog->type == BPF_PROG_TYPE_EXT) { 4041 ret = tracing_bpf_link_attach(attr, prog); 4042 goto out; 4043 } 4044 4045 ptype = attach_type_to_prog_type(attr->link_create.attach_type); 4046 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) { 4047 ret = -EINVAL; 4048 goto out; 4049 } 4050 4051 switch (ptype) { 4052 case BPF_PROG_TYPE_CGROUP_SKB: 4053 case BPF_PROG_TYPE_CGROUP_SOCK: 4054 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4055 case BPF_PROG_TYPE_SOCK_OPS: 4056 case BPF_PROG_TYPE_CGROUP_DEVICE: 4057 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4058 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4059 ret = cgroup_bpf_link_attach(attr, prog); 4060 break; 4061 case BPF_PROG_TYPE_TRACING: 4062 ret = tracing_bpf_link_attach(attr, prog); 4063 break; 4064 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4065 case BPF_PROG_TYPE_SK_LOOKUP: 4066 ret = netns_bpf_link_create(attr, prog); 4067 break; 4068 #ifdef CONFIG_NET 4069 case BPF_PROG_TYPE_XDP: 4070 ret = bpf_xdp_link_attach(attr, prog); 4071 break; 4072 #endif 4073 default: 4074 ret = -EINVAL; 4075 } 4076 4077 out: 4078 if (ret < 0) 4079 bpf_prog_put(prog); 4080 return ret; 4081 } 4082 4083 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd 4084 4085 static int link_update(union bpf_attr *attr) 4086 { 4087 struct bpf_prog *old_prog = NULL, *new_prog; 4088 struct bpf_link *link; 4089 u32 flags; 4090 int ret; 4091 4092 if (CHECK_ATTR(BPF_LINK_UPDATE)) 4093 return -EINVAL; 4094 4095 flags = attr->link_update.flags; 4096 if (flags & ~BPF_F_REPLACE) 4097 return -EINVAL; 4098 4099 link = bpf_link_get_from_fd(attr->link_update.link_fd); 4100 if (IS_ERR(link)) 4101 return PTR_ERR(link); 4102 4103 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); 4104 if (IS_ERR(new_prog)) { 4105 ret = PTR_ERR(new_prog); 4106 goto out_put_link; 4107 } 4108 4109 if (flags & BPF_F_REPLACE) { 4110 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); 4111 if (IS_ERR(old_prog)) { 4112 ret = PTR_ERR(old_prog); 4113 old_prog = NULL; 4114 goto out_put_progs; 4115 } 4116 } else if (attr->link_update.old_prog_fd) { 4117 ret = -EINVAL; 4118 goto out_put_progs; 4119 } 4120 4121 if (link->ops->update_prog) 4122 ret = 
link->ops->update_prog(link, new_prog, old_prog); 4123 else 4124 ret = -EINVAL; 4125 4126 out_put_progs: 4127 if (old_prog) 4128 bpf_prog_put(old_prog); 4129 if (ret) 4130 bpf_prog_put(new_prog); 4131 out_put_link: 4132 bpf_link_put(link); 4133 return ret; 4134 } 4135 4136 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd 4137 4138 static int link_detach(union bpf_attr *attr) 4139 { 4140 struct bpf_link *link; 4141 int ret; 4142 4143 if (CHECK_ATTR(BPF_LINK_DETACH)) 4144 return -EINVAL; 4145 4146 link = bpf_link_get_from_fd(attr->link_detach.link_fd); 4147 if (IS_ERR(link)) 4148 return PTR_ERR(link); 4149 4150 if (link->ops->detach) 4151 ret = link->ops->detach(link); 4152 else 4153 ret = -EOPNOTSUPP; 4154 4155 bpf_link_put(link); 4156 return ret; 4157 } 4158 4159 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) 4160 { 4161 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); 4162 } 4163 4164 struct bpf_link *bpf_link_by_id(u32 id) 4165 { 4166 struct bpf_link *link; 4167 4168 if (!id) 4169 return ERR_PTR(-ENOENT); 4170 4171 spin_lock_bh(&link_idr_lock); 4172 /* before link is "settled", ID is 0, pretend it doesn't exist yet */ 4173 link = idr_find(&link_idr, id); 4174 if (link) { 4175 if (link->id) 4176 link = bpf_link_inc_not_zero(link); 4177 else 4178 link = ERR_PTR(-EAGAIN); 4179 } else { 4180 link = ERR_PTR(-ENOENT); 4181 } 4182 spin_unlock_bh(&link_idr_lock); 4183 return link; 4184 } 4185 4186 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id 4187 4188 static int bpf_link_get_fd_by_id(const union bpf_attr *attr) 4189 { 4190 struct bpf_link *link; 4191 u32 id = attr->link_id; 4192 int fd; 4193 4194 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) 4195 return -EINVAL; 4196 4197 if (!capable(CAP_SYS_ADMIN)) 4198 return -EPERM; 4199 4200 link = bpf_link_by_id(id); 4201 if (IS_ERR(link)) 4202 return PTR_ERR(link); 4203 4204 fd = bpf_link_new_fd(link); 4205 if (fd < 0) 4206 bpf_link_put(link); 4207 4208 return fd; 4209 } 4210 4211 DEFINE_MUTEX(bpf_stats_enabled_mutex); 4212 4213 static int bpf_stats_release(struct inode *inode, struct file *file) 4214 { 4215 mutex_lock(&bpf_stats_enabled_mutex); 4216 static_key_slow_dec(&bpf_stats_enabled_key.key); 4217 mutex_unlock(&bpf_stats_enabled_mutex); 4218 return 0; 4219 } 4220 4221 static const struct file_operations bpf_stats_fops = { 4222 .release = bpf_stats_release, 4223 }; 4224 4225 static int bpf_enable_runtime_stats(void) 4226 { 4227 int fd; 4228 4229 mutex_lock(&bpf_stats_enabled_mutex); 4230 4231 /* Set a very high limit to avoid overflow */ 4232 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { 4233 mutex_unlock(&bpf_stats_enabled_mutex); 4234 return -EBUSY; 4235 } 4236 4237 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); 4238 if (fd >= 0) 4239 static_key_slow_inc(&bpf_stats_enabled_key.key); 4240 4241 mutex_unlock(&bpf_stats_enabled_mutex); 4242 return fd; 4243 } 4244 4245 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type 4246 4247 static int bpf_enable_stats(union bpf_attr *attr) 4248 { 4249 4250 if (CHECK_ATTR(BPF_ENABLE_STATS)) 4251 return -EINVAL; 4252 4253 if (!capable(CAP_SYS_ADMIN)) 4254 return -EPERM; 4255 4256 switch (attr->enable_stats.type) { 4257 case BPF_STATS_RUN_TIME: 4258 return bpf_enable_runtime_stats(); 4259 default: 4260 break; 4261 } 4262 return -EINVAL; 4263 } 4264 4265 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags 4266 4267 static int bpf_iter_create(union bpf_attr *attr) 4268 { 4269 struct bpf_link *link; 4270 int err; 4271 
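/* Userspace sketch (illustrative): the iterator FD produced here is read
 * to drive the BPF_TRACE_ITER program, with link_fd obtained earlier from
 * BPF_LINK_CREATE:
 *
 *	union bpf_attr attr = {};
 *	char buf[4096];
 *	int iter_fd;
 *	ssize_t n;
 *
 *	attr.iter_create.link_fd = link_fd;
 *	iter_fd = syscall(__NR_bpf, BPF_ITER_CREATE, &attr, sizeof(attr));
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		;	// each read() batch runs the iterator program
 */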
#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags

static int bpf_iter_create(union bpf_attr *attr)
{
	struct bpf_link *link;
	int err;

	if (CHECK_ATTR(BPF_ITER_CREATE))
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	err = bpf_iter_new_fd(link);
	bpf_link_put(link);

	return err;
}

#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags

static int bpf_prog_bind_map(union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct bpf_map **used_maps_old, **used_maps_new;
	int i, ret = 0;

	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
		return -EINVAL;

	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	map = bpf_map_get(attr->prog_bind_map.map_fd);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_prog_put;
	}

	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (used_maps_old[i] == map) {
			bpf_map_put(map);
			goto out_unlock;
		}

	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	if (!used_maps_new) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;

	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;

	kfree(used_maps_old);

out_unlock:
	mutex_unlock(&prog->aux->used_maps_mutex);

	if (ret)
		bpf_map_put(map);
out_prog_put:
	bpf_prog_put(prog);
	return ret;
}
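/*
 * Usage note (a minimal userspace sketch, not part of this file):
 * BPF_PROG_BIND_MAP ties a map's lifetime to a program's, which is useful
 * for maps the program references only indirectly and that the verifier
 * therefore never recorded in used_maps. Assuming prog_fd and map_fd were
 * obtained earlier via BPF_PROG_LOAD and BPF_MAP_CREATE:
 *
 *	union bpf_attr attr = {};
 *	int err;
 *
 *	attr.prog_bind_map.prog_fd = prog_fd;
 *	attr.prog_bind_map.map_fd = map_fd;
 *	attr.prog_bind_map.flags = 0;	// must be zero, as checked above
 *	err = syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 */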
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr;
	int err;

	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	case BPF_MAP_LOOKUP_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr,
				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
		break;
	case BPF_MAP_UPDATE_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
		break;
	case BPF_MAP_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
		break;
	case BPF_LINK_CREATE:
		err = link_create(&attr);
		break;
	case BPF_LINK_UPDATE:
		err = link_update(&attr);
		break;
	case BPF_LINK_GET_FD_BY_ID:
		err = bpf_link_get_fd_by_id(&attr);
		break;
	case BPF_LINK_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &link_idr, &link_idr_lock);
		break;
	case BPF_ENABLE_STATS:
		err = bpf_enable_stats(&attr);
		break;
	case BPF_ITER_CREATE:
		err = bpf_iter_create(&attr);
		break;
	case BPF_LINK_DETACH:
		err = link_detach(&attr);
		break;
	case BPF_PROG_BIND_MAP:
		err = bpf_prog_bind_map(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
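/*
 * Usage note (a minimal userspace sketch, not part of this file): the
 * GET_NEXT_ID/GET_FD_BY_ID command pairs dispatched above let a
 * CAP_SYS_ADMIN caller walk every object of a given kind in the system.
 * For links, assuming the usual raw syscall(2) wrapper:
 *
 *	__u32 id = 0;
 *	union bpf_attr attr;
 *	int fd;
 *
 *	for (;;) {
 *		memset(&attr, 0, sizeof(attr));
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_LINK_GET_NEXT_ID, &attr, sizeof(attr)))
 *			break;		// -ENOENT once the IDR is exhausted
 *		id = attr.next_id;
 *		memset(&attr, 0, sizeof(attr));
 *		attr.link_id = id;
 *		fd = syscall(__NR_bpf, BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
 *		if (fd < 0)
 *			continue;	// link may have gone away; see -EAGAIN above
 *		// ... inspect via BPF_OBJ_GET_INFO_BY_FD, then close(fd) ...
 *	}
 */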