// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/kasan.h>
#include <linux/bpf_verifier.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it should
 * return bpf_map_lookup_elem_proto from its own verifier_ops->get_func_proto()
 * callback, so that the verifier can properly check the arguments.
 *
 * Different map implementations will rely on rcu in map methods
 * lookup/update/delete, therefore eBPF programs must run under an rcu lock
 * if the program is allowed to access maps; hence the rcu_read_lock_held() or
 * rcu_read_lock_trace_held() checks in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
};
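
/* Illustrative only (not part of the kernel build): a minimal BPF-side
 * sketch of how a program would use the lookup/update helpers above. The
 * map and section names are made up for the example:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 128);
 *		__type(key, u32);
 *		__type(value, u64);
 *	} counters SEC(".maps");
 *
 *	SEC("tracepoint/syscalls/sys_enter_write")
 *	int count_writes(void *ctx)
 *	{
 *		u32 key = 0;
 *		u64 init = 1, *val;
 *
 *		val = bpf_map_lookup_elem(&counters, &key);
 *		if (val)
 *			__sync_fetch_and_add(val, 1);
 *		else
 *			bpf_map_update_elem(&counters, &key, &init, BPF_NOEXIST);
 *		return 0;
 *	}
 *
 * The program runs under an RCU read-side critical section, which is why
 * the helpers above only WARN when no RCU lock flavor is held.
 */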

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
};

BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
}

const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
	.func		= bpf_map_lookup_percpu_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.allow_fastcall	= true,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_tai_ns)
{
	/* NMI safe access to clock tai */
	return ktime_get_tai_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
	.func		= bpf_ktime_get_tai_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
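
/* Illustrative only: the return value packs the tgid in the upper 32 bits
 * and the pid (thread id) in the lower 32, so a BPF program typically
 * splits it like this:
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;
 *	u32 pid = (u32)id;
 */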

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy_pad(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	preempt_disable();
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
	preempt_enable();
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id	= BPF_PTR_POISON,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id	= BPF_PTR_POISON,
};

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->record->spin_lock_off;
	else
		lock = dst + map->record->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}
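
/* Illustrative only: from the BPF side, bpf_spin_lock()/bpf_spin_unlock()
 * protect fields of a single map value. A made-up example layout:
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		u64 counter;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&my_map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 *
 * The verifier enforces that the lock is released on all paths; the kernel
 * side above additionally disables IRQs while it is held.
 */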

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   s64 *, res)
{
	long long _res;
	int err;

	*res = 0;
	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg4_size	= sizeof(s64),
};
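
/* Illustrative only: a BPF program parsing a decimal value from a buffer
 * (e.g. one written through a sysctl or cgroup hook); names are made up:
 *
 *	char str[] = "-42";
 *	s64 out;
 *	int consumed = bpf_strtol(str, sizeof(str) - 1, 0, &out);
 *
 * On success 'consumed' is the number of bytes eaten (here 3) and 'out'
 * is -42; base 0 means the radix is auto-detected, as with kstrtol().
 */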

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   u64 *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	*res = 0;
	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg4_size	= sizeof(u64),
};

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

static const struct bpf_func_proto bpf_strncmp_proto = {
	.func		= bpf_strncmp,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
};

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.might_sleep	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
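
/* Illustrative only: because .might_sleep is set, bpf_copy_from_user() is
 * limited to sleepable programs. A made-up sleepable fentry usage:
 *
 *	SEC("fentry.s/__x64_sys_openat")
 *	int BPF_PROG(trace_open, int dfd, const char *filename)
 *	{
 *		char path[64] = {};
 *
 *		if (!bpf_copy_from_user(path, sizeof(path) - 1, filename))
 *			bpf_printk("openat: %s", path);
 *		return 0;
 *	}
 */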

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func		= bpf_copy_from_user_task,
	.gpl_only	= true,
	.might_sleep	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_BTF_ID,
	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type	= ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}
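
/* Illustrative only: fmt_ptype above corresponds to the string conversions
 * accepted by the bprintf family of helpers. From a BPF program:
 *
 *	bpf_printk("%s", str);    // 's': kernel, or user on architectures
 *	                          //      with non-overlapping address spaces
 *	bpf_printk("%pks", str);  // 'k': explicitly a kernel string
 *	bpf_printk("%pus", str);  // 'u': explicitly a user string
 *
 * All three go through the nofault string copies, so a bad pointer yields
 * an empty string rather than a fault.
 */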

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BIN_ARGS	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char bin_args[MAX_BPRINTF_BIN_ARGS];
	char buf[MAX_BPRINTF_BUF];
};

static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
{
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);

	return 0;
}

void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{
	if (!data->bin_args && !data->buf)
		return;
	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
		return;
	this_cpu_dec(bpf_bprintf_nest_level);
	preempt_enable();
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when data->get_bin_args is false
 * - Arguments preparation: in addition to the above verification, it writes in
 *   data->bin_args a binary representation of arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data)
{
	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	struct bpf_bprintf_buffers *buffers = NULL;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (get_buffers && try_get_buffers(&buffers))
		return -EBUSY;

	if (data->get_bin_args) {
		if (num_args)
			tmp_buf = buffers->bin_args;
		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
		data->bin_args = (u32 *)tmp_buf;
	}

	if (data->get_buf)
		data->buf = buffers->buf;

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1])) {
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				goto nocopy_fmt;
			}

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf)  {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup(data);
	return err;
}

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
	};
	int err, num_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
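
/* Illustrative only: bpf_snprintf() takes its variadic arguments as an
 * array of u64, which libbpf's BPF_SNPRINTF() macro normally builds for
 * you. A hand-rolled equivalent with made-up values:
 *
 *	char out[32];
 *	u64 args[] = { 4242, (u64)(long)"widget" };
 *
 *	bpf_snprintf(out, sizeof(out), "id=%d name=%s", args, sizeof(args));
 *
 * The return value is the number of bytes the full output would need,
 * including the trailing NUL (the err + 1 above).
 */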

struct bpf_async_cb {
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
	union {
		struct rcu_head rcu;
		struct work_struct delete_work;
	};
	u64 flags;
};

/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_async_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing prog's refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref
 * frees the timers when an inner map is replaced or deleted by user space.
 */
struct bpf_hrtimer {
	struct bpf_async_cb cb;
	struct hrtimer timer;
	atomic_t cancelling;
};

struct bpf_work {
	struct bpf_async_cb cb;
	struct work_struct work;
	struct work_struct delete_work;
};

/* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
struct bpf_async_kern {
	union {
		struct bpf_async_cb *cb;
		struct bpf_hrtimer *timer;
		struct bpf_work *work;
	};
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

enum bpf_async_type {
	BPF_ASYNC_TYPE_TIMER = 0,
	BPF_ASYNC_TYPE_WQ,
};

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
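
/* Illustrative only: the life cycle described above, seen from the BPF
 * side, with made-up map/callback names:
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;	// must return 0, enforced by the verifier
 *	}
 *
 *	struct elem *val = bpf_map_lookup_elem(&timer_map, &key);
 *	if (val) {
 *		bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 1000000, 0);	// 1 ms, relative
 *	}
 */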

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->cb.map;
	void *value = t->cb.value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

static void bpf_wq_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, work);
	struct bpf_async_cb *cb = &w->cb;
	struct bpf_map *map = cb->map;
	bpf_callback_t callback_fn;
	void *value = cb->value;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_wq);

	callback_fn = READ_ONCE(cb->callback_fn);
	if (!callback_fn)
		return;

	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	rcu_read_lock_trace();
	migrate_disable();

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);

	migrate_enable();
	rcu_read_unlock_trace();
}

static void bpf_wq_delete_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);

	cancel_work_sync(&w->work);

	kfree_rcu(w, cb.rcu);
}

static void bpf_timer_delete_work(struct work_struct *work)
{
	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);

	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call
	 * kfree_rcu(t) right after for both preallocated and non-preallocated
	 * maps. The async->cb = NULL was already done and no code path can see
	 * address 't' anymore. A timer armed on this bpf_hrtimer before
	 * bpf_timer_cancel_and_free() will have been cancelled.
	 */
	hrtimer_cancel(&t->timer);
	kfree_rcu(t, cb.rcu);
}

static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
			    enum bpf_async_type type)
{
	struct bpf_async_cb *cb;
	struct bpf_hrtimer *t;
	struct bpf_work *w;
	clockid_t clockid;
	size_t size;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		size = sizeof(struct bpf_hrtimer);
		break;
	case BPF_ASYNC_TYPE_WQ:
		size = sizeof(struct bpf_work);
		break;
	default:
		return -EINVAL;
	}

	__bpf_spin_lock_irqsave(&async->lock);
	t = async->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}

	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		clockid = flags & (MAX_CLOCKS - 1);
		t = (struct bpf_hrtimer *)cb;

		atomic_set(&t->cancelling, 0);
		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
		hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT);
		cb->value = (void *)async - map->record->timer_off;
		break;
	case BPF_ASYNC_TYPE_WQ:
		w = (struct bpf_work *)cb;

		INIT_WORK(&w->work, bpf_wq_work);
		INIT_WORK(&w->delete_work, bpf_wq_delete_work);
		cb->value = (void *)async - map->record->wq_off;
		break;
	}
	cb->map = map;
	cb->prog = NULL;
	cb->flags = flags;
	rcu_assign_pointer(cb->callback_fn, NULL);

	WRITE_ONCE(async->cb, cb);
	/* Guarantee the order between async->cb and map->usercnt. So
	 * when there are concurrent uref release and bpf timer init, either
	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
	 * timer or atomic64_read() below returns a zero usercnt.
	 */
	smp_mb();
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		WRITE_ONCE(async->cb, NULL);
		kfree(cb);
		ret = -EPERM;
	}
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return ret;
}

BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clock_t clockid = flags & (MAX_CLOCKS - 1);

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;

	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func		= bpf_timer_init,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
				    struct bpf_prog_aux *aux, unsigned int flags,
				    enum bpf_async_type type)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_async_cb *cb;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&async->lock);
	cb = async->cb;
	if (!cb) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&cb->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = cb->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		cb->prog = prog;
	}
	rcu_assign_pointer(cb->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return ret;
}

BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func		= bpf_timer_set_callback,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;
	enum hrtimer_mode mode;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->cb.prog) {
		ret = -EINVAL;
		goto out;
	}

	if (flags & BPF_F_TIMER_ABS)
		mode = HRTIMER_MODE_ABS_SOFT;
	else
		mode = HRTIMER_MODE_REL_SOFT;

	if (flags & BPF_F_TIMER_CPU_PIN)
		mode |= HRTIMER_MODE_PINNED;

	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func		= bpf_timer_start,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static void drop_prog_refcnt(struct bpf_async_cb *async)
{
	struct bpf_prog *prog = async->prog;

	if (prog) {
		bpf_prog_put(prog);
		async->prog = NULL;
		rcu_assign_pointer(async->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
{
	struct bpf_hrtimer *t, *cur_t;
	bool inc = false;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	rcu_read_lock();
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}

	cur_t = this_cpu_read(hrtimer_running);
	if (cur_t == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish.
		 */
		ret = -EDEADLK;
		goto out;
	}

	/* Only account in-flight cancellations when invoked from a timer
	 * callback, since we want to avoid waiting only if other _callbacks_
	 * are waiting on us, to avoid introducing lockups. Non-callback paths
	 * are ok, since nobody would synchronously wait for their completion.
	 */
	if (!cur_t)
		goto drop;
	atomic_inc(&t->cancelling);
	/* Need full barrier after relaxed atomic_inc */
	smp_mb__after_atomic();
	inc = true;
	if (atomic_read(&cur_t->cancelling)) {
		/* We're cancelling timer t, while some other timer callback is
		 * attempting to cancel us. In such a case, it might be possible
		 * that timer t belongs to the other callback, or some other
		 * callback waiting upon it (creating transitive dependencies
		 * upon us), and we will enter a deadlock if we continue
		 * cancelling and waiting for it synchronously, since it might
		 * do the same. Bail!
		 */
		ret = -EDEADLK;
		goto out;
	}
drop:
	drop_prog_refcnt(&t->cb);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	if (inc)
		atomic_dec(&t->cancelling);
	rcu_read_unlock();
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func		= bpf_timer_cancel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
};

static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
{
	struct bpf_async_cb *cb;

	/* Performance optimization: read async->cb without lock first. */
	if (!READ_ONCE(async->cb))
		return NULL;

	__bpf_spin_lock_irqsave(&async->lock);
	/* re-read it under lock */
	cb = async->cb;
	if (!cb)
		goto out;
	drop_prog_refcnt(cb);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	WRITE_ONCE(async->cb, NULL);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return cb;
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_hrtimer *t;

	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);

	if (!t)
		return;
	/* We check that bpf_map_delete/update_elem() was called from timer
	 * callback_fn. In such case we don't call hrtimer_cancel() (since it
	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
	 * just return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since async->cb = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 *
	 * However, it is possible the timer callback_fn calling us armed the
	 * timer _before_ calling us, such that failing to cancel it here will
	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
	 * Therefore, we _need_ to cancel any outstanding timers before we do
	 * kfree_rcu, even though no more timers can be armed.
	 *
	 * Moreover, we need to schedule work even if timer does not belong to
	 * the calling callback_fn, as on two different CPUs, we can end up in a
	 * situation where both sides run in parallel, try to cancel one
	 * another, and we end up waiting on both sides in hrtimer_cancel
	 * without making forward progress, since timer1 depends on timer2's
	 * callback to finish, and vice versa.
	 *
	 * CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
	 * bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
	 *
	 * To avoid these issues, punt to workqueue context when we are in a
	 * timer callback.
	 */
	if (this_cpu_read(hrtimer_running)) {
		queue_work(system_unbound_wq, &t->cb.delete_work);
		return;
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/* If the timer is running on other CPU, also use a kworker to
		 * wait for the completion of the timer instead of trying to
		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
		 * completion.
		 */
		if (hrtimer_try_to_cancel(&t->timer) >= 0)
			kfree_rcu(t, cb.rcu);
		else
			queue_work(system_unbound_wq, &t->cb.delete_work);
	} else {
		bpf_timer_delete_work(&t->cb.delete_work);
	}
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_wq_cancel_and_free(void *val)
{
	struct bpf_work *work;

	BTF_TYPE_EMIT(struct bpf_wq);

	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
	if (!work)
		return;
	/* Trigger cancel of the sleepable work, but *do not* wait for
	 * it to finish if it was running as we might not be in a
	 * sleepable context.
	 * kfree will be called once the work has finished.
	 */
	schedule_work(&work->delete_work);
}

BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
{
	unsigned long *kptr = dst;

	/* This helper may be inlined by verifier. */
	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote type that verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func		= bpf_kptr_xchg,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id	= BPF_PTR_POISON,
	.arg1_type	= ARG_KPTR_XCHG_DEST,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id	= BPF_PTR_POISON,
};
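
/* Illustrative only: a BPF-side sketch of exchanging a kptr stored in a
 * map value; struct and map names are made up, and 'acquired' stands for
 * a pointer obtained from an acquire kfunc such as bpf_task_acquire():
 *
 *	struct map_val {
 *		struct task_struct __kptr *task;
 *	};
 *
 *	struct map_val *v = bpf_map_lookup_elem(&kptr_map, &key);
 *	if (v) {
 *		struct task_struct *old;
 *
 *		// ownership of 'acquired' moves into the map; the previous
 *		// pointer (if any) comes back out and must be released
 *		old = bpf_kptr_xchg(&v->task, acquired);
 *		if (old)
 *			bpf_task_release(old);
 *	}
 */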

/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
#define DYNPTR_TYPE_SHIFT	28
#define DYNPTR_SIZE_MASK	0xFFFFFF
#define DYNPTR_RDONLY_BIT	BIT(31)

bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_RDONLY_BIT;
}

void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
	ptr->size |= DYNPTR_RDONLY_BIT;
}

static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{
	ptr->size |= type << DYNPTR_TYPE_SHIFT;
}

static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{
	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
}

u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_SIZE_MASK;
}

static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
{
	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;

	ptr->size = new_size | metadata;
}

int bpf_dynptr_check_size(u32 size)
{
	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
}

void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
		     enum bpf_dynptr_type type, u32 offset, u32 size)
{
	ptr->data = data;
	ptr->offset = offset;
	ptr->size = size;
	bpf_dynptr_set_type(ptr, type);
}

void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{
	memset(ptr, 0, sizeof(*ptr));
}

BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
{
	int err;

	BTF_TYPE_EMIT(struct bpf_dynptr);

	err = bpf_dynptr_check_size(size);
	if (err)
		goto error;

	/* flags is currently unsupported */
	if (flags) {
		err = -EINVAL;
		goto error;
	}

	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);

	return 0;

error:
	bpf_dynptr_set_null(ptr);
	return err;
}

static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
	.func		= bpf_dynptr_from_mem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
};

static int __bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr_kern *src,
			     u32 offset, u64 flags)
{
	enum bpf_dynptr_type type;
	int err;

	if (!src->data || flags)
		return -EINVAL;

	err = bpf_dynptr_check_off_len(src, offset, len);
	if (err)
		return err;

	type = bpf_dynptr_get_type(src);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
		 */
		memmove(dst, src->data + src->offset + offset, len);
		return 0;
	case BPF_DYNPTR_TYPE_SKB:
		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
	case BPF_DYNPTR_TYPE_XDP:
		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
	default:
		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
		return -EFAULT;
	}
}

BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
	   u32, offset, u64, flags)
{
	return __bpf_dynptr_read(dst, len, src, offset, flags);
}

static const struct bpf_func_proto bpf_dynptr_read_proto = {
	.func		= bpf_dynptr_read,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
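
/* Illustrative only: a typical local-dynptr round trip from the BPF side,
 * with made-up buffer names:
 *
 *	char data[16];
 *	char copy[8];
 *	struct bpf_dynptr dptr;
 *
 *	if (!bpf_dynptr_from_mem(data, sizeof(data), 0, &dptr)) {
 *		bpf_dynptr_write(&dptr, 0, "hi", 2, 0);
 *		bpf_dynptr_read(copy, 2, &dptr, 0, 0);
 *	}
 *
 * The size/offset checks and the rdonly bit are enforced by the accessors
 * above, so out-of-bounds reads and writes fail with an error instead of
 * corrupting memory.
 */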

int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src,
		       u32 len, u64 flags)
{
	enum bpf_dynptr_type type;
	int err;

	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
		return -EINVAL;

	err = bpf_dynptr_check_off_len(dst, offset, len);
	if (err)
		return err;

	type = bpf_dynptr_get_type(dst);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		if (flags)
			return -EINVAL;
		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
		 */
		memmove(dst->data + dst->offset + offset, src, len);
		return 0;
	case BPF_DYNPTR_TYPE_SKB:
		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
					     flags);
	case BPF_DYNPTR_TYPE_XDP:
		if (flags)
			return -EINVAL;
		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
	default:
		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
		return -EFAULT;
	}
}

BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
	   u32, len, u64, flags)
{
	return __bpf_dynptr_write(dst, offset, src, len, flags);
}

static const struct bpf_func_proto bpf_dynptr_write_proto = {
	.func		= bpf_dynptr_write,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
{
	enum bpf_dynptr_type type;
	int err;

	if (!ptr->data)
		return 0;

	err = bpf_dynptr_check_off_len(ptr, offset, len);
	if (err)
		return 0;

	if (__bpf_dynptr_is_rdonly(ptr))
		return 0;

	type = bpf_dynptr_get_type(ptr);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		return (unsigned long)(ptr->data + ptr->offset + offset);
	case BPF_DYNPTR_TYPE_SKB:
	case BPF_DYNPTR_TYPE_XDP:
		/* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
		return 0;
	default:
		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
		return 0;
	}
}

static const struct bpf_func_proto bpf_dynptr_data_proto = {
	.func		= bpf_dynptr_data,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_DYNPTR_MEM_OR_NULL,
	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
};

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
const struct bpf_func_proto bpf_perf_event_read_proto __weak;
const struct bpf_func_proto bpf_send_signal_proto __weak;
const struct bpf_func_proto bpf_send_signal_thread_proto __weak;
const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak;
const struct bpf_func_proto bpf_get_task_stack_proto __weak;
const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_tai_ns:
		return &bpf_ktime_get_tai_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_strncmp:
		return &bpf_strncmp_proto;
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	default:
		break;
	}

	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_timer_init:
		return &bpf_timer_init_proto;
	case BPF_FUNC_timer_set_callback:
		return &bpf_timer_set_callback_proto;
	case BPF_FUNC_timer_start:
		return &bpf_timer_start_proto;
	case BPF_FUNC_timer_cancel:
		return &bpf_timer_cancel_proto;
	case BPF_FUNC_kptr_xchg:
		return &bpf_kptr_xchg_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_loop:
		return &bpf_loop_proto;
	case BPF_FUNC_user_ringbuf_drain:
		return &bpf_user_ringbuf_drain_proto;
	case BPF_FUNC_ringbuf_reserve_dynptr:
		return &bpf_ringbuf_reserve_dynptr_proto;
	case BPF_FUNC_ringbuf_submit_dynptr:
		return &bpf_ringbuf_submit_dynptr_proto;
	case BPF_FUNC_ringbuf_discard_dynptr:
		return &bpf_ringbuf_discard_dynptr_proto;
	case BPF_FUNC_dynptr_from_mem:
		return &bpf_dynptr_from_mem_proto;
	case BPF_FUNC_dynptr_read:
		return &bpf_dynptr_read_proto;
	case BPF_FUNC_dynptr_write:
		return &bpf_dynptr_write_proto;
	case BPF_FUNC_dynptr_data:
		return &bpf_dynptr_data_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_cgrp_storage_get:
		return &bpf_cgrp_storage_get_proto;
	case BPF_FUNC_cgrp_storage_delete:
		return &bpf_cgrp_storage_delete_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_get_current_ancestor_cgroup_id:
		return &bpf_get_current_ancestor_cgroup_id_proto;
	case BPF_FUNC_get_current_ancestor_cgroup_id:
		return &bpf_get_current_ancestor_cgroup_id_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
#endif
#ifdef CONFIG_CGROUP_NET_CLASSID
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_curr_proto;
#endif
	case BPF_FUNC_task_storage_get:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_get_recur_proto;
		return &bpf_task_storage_get_proto;
	case BPF_FUNC_task_storage_delete:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_delete_recur_proto;
		return &bpf_task_storage_delete_proto;
	default:
		break;
	}

	if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_copy_from_user:
		return &bpf_copy_from_user_proto;
	case BPF_FUNC_copy_from_user_task:
		return &bpf_copy_from_user_task_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	case BPF_FUNC_perf_event_read_value:
		return bpf_get_perf_event_read_value_proto();
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_get_task_stack:
		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
				       : &bpf_get_task_stack_proto;
	case BPF_FUNC_get_branch_snapshot:
		return &bpf_get_branch_snapshot_proto;
	case BPF_FUNC_find_vma:
		return &bpf_find_vma_proto;
	default:
		return NULL;
	}
}
EXPORT_SYMBOL_GPL(bpf_base_func_proto);
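
/* Example (illustrative sketch only, not part of this file): a subsystem's
 * verifier_ops->get_func_proto callback conventionally handles its own
 * helpers first and falls back to the shared set above. The name
 * "my_subsys_event_output_proto" is hypothetical.
 *
 *	static const struct bpf_func_proto *
 *	my_subsys_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_perf_event_output:
 *			return &my_subsys_event_output_proto;
 *		default:
 *			return bpf_base_func_proto(func_id, prog);
 *		}
 *	}
 */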
void bpf_list_head_free(const struct btf_field *field, void *list_head,
			struct bpf_spin_lock *spin_lock)
{
	struct list_head *head = list_head, *orig_head = list_head;

	BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
	BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));

	/* Do the actual list draining outside the lock to not hold the lock for
	 * too long, and also prevent deadlocks if tracing programs end up
	 * executing on entry/exit of functions called inside the critical
	 * section, and end up doing map ops that call bpf_list_head_free for
	 * the same map value again.
	 */
	__bpf_spin_lock_irqsave(spin_lock);
	if (!head->next || list_empty(head))
		goto unlock;
	head = head->next;
unlock:
	INIT_LIST_HEAD(orig_head);
	__bpf_spin_unlock_irqrestore(spin_lock);

	while (head != orig_head) {
		void *obj = head;

		obj -= field->graph_root.node_offset;
		head = head->next;
		/* The contained type can also have resources, including a
		 * bpf_list_head which needs to be freed.
		 */
		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
	}
}

/* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
 * 'rb_node *', so the field name of the rb_node within the containing
 * struct is not needed.
 *
 * Since bpf_rb_tree's node type has a corresponding struct btf_field with
 * graph_root.node_offset, it's not necessary to know the field name or the
 * type of the node struct.
 */
#define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
	for (pos = rb_first_postorder(root); \
	     pos && ({ n = rb_next_postorder(pos); 1; }); \
	     pos = n)

void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
		      struct bpf_spin_lock *spin_lock)
{
	struct rb_root_cached orig_root, *root = rb_root;
	struct rb_node *pos, *n;
	void *obj;

	BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
	BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));

	__bpf_spin_lock_irqsave(spin_lock);
	orig_root = *root;
	*root = RB_ROOT_CACHED;
	__bpf_spin_unlock_irqrestore(spin_lock);

	bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
		obj = pos;
		obj -= field->graph_root.node_offset;

		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
	}
}

__bpf_kfunc_start_defs();

__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{
	struct btf_struct_meta *meta = meta__ign;
	u64 size = local_type_id__k;
	void *p;

	p = bpf_mem_alloc(&bpf_global_ma, size);
	if (!p)
		return NULL;
	if (meta)
		bpf_obj_init(meta->record, p);
	return p;
}

__bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{
	u64 size = local_type_id__k;

	/* The verifier has ensured that meta__ign must be NULL */
	return bpf_mem_alloc(&bpf_global_percpu_ma, size);
}

/* Must be called under migrate_disable(), as required by bpf_mem_free */
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
{
	struct bpf_mem_alloc *ma;

	if (rec && rec->refcount_off >= 0 &&
	    !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
		/* Object is refcounted and refcount_dec didn't result in 0
		 * refcount. Return without freeing the object.
		 */
		return;
	}

	if (rec)
		bpf_obj_free_fields(rec, p);

	if (percpu)
		ma = &bpf_global_percpu_ma;
	else
		ma = &bpf_global_ma;
	bpf_mem_free_rcu(ma, p);
}

__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
{
	struct btf_struct_meta *meta = meta__ign;
	void *p = p__alloc;

	__bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
}
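
/* Example (illustrative sketch only, not part of this file): from a BPF
 * program, allocation and release pair up via the bpf_obj_new()/bpf_obj_drop()
 * convenience wrappers (as in the selftests' bpf_experimental.h), which supply
 * the hidden __impl arguments. The struct name "node_data" is hypothetical.
 *
 *	struct node_data *n;
 *
 *	n = bpf_obj_new(typeof(*n));
 *	if (!n)
 *		return 0;
 *	// ... initialize and use n ...
 *	bpf_obj_drop(n);
 */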
__bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
{
	/* The verifier has ensured that meta__ign must be NULL */
	bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
}

__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
{
	struct btf_struct_meta *meta = meta__ign;
	struct bpf_refcount *ref;

	/* Could just cast directly to refcount_t *, but need some code using
	 * bpf_refcount type so that it is emitted in vmlinux BTF
	 */
	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
	if (!refcount_inc_not_zero((refcount_t *)ref))
		return NULL;

	/* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
	 * in verifier.c
	 */
	return (void *)p__refcounted_kptr;
}

static int __bpf_list_add(struct bpf_list_node_kern *node,
			  struct bpf_list_head *head,
			  bool tail, struct btf_record *rec, u64 off)
{
	struct list_head *n = &node->list_head, *h = (void *)head;

	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
	 * called on its fields, so init here
	 */
	if (unlikely(!h->next))
		INIT_LIST_HEAD(h);

	/* node->owner != NULL implies !list_empty(n), no need to separately
	 * check the latter
	 */
	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
		/* Only called from BPF prog, no need to migrate_disable */
		__bpf_obj_drop_impl((void *)n - off, rec, false);
		return -EINVAL;
	}

	tail ? list_add_tail(n, h) : list_add(n, h);
	WRITE_ONCE(node->owner, head);

	return 0;
}

__bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
					 struct bpf_list_node *node,
					 void *meta__ign, u64 off)
{
	struct bpf_list_node_kern *n = (void *)node;
	struct btf_struct_meta *meta = meta__ign;

	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
}

__bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
					struct bpf_list_node *node,
					void *meta__ign, u64 off)
{
	struct bpf_list_node_kern *n = (void *)node;
	struct btf_struct_meta *meta = meta__ign;

	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
}
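
/* Example (illustrative sketch only, not part of this file): BPF-side usage
 * through the bpf_list_push_back() wrapper. "head" and "lock" are a
 * hypothetical bpf_list_head and bpf_spin_lock sharing a map value; the
 * verifier requires the lock to be held around list operations.
 *
 *	struct node_data *n = bpf_obj_new(typeof(*n));
 *
 *	if (!n)
 *		return 0;
 *	bpf_spin_lock(&lock);
 *	bpf_list_push_back(&head, &n->node);	// the list now owns n
 *	bpf_spin_unlock(&lock);
 */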
static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
{
	struct list_head *n, *h = (void *)head;
	struct bpf_list_node_kern *node;

	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
	 * called on its fields, so init here
	 */
	if (unlikely(!h->next))
		INIT_LIST_HEAD(h);
	if (list_empty(h))
		return NULL;

	n = tail ? h->prev : h->next;
	node = container_of(n, struct bpf_list_node_kern, list_head);
	if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
		return NULL;

	list_del_init(n);
	WRITE_ONCE(node->owner, NULL);
	return (struct bpf_list_node *)n;
}

__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
{
	return __bpf_list_del(head, false);
}

__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
{
	return __bpf_list_del(head, true);
}

__bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head)
{
	struct list_head *h = (struct list_head *)head;

	if (list_empty(h) || unlikely(!h->next))
		return NULL;

	return (struct bpf_list_node *)h->next;
}

__bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head)
{
	struct list_head *h = (struct list_head *)head;

	if (list_empty(h) || unlikely(!h->next))
		return NULL;

	return (struct bpf_list_node *)h->prev;
}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
						  struct bpf_rb_node *node)
{
	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
	struct rb_root_cached *r = (struct rb_root_cached *)root;
	struct rb_node *n = &node_internal->rb_node;

	/* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
	 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
	 */
	if (READ_ONCE(node_internal->owner) != root)
		return NULL;

	rb_erase_cached(n, r);
	RB_CLEAR_NODE(n);
	WRITE_ONCE(node_internal->owner, NULL);
	return (struct bpf_rb_node *)n;
}

/* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
 * program
 */
static int __bpf_rbtree_add(struct bpf_rb_root *root,
			    struct bpf_rb_node_kern *node,
			    void *less, struct btf_record *rec, u64 off)
{
	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
	struct rb_node *parent = NULL, *n = &node->rb_node;
	bpf_callback_t cb = (bpf_callback_t)less;
	bool leftmost = true;

	/* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
	 * check the latter
	 */
	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
		/* Only called from BPF prog, no need to migrate_disable */
		__bpf_obj_drop_impl((void *)n - off, rec, false);
		return -EINVAL;
	}

	while (*link) {
		parent = *link;
		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(n, parent, link);
	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
	WRITE_ONCE(node->owner, root);
	return 0;
}

__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
				    void *meta__ign, u64 off)
{
	struct btf_struct_meta *meta = meta__ign;
	struct bpf_rb_node_kern *n = (void *)node;

	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
}
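
/* Example (illustrative sketch only, not part of this file): BPF-side
 * insertion through the bpf_rbtree_add() wrapper, with the 'less' comparator
 * compiled as part of the BPF program. "node_data", "groot" and "glock" are
 * hypothetical; the tree's bpf_spin_lock must be held.
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->node, less);	// the tree takes ownership of n
 *	bpf_spin_unlock(&glock);
 */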
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
{
	struct rb_root_cached *r = (struct rb_root_cached *)root;

	return (struct bpf_rb_node *)rb_first_cached(r);
}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
{
	struct rb_root_cached *r = (struct rb_root_cached *)root;

	return (struct bpf_rb_node *)r->rb_root.rb_node;
}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
{
	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;

	if (READ_ONCE(node_internal->owner) != root)
		return NULL;

	return (struct bpf_rb_node *)node_internal->rb_node.rb_left;
}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
{
	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;

	if (READ_ONCE(node_internal->owner) != root)
		return NULL;

	return (struct bpf_rb_node *)node_internal->rb_node.rb_right;
}

/**
 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
 * kfunc that is not stored in a map as a kptr must be released by calling
 * bpf_task_release().
 * @p: The task on which a reference is being acquired.
 */
__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
{
	if (refcount_inc_not_zero(&p->rcu_users))
		return p;
	return NULL;
}

/**
 * bpf_task_release - Release the reference acquired on a task.
 * @p: The task on which a reference is being released.
 */
__bpf_kfunc void bpf_task_release(struct task_struct *p)
{
	put_task_struct_rcu_user(p);
}

__bpf_kfunc void bpf_task_release_dtor(void *p)
{
	put_task_struct_rcu_user(p);
}
CFI_NOSEAL(bpf_task_release_dtor);

#ifdef CONFIG_CGROUPS
/**
 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
 * this kfunc that is not stored in a map as a kptr must be released by
 * calling bpf_cgroup_release().
 * @cgrp: The cgroup on which a reference is being acquired.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
{
	return cgroup_tryget(cgrp) ? cgrp : NULL;
}

/**
 * bpf_cgroup_release - Release the reference acquired on a cgroup.
 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
 * not be freed until the current grace period has ended, even if its refcount
 * drops to 0.
 * @cgrp: The cgroup on which a reference is being released.
 */
__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
{
	cgroup_put(cgrp);
}

__bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
{
	cgroup_put(cgrp);
}
CFI_NOSEAL(bpf_cgroup_release_dtor);
/**
 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
 * array. A cgroup returned by this kfunc that is not subsequently stored in a
 * map must be released by calling bpf_cgroup_release().
 * @cgrp: The cgroup for which we're performing a lookup.
 * @level: The level of ancestor to look up.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{
	struct cgroup *ancestor;

	if (level > cgrp->level || level < 0)
		return NULL;

	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
	ancestor = cgrp->ancestors[level];
	if (!cgroup_tryget(ancestor))
		return NULL;
	return ancestor;
}

/**
 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
 * kfunc that is not subsequently stored in a map must be released by calling
 * bpf_cgroup_release().
 * @cgid: cgroup id.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
{
	struct cgroup *cgrp;

	cgrp = cgroup_get_from_id(cgid);
	if (IS_ERR(cgrp))
		return NULL;
	return cgrp;
}

/**
 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
 * task's membership of cgroup ancestry.
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
__bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
				       struct cgroup *ancestor)
{
	long ret;

	rcu_read_lock();
	ret = task_under_cgroup_hierarchy(task, ancestor);
	rcu_read_unlock();
	return ret;
}

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

/**
 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
 * hierarchy ID.
 * @task: The target task
 * @hierarchy_id: The ID of a cgroup1 hierarchy
 *
 * On success, the cgroup is returned. On failure, NULL is returned.
 */
__bpf_kfunc struct cgroup *
bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
{
	struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);

	if (IS_ERR(cgrp))
		return NULL;
	return cgrp;
}
#endif /* CONFIG_CGROUPS */
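
/* Example (illustrative sketch only, not part of this file): the
 * acquire/release discipline for these cgroup kfuncs from a BPF program;
 * "cgid" is a hypothetical cgroup ID obtained elsewhere.
 *
 *	struct cgroup *cgrp = bpf_cgroup_from_id(cgid);
 *
 *	if (!cgrp)
 *		return 0;
 *	// ... inspect cgrp ...
 *	bpf_cgroup_release(cgrp);
 */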
/**
 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
 * in the root pid namespace idr. If a task is returned, it must either be
 * stored in a map, or released with bpf_task_release().
 * @pid: The pid of the task being looked up.
 */
__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_pid_ns(pid, &init_pid_ns);
	if (p)
		p = bpf_task_acquire(p);
	rcu_read_unlock();

	return p;
}

/**
 * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up
 * in the pid namespace of the current task. If a task is returned, it must
 * either be stored in a map, or released with bpf_task_release().
 * @vpid: The vpid of the task being looked up.
 */
__bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(vpid);
	if (p)
		p = bpf_task_acquire(p);
	rcu_read_unlock();

	return p;
}

/**
 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
 * @p: The dynptr whose data slice to retrieve
 * @offset: Offset into the dynptr
 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
 *               length of the requested slice. This must be a constant.
 *
 * For non-skb and non-xdp type dynptrs, there is no difference between
 * bpf_dynptr_slice and bpf_dynptr_data.
 *
 * If @buffer__opt is NULL, the call will fail if @buffer__opt was needed.
 *
 * If the intention is to write to the data slice, please use
 * bpf_dynptr_slice_rdwr.
 *
 * The user must check that the returned pointer is not null before using it.
 *
 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
 * does not change the underlying packet data pointers, so a call to
 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
 * the bpf program.
 *
 * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a read-only
 * data slice (can be either a direct pointer to the data or a pointer to the
 * user-provided buffer, with its contents containing the data, if unable to
 * obtain a direct pointer)
 */
__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
				   void *buffer__opt, u32 buffer__szk)
{
	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
	enum bpf_dynptr_type type;
	u32 len = buffer__szk;
	int err;

	if (!ptr->data)
		return NULL;

	err = bpf_dynptr_check_off_len(ptr, offset, len);
	if (err)
		return NULL;

	type = bpf_dynptr_get_type(ptr);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		return ptr->data + ptr->offset + offset;
	case BPF_DYNPTR_TYPE_SKB:
		if (buffer__opt)
			return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
		else
			return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
	case BPF_DYNPTR_TYPE_XDP:
	{
		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);

		if (!IS_ERR_OR_NULL(xdp_ptr))
			return xdp_ptr;

		if (!buffer__opt)
			return NULL;
		bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
		return buffer__opt;
	}
	default:
		WARN_ONCE(true, "unknown dynptr type %d\n", type);
		return NULL;
	}
}

/**
 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
 * @p: The dynptr whose data slice to retrieve
 * @offset: Offset into the dynptr
 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
 *               length of the requested slice. This must be a constant.
 *
 * For non-skb and non-xdp type dynptrs, there is no difference between
 * bpf_dynptr_slice and bpf_dynptr_data.
 *
 * If @buffer__opt is NULL, the call will fail if @buffer__opt was needed.
 *
 * The returned pointer is writable and may point either directly to the dynptr
 * data at the requested offset or to the buffer if unable to obtain a direct
 * data pointer (example: the requested slice is to the paged area of an skb
 * packet). In the case where the returned pointer is to the buffer, the user
 * is responsible for persisting writes through calling bpf_dynptr_write(). This
 * usually looks something like this pattern:
 *
 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
 * if (!eth)
 *	return TC_ACT_SHOT;
 *
 * // mutate eth header //
 *
 * if (eth == buffer)
 *	bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
 *
 * Please note that, as in the example above, the user must check that the
 * returned pointer is not null before using it.
 *
 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
 * does not change the underlying packet data pointers, so a call to
 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
 * the bpf program.
 *
 * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a
 * data slice (can be either a direct pointer to the data or a pointer to the
 * user-provided buffer, with its contents containing the data, if unable to
 * obtain a direct pointer)
 */
__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
					void *buffer__opt, u32 buffer__szk)
{
	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;

	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
		return NULL;

	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
	 *
	 * For skb-type dynptrs, it is safe to write into the returned pointer
	 * if the bpf program allows skb data writes. There are two possibilities
	 * that may occur when calling bpf_dynptr_slice_rdwr:
	 *
	 * 1) The requested slice is in the head of the skb. In this case, the
	 * returned pointer is directly to skb data, and if the skb is cloned, the
	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
	 * The pointer can be directly written into.
	 *
	 * 2) Some portion of the requested slice is in the paged buffer area.
	 * In this case, the requested data will be copied out into the buffer
	 * and the returned pointer will be a pointer to the buffer. The skb
	 * will not be pulled. To persist the write, the user will need to call
	 * bpf_dynptr_write(), which will pull the skb and commit the write.
	 *
	 * Similarly for xdp programs, if the requested slice is not across xdp
	 * fragments, then a direct pointer will be returned, otherwise the data
	 * will be copied out into the buffer and the user will need to call
	 * bpf_dynptr_write() to commit changes.
	 */
	return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
}
__bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
{
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
	u32 size;

	if (!ptr->data || start > end)
		return -EINVAL;

	size = __bpf_dynptr_size(ptr);

	if (start > size || end > size)
		return -ERANGE;

	ptr->offset += start;
	bpf_dynptr_set_size(ptr, end - start);

	return 0;
}

__bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
{
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;

	return !ptr->data;
}

__bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
{
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;

	if (!ptr->data)
		return false;

	return __bpf_dynptr_is_rdonly(ptr);
}

__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
{
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;

	if (!ptr->data)
		return -EINVAL;

	return __bpf_dynptr_size(ptr);
}

__bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
				 struct bpf_dynptr *clone__uninit)
{
	struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;

	if (!ptr->data) {
		bpf_dynptr_set_null(clone);
		return -EINVAL;
	}

	*clone = *ptr;

	return 0;
}
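
/* Example (illustrative sketch only, not part of this file): narrowing a
 * view of a dynptr without disturbing the original, by cloning first.
 * "ptr" is an already-initialized dynptr in the BPF program.
 *
 *	struct bpf_dynptr clone;
 *
 *	if (bpf_dynptr_clone(&ptr, &clone))
 *		return 0;
 *	// restrict the clone to bytes [4, 8) of the original view
 *	if (bpf_dynptr_adjust(&clone, 4, 8))
 *		return 0;
 */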
/**
 * bpf_dynptr_copy() - Copy data from one dynptr to another.
 * @dst_ptr: Destination dynptr - where data should be copied to
 * @dst_off: Offset into the destination dynptr
 * @src_ptr: Source dynptr - where data should be copied from
 * @src_off: Offset into the source dynptr
 * @size: Length of the data to copy from source to destination
 *
 * Copies data from source dynptr to destination dynptr.
 * Returns 0 on success; negative error, otherwise.
 */
__bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off,
				struct bpf_dynptr *src_ptr, u32 src_off, u32 size)
{
	struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr;
	struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr;
	void *src_slice, *dst_slice;
	char buf[256];
	u32 off;

	src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size);
	dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size);

	if (src_slice && dst_slice) {
		memmove(dst_slice, src_slice, size);
		return 0;
	}

	if (src_slice)
		return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0);

	if (dst_slice)
		return __bpf_dynptr_read(dst_slice, size, src, src_off, 0);

	if (bpf_dynptr_check_off_len(dst, dst_off, size) ||
	    bpf_dynptr_check_off_len(src, src_off, size))
		return -E2BIG;

	off = 0;
	while (off < size) {
		u32 chunk_sz = min_t(u32, sizeof(buf), size - off);
		int err;

		err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0);
		if (err)
			return err;
		err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0);
		if (err)
			return err;

		off += chunk_sz;
	}
	return 0;
}
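
/* Example (illustrative sketch only, not part of this file): copying the
 * first 16 bytes of one dynptr into another, e.g. from an skb dynptr into a
 * ringbuf-backed dynptr; both "src" and "dst" are assumed initialized.
 *
 *	if (bpf_dynptr_copy(&dst, 0, &src, 0, 16))
 *		return 0;	// out of range, or destination is read-only
 */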
__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
{
	return obj;
}

__bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
{
	return (void *)obj__ign;
}

__bpf_kfunc void bpf_rcu_read_lock(void)
{
	rcu_read_lock();
}

__bpf_kfunc void bpf_rcu_read_unlock(void)
{
	rcu_read_unlock();
}

struct bpf_throw_ctx {
	struct bpf_prog_aux *aux;
	u64 sp;
	u64 bp;
	int cnt;
};

static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
{
	struct bpf_throw_ctx *ctx = cookie;
	struct bpf_prog *prog;

	if (!is_bpf_text_address(ip))
		return !ctx->cnt;
	prog = bpf_prog_ksym_find(ip);
	ctx->cnt++;
	if (bpf_is_subprog(prog))
		return true;
	ctx->aux = prog->aux;
	ctx->sp = sp;
	ctx->bp = bp;
	return false;
}

__bpf_kfunc void bpf_throw(u64 cookie)
{
	struct bpf_throw_ctx ctx = {};

	arch_bpf_stack_walk(bpf_stack_walker, &ctx);
	WARN_ON_ONCE(!ctx.aux);
	if (ctx.aux)
		WARN_ON_ONCE(!ctx.aux->exception_boundary);
	WARN_ON_ONCE(!ctx.bp);
	WARN_ON_ONCE(!ctx.cnt);
	/* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
	 * deeper stack depths than ctx.sp as we do not return from bpf_throw,
	 * which skips compiler generated instrumentation to do the same.
	 */
	kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
	ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
	WARN(1, "A call to BPF exception callback should never return\n");
}

__bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
{
	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
	struct bpf_map *map = p__map;

	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));

	if (flags)
		return -EINVAL;

	return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
}

__bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
{
	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
	struct bpf_work *w;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags)
		return -EINVAL;
	w = READ_ONCE(async->work);
	if (!w || !READ_ONCE(w->cb.prog))
		return -EINVAL;

	schedule_work(&w->work);
	return 0;
}

__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
					 int (callback_fn)(void *map, int *key, void *value),
					 unsigned int flags,
					 void *aux__prog)
{
	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog;
	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;

	if (flags)
		return -EINVAL;

	return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
}
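
/* Example (illustrative sketch only, not part of this file): BPF-side setup
 * of a workqueue through the bpf_wq_set_callback() wrapper (which supplies
 * the hidden __impl argument). "wq" lives in a value of the hypothetical map
 * "array".
 *
 *	static int wq_cb(void *map, int *key, void *value)
 *	{
 *		// runs later, from workqueue context
 *		return 0;
 *	}
 *
 *	if (bpf_wq_init(wq, &array, 0))
 *		return 0;
 *	if (bpf_wq_set_callback(wq, wq_cb, 0))
 *		return 0;
 *	bpf_wq_start(wq, 0);
 */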
__bpf_kfunc void bpf_preempt_disable(void)
{
	preempt_disable();
}

__bpf_kfunc void bpf_preempt_enable(void)
{
	preempt_enable();
}

struct bpf_iter_bits {
	__u64 __opaque[2];
} __aligned(8);

#define BITS_ITER_NR_WORDS_MAX 511

struct bpf_iter_bits_kern {
	union {
		__u64 *bits;
		__u64 bits_copy;
	};
	int nr_bits;
	int bit;
} __aligned(8);

/* On 64-bit hosts, unsigned long and u64 have the same size, so passing
 * a u64 pointer and an unsigned long pointer to find_next_bit() will
 * return the same result, as both point to the same 8-byte area.
 *
 * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
 * pointer also makes no difference. This is because the first iterated
 * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
 * long is composed of bits 32-63 of the u64.
 *
 * However, for 32-bit big-endian hosts, this is not the case. The first
 * iterated unsigned long will be bits 32-63 of the u64, so swap these two
 * ulong values within the u64.
 */
static void swap_ulong_in_u64(u64 *bits, unsigned int nr)
{
#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
	unsigned int i;

	for (i = 0; i < nr; i++)
		bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32);
#endif
}

/**
 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
 * @it: The new bpf_iter_bits to be created
 * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
 * @nr_words: The size of the specified memory area, measured in 8-byte units.
 * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be
 * further reduced by the BPF memory allocator implementation.
 *
 * This function initializes a new bpf_iter_bits structure for iterating over
 * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
 * copies the data of the memory area to the newly created bpf_iter_bits @it for
 * subsequent iteration operations.
 *
 * On success, 0 is returned. On failure, a negative error code is returned.
 */
__bpf_kfunc int
bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
{
	struct bpf_iter_bits_kern *kit = (void *)it;
	u32 nr_bytes = nr_words * sizeof(u64);
	u32 nr_bits = BYTES_TO_BITS(nr_bytes);
	int err;

	BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
	BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
		     __alignof__(struct bpf_iter_bits));

	kit->nr_bits = 0;
	kit->bits_copy = 0;
	kit->bit = -1;

	if (!unsafe_ptr__ign || !nr_words)
		return -EINVAL;
	if (nr_words > BITS_ITER_NR_WORDS_MAX)
		return -E2BIG;

	/* Optimization for u64 mask */
	if (nr_bits == 64) {
		err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
		if (err)
			return -EFAULT;

		swap_ulong_in_u64(&kit->bits_copy, nr_words);

		kit->nr_bits = nr_bits;
		return 0;
	}

	if (bpf_mem_alloc_check_size(false, nr_bytes))
		return -E2BIG;

	/* Fallback to memalloc */
	kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
	if (!kit->bits)
		return -ENOMEM;

	err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
	if (err) {
		bpf_mem_free(&bpf_global_ma, kit->bits);
		return err;
	}

	swap_ulong_in_u64(kit->bits, nr_words);

	kit->nr_bits = nr_bits;
	return 0;
}

/**
 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
 * @it: The bpf_iter_bits to be checked
 *
 * This function returns a pointer to a number representing the value of the
 * next bit in the bits.
 *
 * If there are no further bits available, it returns NULL.
 */
__bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
{
	struct bpf_iter_bits_kern *kit = (void *)it;
	int bit = kit->bit, nr_bits = kit->nr_bits;
	const void *bits;

	if (!nr_bits || bit >= nr_bits)
		return NULL;

	bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
	bit = find_next_bit(bits, nr_bits, bit + 1);
	if (bit >= nr_bits) {
		kit->bit = bit;
		return NULL;
	}

	kit->bit = bit;
	return &kit->bit;
}

/**
 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
 * @it: The bpf_iter_bits to be destroyed
 *
 * Destroy the resource associated with the bpf_iter_bits.
 */
__bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
{
	struct bpf_iter_bits_kern *kit = (void *)it;

	if (kit->nr_bits <= 64)
		return;
	bpf_mem_free(&bpf_global_ma, kit->bits);
}
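
/* Example (illustrative sketch only, not part of this file): walking the set
 * bits of a single kernel u64 with this open-coded iterator, via a
 * bpf_for_each()-style convenience macro as defined in the kernel selftests.
 * "mask_ptr" is a hypothetical pointer to one u64 read from kernel memory.
 *
 *	int *bit, cnt = 0;
 *
 *	bpf_for_each(bits, bit, mask_ptr, 1)
 *		cnt++;	// *bit is the index of the next set bit
 */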
If user string is
 * too long this will still ensure zero termination in the dst buffer unless
 * buffer size is 0.
 *
 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
 * memset all of @dst on failure.
 */
__bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
{
	int ret;

	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
		return -EINVAL;

	if (unlikely(!dst__sz))
		return 0;

	ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
	if (ret < 0) {
		if (flags & BPF_F_PAD_ZEROS)
			memset((char *)dst, 0, dst__sz);

		return ret;
	}

	if (flags & BPF_F_PAD_ZEROS)
		memset((char *)dst + ret, 0, dst__sz - ret);
	else
		((char *)dst)[ret] = '\0';

	return ret + 1;
}

/**
 * bpf_copy_from_user_task_str() - Copy a string from a task's address space
 * @dst: Destination address, in kernel space. This buffer must be
 *       at least @dst__sz bytes long.
 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL.
 * @unsafe_ptr__ign: Source address in the task's address space.
 * @tsk: The task whose address space will be used
 * @flags: The only supported flag is BPF_F_PAD_ZEROS
 *
 * Copies a NUL-terminated string from a task's address space to the @dst
 * buffer. If the user string is too long, this will still ensure zero
 * termination in the @dst buffer unless the buffer size is 0.
 *
 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success
 * and memset all of @dst on failure.
 *
 * Return: The number of copied bytes on success including the NUL terminator.
 * A negative error code on failure.
 */
__bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz,
					    const void __user *unsafe_ptr__ign,
					    struct task_struct *tsk, u64 flags)
{
	int ret;

	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
		return -EINVAL;

	if (unlikely(dst__sz == 0))
		return 0;

	ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0);
	if (ret < 0) {
		if (flags & BPF_F_PAD_ZEROS)
			memset(dst, 0, dst__sz);
		return ret;
	}

	if (flags & BPF_F_PAD_ZEROS)
		memset(dst + ret, 0, dst__sz - ret);

	return ret + 1;
}
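
/* Example (illustrative sketch only, not part of this file): copying a
 * user-space string from a sleepable BPF program (both kfuncs above are
 * KF_SLEEPABLE), with zero-padding. "uptr" is a hypothetical __user pointer.
 *
 *	char buf[64];
 *	int n;
 *
 *	n = bpf_copy_from_user_str(buf, sizeof(buf), uptr, BPF_F_PAD_ZEROS);
 *	if (n < 0)
 *		return 0;
 *	// n includes the trailing NUL, so buf[n - 1] == '\0'
 */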
/* Keep unsigned long in prototype so that kfunc is usable when emitted to
 * vmlinux.h in BPF programs directly, but note that while in BPF prog, the
 * unsigned long always points to 8-byte region on stack, the kernel may only
 * read and write the 4-bytes on 32-bit.
 */
__bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
{
	local_irq_save(*flags__irq_flag);
}

__bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
{
	local_irq_restore(*flags__irq_flag);
}

__bpf_kfunc void __bpf_trap(void)
{
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(generic_btf_ids)
#ifdef CONFIG_CRASH_DUMP
BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
BTF_ID_FLAGS(func, bpf_list_push_front_impl)
BTF_ID_FLAGS(func, bpf_list_push_back_impl)
BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL)

#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_throw)
#ifdef CONFIG_BPF_EVENTS
BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS)
#endif
BTF_KFUNCS_END(generic_btf_ids)

static const struct btf_kfunc_id_set generic_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &generic_btf_ids,
};

BTF_ID_LIST(generic_dtor_ids)
BTF_ID(struct, task_struct)
BTF_ID(func, bpf_task_release_dtor)
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
BTF_ID(func, bpf_cgroup_release_dtor)
#endif

BTF_KFUNCS_START(common_btf_ids)
BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL)
BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL)
BTF_ID_FLAGS(func, bpf_rcu_read_lock)
BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
#endif
BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_dynptr_adjust)
BTF_ID_FLAGS(func, bpf_dynptr_is_null)
BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
BTF_ID_FLAGS(func, bpf_dynptr_size)
BTF_ID_FLAGS(func, bpf_dynptr_clone)
BTF_ID_FLAGS(func, bpf_dynptr_copy)
#ifdef CONFIG_NET
BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
#endif
BTF_ID_FLAGS(func, bpf_wq_init)
BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
BTF_ID_FLAGS(func, bpf_wq_start)
BTF_ID_FLAGS(func, bpf_preempt_disable)
BTF_ID_FLAGS(func, bpf_preempt_enable)
BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_get_kmem_cache)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_local_irq_save)
BTF_ID_FLAGS(func, bpf_local_irq_restore)
BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)
BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)
BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr)
BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr)
BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
#ifdef CONFIG_DMA_SHARED_BUFFER
BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
#endif
BTF_ID_FLAGS(func, __bpf_trap)
BTF_KFUNCS_END(common_btf_ids)

static const struct btf_kfunc_id_set common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &common_btf_ids,
};

static int __init kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc generic_dtors[] = {
		{
			.btf_id       = generic_dtor_ids[0],
			.kfunc_btf_id = generic_dtor_ids[1]
		},
#ifdef CONFIG_CGROUPS
		{
			.btf_id       = generic_dtor_ids[2],
			.kfunc_btf_id = generic_dtor_ids[3]
		},
#endif
	};

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
						 ARRAY_SIZE(generic_dtors),
						 THIS_MODULE);
	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
}

late_initcall(kfunc_init);

/* Get a pointer to dynptr data up to len bytes for read-only access. If
 * the dynptr doesn't have continuous data up to len bytes, return NULL.
 */
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
{
	const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;

	return bpf_dynptr_slice(p, 0, NULL, len);
}

/* Get a pointer to dynptr data up to len bytes for read-write access. If
 * the dynptr doesn't have continuous data up to len bytes, or the dynptr
 * is read-only, return NULL.
 */
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
{
	if (__bpf_dynptr_is_rdonly(ptr))
		return NULL;
	return (void *)__bpf_dynptr_data(ptr, len);
}