// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/kasan.h>

#include "../../lib/kstrtox.h"

/* If kernel subsystem is allowing eBPF programs to call this function,
 * inside its own verifier_ops->get_func_proto() callback it should return
 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments
 *
 * Different map implementations will rely on rcu in map methods
 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 * if program is allowed to access maps, so check rcu_read_lock_held() or
 * rcu_read_lock_trace_held() in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func = bpf_map_lookup_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func = bpf_map_update_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_PTR_TO_MAP_VALUE,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func = bpf_map_delete_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func = bpf_map_push_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func = bpf_map_pop_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
};
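
/* Illustrative sketch (not from the original source): a subsystem that wants
 * to expose the map helpers above would typically return these protos from
 * its own verifier_ops->get_func_proto() callback, falling back to
 * bpf_base_func_proto() for the generic ones. The callback name below is
 * hypothetical:
 *
 *	static const struct bpf_func_proto *
 *	example_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id, prog);
 *		}
 *	}
 */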

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func = bpf_map_peek_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
};

BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
}

const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
	.func = bpf_map_lookup_percpu_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func = bpf_user_rnd_u32,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func = bpf_get_smp_processor_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.allow_fastcall = true,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func = bpf_get_numa_node_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func = bpf_ktime_get_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func = bpf_ktime_get_boot_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func = bpf_ktime_get_coarse_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_tai_ns)
{
	/* NMI safe access to clock tai */
	return ktime_get_tai_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
	.func = bpf_ktime_get_tai_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func = bpf_get_current_pid_tgid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};
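
/* Illustrative sketch (not from the original source): on the BPF program side
 * the packed return value of bpf_get_current_pid_tgid() is usually split like
 * this (tgid in the upper 32 bits, pid in the lower 32 bits):
 *
 *	u64 pid_tgid = bpf_get_current_pid_tgid();
 *	u32 tgid = pid_tgid >> 32;
 *	u32 pid = (u32)pid_tgid;
 */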

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func = bpf_get_current_uid_gid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy_pad(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func = bpf_get_current_comm,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	preempt_disable();
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
	preempt_enable();
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func = bpf_spin_lock,
	.gpl_only = false,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id = BPF_PTR_POISON,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func = bpf_spin_unlock,
	.gpl_only = false,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id = BPF_PTR_POISON,
};

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->record->spin_lock_off;
	else
		lock = dst + map->record->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}
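
/* Illustrative sketch (not from the original source): BPF programs use the
 * spin lock helpers above on a struct bpf_spin_lock embedded in a map value.
 * The map value layout and map name below are hypothetical:
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		u64 counter;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&my_map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */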

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func = bpf_jiffies64,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func = bpf_get_current_cgroup_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func = bpf_get_current_ancestor_cgroup_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   s64 *, res)
{
	long long _res;
	int err;

	*res = 0;
	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func = bpf_strtol,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg4_size = sizeof(s64),
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   u64 *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	*res = 0;
	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func = bpf_strtoul,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg4_size = sizeof(u64),
};

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

static const struct bpf_func_proto bpf_strncmp_proto = {
	.func = bpf_strncmp,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_CONST_STR,
};

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func = bpf_get_ns_current_pid_tgid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func = bpf_get_raw_cpu_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func = bpf_event_output_data,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func = bpf_copy_from_user,
	.gpl_only = false,
	.might_sleep = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
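
/* Illustrative sketch (not from the original source): bpf_copy_from_user() is
 * only usable from sleepable programs (the proto above sets .might_sleep).
 * A typical BPF-side call, with user_ptr being a hypothetical user space
 * address taken from the program's context:
 *
 *	char buf[64] = {};
 *
 *	if (bpf_copy_from_user(buf, sizeof(buf), user_ptr))
 *		return 0;	// on failure the destination was zeroed
 */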

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func = bpf_copy_from_user_task,
	.gpl_only = true,
	.might_sleep = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_BTF_ID,
	.arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type = ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func = bpf_per_cpu_ptr,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func = bpf_this_cpu_ptr,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
};

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BIN_ARGS 512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL 3
struct bpf_bprintf_buffers {
	char bin_args[MAX_BPRINTF_BIN_ARGS];
	char buf[MAX_BPRINTF_BUF];
};

static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
{
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);

	return 0;
}

void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{
	if (!data->bin_args && !data->buf)
		return;
	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
		return;
	this_cpu_dec(bpf_bprintf_nest_level);
	preempt_enable();
}
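
/* Illustrative sketch (not from the original source): the usual calling
 * pattern for a bprintf-like helper, mirroring what bpf_snprintf() below
 * does. Buffer names here are hypothetical:
 *
 *	struct bpf_bprintf_data data = { .get_bin_args = true };
 *	int err;
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
 *	if (err < 0)
 *		return err;
 *	err = bstr_printf(buf, buf_size, fmt, data.bin_args);
 *	bpf_bprintf_cleanup(&data);
 */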

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when data->get_bin_args is false
 * - Arguments preparation: in addition to the above verification, it writes in
 *   data->bin_args a binary representation of arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data)
{
	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	struct bpf_bprintf_buffers *buffers = NULL;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (get_buffers && try_get_buffers(&buffers))
		return -EBUSY;

	if (data->get_bin_args) {
		if (num_args)
			tmp_buf = buffers->bin_args;
		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
		data->bin_args = (u32 *)tmp_buf;
	}

	if (data->get_buf)
		data->buf = buffers->buf;

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup(data);
	return err;
}

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
	};
	int err, num_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func = bpf_snprintf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_PTR_TO_CONST_STR,
	.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
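
/* Illustrative sketch (not from the original source): from a BPF program the
 * helper takes its arguments as an array of u64. pid and comm_ptr are
 * hypothetical values; data_len must be a multiple of 8 (see the check above):
 *
 *	char out[64];
 *	u64 args[] = { (u64)pid, (u64)(long)comm_ptr };
 *
 *	bpf_snprintf(out, sizeof(out), "pid %d comm %s", args, sizeof(args));
 */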

struct bpf_async_cb {
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
	union {
		struct rcu_head rcu;
		struct work_struct delete_work;
	};
	u64 flags;
};

/* BPF map elements can contain 'struct bpf_timer'.
 * Such map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_async_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If user space reference to a map goes to zero at this point
 * ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing prog's refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref is
 * freeing the timers when inner map is replaced or deleted by user space.
 */
struct bpf_hrtimer {
	struct bpf_async_cb cb;
	struct hrtimer timer;
	atomic_t cancelling;
};

struct bpf_work {
	struct bpf_async_cb cb;
	struct work_struct work;
	struct work_struct delete_work;
};

/* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
struct bpf_async_kern {
	union {
		struct bpf_async_cb *cb;
		struct bpf_hrtimer *timer;
		struct bpf_work *work;
	};
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

enum bpf_async_type {
	BPF_ASYNC_TYPE_TIMER = 0,
	BPF_ASYNC_TYPE_WQ,
};

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->cb.map;
	void *value = t->cb.value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

static void bpf_wq_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, work);
	struct bpf_async_cb *cb = &w->cb;
	struct bpf_map *map = cb->map;
	bpf_callback_t callback_fn;
	void *value = cb->value;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_wq);

	callback_fn = READ_ONCE(cb->callback_fn);
	if (!callback_fn)
		return;

	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	rcu_read_lock_trace();
	migrate_disable();

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);

	migrate_enable();
	rcu_read_unlock_trace();
}

static void bpf_wq_delete_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);

	cancel_work_sync(&w->work);

	kfree_rcu(w, cb.rcu);
}

static void bpf_timer_delete_work(struct work_struct *work)
{
	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);

	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call
	 * kfree_rcu(t) right after for both preallocated and non-preallocated
	 * maps. The async->cb = NULL was already done and no code path can see
	 * address 't' anymore. Timer if armed for existing bpf_hrtimer before
	 * bpf_timer_cancel_and_free will have been cancelled.
	 */
	hrtimer_cancel(&t->timer);
	kfree_rcu(t, cb.rcu);
}

static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
			    enum bpf_async_type type)
{
	struct bpf_async_cb *cb;
	struct bpf_hrtimer *t;
	struct bpf_work *w;
	clockid_t clockid;
	size_t size;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		size = sizeof(struct bpf_hrtimer);
		break;
	case BPF_ASYNC_TYPE_WQ:
		size = sizeof(struct bpf_work);
		break;
	default:
		return -EINVAL;
	}

	__bpf_spin_lock_irqsave(&async->lock);
	t = async->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}

	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		clockid = flags & (MAX_CLOCKS - 1);
		t = (struct bpf_hrtimer *)cb;

		atomic_set(&t->cancelling, 0);
		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
		hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
		t->timer.function = bpf_timer_cb;
		cb->value = (void *)async - map->record->timer_off;
		break;
	case BPF_ASYNC_TYPE_WQ:
		w = (struct bpf_work *)cb;

		INIT_WORK(&w->work, bpf_wq_work);
		INIT_WORK(&w->delete_work, bpf_wq_delete_work);
		cb->value = (void *)async - map->record->wq_off;
		break;
	}
	cb->map = map;
	cb->prog = NULL;
	cb->flags = flags;
	rcu_assign_pointer(cb->callback_fn, NULL);

	WRITE_ONCE(async->cb, cb);
	/* Guarantee the order between async->cb and map->usercnt. So
	 * when there are concurrent uref release and bpf timer init, either
	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
	 * timer or atomic64_read() below returns a zero usercnt.
	 */
	smp_mb();
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		WRITE_ONCE(async->cb, NULL);
		kfree(cb);
		ret = -EPERM;
	}
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return ret;
}

BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clock_t clockid = flags & (MAX_CLOCKS - 1);

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;

	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func = bpf_timer_init,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
				    struct bpf_prog_aux *aux, unsigned int flags,
				    enum bpf_async_type type)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_async_cb *cb;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&async->lock);
	cb = async->cb;
	if (!cb) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&cb->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = cb->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		cb->prog = prog;
	}
	rcu_assign_pointer(cb->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return ret;
}

BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func = bpf_timer_set_callback,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;
	enum hrtimer_mode mode;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->cb.prog) {
		ret = -EINVAL;
		goto out;
	}

	if (flags & BPF_F_TIMER_ABS)
		mode = HRTIMER_MODE_ABS_SOFT;
	else
		mode = HRTIMER_MODE_REL_SOFT;

	if (flags & BPF_F_TIMER_CPU_PIN)
		mode |= HRTIMER_MODE_PINNED;

	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func = bpf_timer_start,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};
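
/* Illustrative sketch (not from the original source): the intended BPF-side
 * usage, matching the lifecycle described in the comment above struct
 * bpf_hrtimer. The map, value and callback names are hypothetical:
 *
 *	struct map_val {
 *		struct bpf_timer t;
 *	};
 *
 *	static int timer_cb(void *map, int *key, struct map_val *val)
 *	{
 *		return 0;
 *	}
 *
 *	struct map_val *val = bpf_map_lookup_elem(&my_map, &key);
 *	if (val) {
 *		bpf_timer_init(&val->t, &my_map, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 1000000, 0);	// fires in 1 ms
 *	}
 */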

static void drop_prog_refcnt(struct bpf_async_cb *async)
{
	struct bpf_prog *prog = async->prog;

	if (prog) {
		bpf_prog_put(prog);
		async->prog = NULL;
		rcu_assign_pointer(async->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
{
	struct bpf_hrtimer *t, *cur_t;
	bool inc = false;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	rcu_read_lock();
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}

	cur_t = this_cpu_read(hrtimer_running);
	if (cur_t == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish.
		 */
		ret = -EDEADLK;
		goto out;
	}

	/* Only account in-flight cancellations when invoked from a timer
	 * callback, since we want to avoid waiting only if other _callbacks_
	 * are waiting on us, to avoid introducing lockups. Non-callback paths
	 * are ok, since nobody would synchronously wait for their completion.
	 */
	if (!cur_t)
		goto drop;
	atomic_inc(&t->cancelling);
	/* Need full barrier after relaxed atomic_inc */
	smp_mb__after_atomic();
	inc = true;
	if (atomic_read(&cur_t->cancelling)) {
		/* We're cancelling timer t, while some other timer callback is
		 * attempting to cancel us. In such a case, it might be possible
		 * that timer t belongs to the other callback, or some other
		 * callback waiting upon it (creating transitive dependencies
		 * upon us), and we will enter a deadlock if we continue
		 * cancelling and waiting for it synchronously, since it might
		 * do the same. Bail!
		 */
		ret = -EDEADLK;
		goto out;
	}
drop:
	drop_prog_refcnt(&t->cb);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	if (inc)
		atomic_dec(&t->cancelling);
	rcu_read_unlock();
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func = bpf_timer_cancel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
};

static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
{
	struct bpf_async_cb *cb;

	/* Performance optimization: read async->cb without lock first. */
	if (!READ_ONCE(async->cb))
		return NULL;

	__bpf_spin_lock_irqsave(&async->lock);
	/* re-read it under lock */
	cb = async->cb;
	if (!cb)
		goto out;
	drop_prog_refcnt(cb);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	WRITE_ONCE(async->cb, NULL);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return cb;
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_hrtimer *t;

	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);

	if (!t)
		return;
	/* We check that bpf_map_delete/update_elem() was called from timer
	 * callback_fn. In such case we don't call hrtimer_cancel() (since it
	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
	 * just return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since async->cb = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 *
	 * However, it is possible the timer callback_fn calling us armed the
	 * timer _before_ calling us, such that failing to cancel it here will
	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
	 * Therefore, we _need_ to cancel any outstanding timers before we do
	 * kfree_rcu, even though no more timers can be armed.
	 *
	 * Moreover, we need to schedule work even if timer does not belong to
	 * the calling callback_fn, as on two different CPUs, we can end up in a
	 * situation where both sides run in parallel, try to cancel one
	 * another, and we end up waiting on both sides in hrtimer_cancel
	 * without making forward progress, since timer1 depends on timer2
	 * callback to finish, and vice versa.
	 *
	 * CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
	 * bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
	 *
	 * To avoid these issues, punt to workqueue context when we are in a
	 * timer callback.
	 */
	if (this_cpu_read(hrtimer_running)) {
		queue_work(system_unbound_wq, &t->cb.delete_work);
		return;
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/* If the timer is running on other CPU, also use a kworker to
		 * wait for the completion of the timer instead of trying to
		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
		 * completion.
		 */
		if (hrtimer_try_to_cancel(&t->timer) >= 0)
			kfree_rcu(t, cb.rcu);
		else
			queue_work(system_unbound_wq, &t->cb.delete_work);
	} else {
		bpf_timer_delete_work(&t->cb.delete_work);
	}
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_wq_cancel_and_free(void *val)
{
	struct bpf_work *work;

	BTF_TYPE_EMIT(struct bpf_wq);

	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
	if (!work)
		return;
	/* Trigger cancel of the sleepable work, but *do not* wait for
	 * it to finish if it was running as we might not be in a
	 * sleepable context.
	 * kfree will be called once the work has finished.
	 */
	schedule_work(&work->delete_work);
}

BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
{
	unsigned long *kptr = dst;

	/* This helper may be inlined by verifier. */
	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote type that verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func = bpf_kptr_xchg,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id = BPF_PTR_POISON,
	.arg1_type = ARG_KPTR_XCHG_DEST,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id = BPF_PTR_POISON,
};

/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE ((1UL << 24) - 1)
#define DYNPTR_TYPE_SHIFT 28
#define DYNPTR_SIZE_MASK 0xFFFFFF
#define DYNPTR_RDONLY_BIT BIT(31)

bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_RDONLY_BIT;
}

void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
	ptr->size |= DYNPTR_RDONLY_BIT;
}

static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{
	ptr->size |= type << DYNPTR_TYPE_SHIFT;
}

static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{
	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
}

u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_SIZE_MASK;
}

static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
{
	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;

	ptr->size = new_size | metadata;
}

int bpf_dynptr_check_size(u32 size)
{
	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
}

void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
		     enum bpf_dynptr_type type, u32 offset, u32 size)
{
	ptr->data = data;
	ptr->offset = offset;
	ptr->size = size;
	bpf_dynptr_set_type(ptr, type);
}

void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{
	memset(ptr, 0, sizeof(*ptr));
}

static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
{
	u32 size = __bpf_dynptr_size(ptr);

	if (len > size || offset > size - len)
		return -E2BIG;

	return 0;
}

BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
{
	int err;

	BTF_TYPE_EMIT(struct bpf_dynptr);

	err = bpf_dynptr_check_size(size);
	if (err)
		goto error;

	/* flags is currently unsupported */
	if (flags) {
		err = -EINVAL;
		goto error;
	}

	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);

	return 0;

error:
	bpf_dynptr_set_null(ptr);
	return err;
}

static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
	.func = bpf_dynptr_from_mem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
};

static int __bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr_kern *src,
			     u32 offset, u64 flags)
{
	enum bpf_dynptr_type type;
	int err;

	if (!src->data || flags)
		return -EINVAL;

	err = bpf_dynptr_check_off_len(src, offset, len);
	if (err)
		return err;

	type = bpf_dynptr_get_type(src);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
		 */
		memmove(dst, src->data + src->offset + offset, len);
		return 0;
	case BPF_DYNPTR_TYPE_SKB:
		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
	case BPF_DYNPTR_TYPE_XDP:
		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
	default:
		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
		return -EFAULT;
	}
}

BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
	   u32, offset, u64, flags)
{
	return __bpf_dynptr_read(dst, len, src, offset, flags);
}

static const struct bpf_func_proto bpf_dynptr_read_proto = {
	.func = bpf_dynptr_read,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg4_type = ARG_ANYTHING,
	.arg5_type = ARG_ANYTHING,
};

static int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src,
			      u32 len, u64 flags)
{
	enum bpf_dynptr_type type;
	int err;

	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
		return -EINVAL;

	err = bpf_dynptr_check_off_len(dst, offset, len);
	if (err)
		return err;

	type = bpf_dynptr_get_type(dst);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		if (flags)
			return -EINVAL;
		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
		 */
		memmove(dst->data + dst->offset + offset, src, len);
		return 0;
	case BPF_DYNPTR_TYPE_SKB:
		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
					     flags);
	case BPF_DYNPTR_TYPE_XDP:
		if (flags)
			return -EINVAL;
		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
	default:
		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
		return -EFAULT;
	}
}

BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
	   u32, len, u64, flags)
{
	return __bpf_dynptr_write(dst, offset, src, len, flags);
}

static const struct bpf_func_proto bpf_dynptr_write_proto = {
	.func = bpf_dynptr_write,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE_OR_ZERO,
	.arg5_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
{
	enum bpf_dynptr_type type;
	int err;

	if (!ptr->data)
		return 0;

	err = bpf_dynptr_check_off_len(ptr, offset, len);
	if (err)
		return 0;

	if (__bpf_dynptr_is_rdonly(ptr))
		return 0;

	type = bpf_dynptr_get_type(ptr);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		return (unsigned long)(ptr->data + ptr->offset + offset);
	case BPF_DYNPTR_TYPE_SKB:
	case BPF_DYNPTR_TYPE_XDP:
		/* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
		return 0;
	default:
		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
		return 0;
	}
}
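
/* Illustrative sketch (not from the original source): typical BPF-side use of
 * a local dynptr backed by plain memory. Buffer names are hypothetical:
 *
 *	char raw[32] = {};
 *	struct bpf_dynptr dptr;
 *	char byte;
 *
 *	if (bpf_dynptr_from_mem(raw, sizeof(raw), 0, &dptr))
 *		return 0;
 *	bpf_dynptr_write(&dptr, 0, &byte, sizeof(byte), 0);
 *	bpf_dynptr_read(&byte, sizeof(byte), &dptr, 0, 0);
 *
 * skb and xdp dynptrs cannot use bpf_dynptr_data() (see above) and should use
 * bpf_dynptr_slice()/bpf_dynptr_slice_rdwr() instead.
 */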

static const struct bpf_func_proto bpf_dynptr_data_proto = {
	.func = bpf_dynptr_data,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL,
	.arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
};

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_tai_ns:
		return &bpf_ktime_get_tai_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_strncmp:
		return &bpf_strncmp_proto;
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	default:
		break;
	}

	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_timer_init:
		return &bpf_timer_init_proto;
	case BPF_FUNC_timer_set_callback:
		return &bpf_timer_set_callback_proto;
	case BPF_FUNC_timer_start:
		return &bpf_timer_start_proto;
	case BPF_FUNC_timer_cancel:
		return &bpf_timer_cancel_proto;
	case BPF_FUNC_kptr_xchg:
return &bpf_kptr_xchg_proto; 1997 case BPF_FUNC_for_each_map_elem: 1998 return &bpf_for_each_map_elem_proto; 1999 case BPF_FUNC_loop: 2000 return &bpf_loop_proto; 2001 case BPF_FUNC_user_ringbuf_drain: 2002 return &bpf_user_ringbuf_drain_proto; 2003 case BPF_FUNC_ringbuf_reserve_dynptr: 2004 return &bpf_ringbuf_reserve_dynptr_proto; 2005 case BPF_FUNC_ringbuf_submit_dynptr: 2006 return &bpf_ringbuf_submit_dynptr_proto; 2007 case BPF_FUNC_ringbuf_discard_dynptr: 2008 return &bpf_ringbuf_discard_dynptr_proto; 2009 case BPF_FUNC_dynptr_from_mem: 2010 return &bpf_dynptr_from_mem_proto; 2011 case BPF_FUNC_dynptr_read: 2012 return &bpf_dynptr_read_proto; 2013 case BPF_FUNC_dynptr_write: 2014 return &bpf_dynptr_write_proto; 2015 case BPF_FUNC_dynptr_data: 2016 return &bpf_dynptr_data_proto; 2017 #ifdef CONFIG_CGROUPS 2018 case BPF_FUNC_cgrp_storage_get: 2019 return &bpf_cgrp_storage_get_proto; 2020 case BPF_FUNC_cgrp_storage_delete: 2021 return &bpf_cgrp_storage_delete_proto; 2022 case BPF_FUNC_get_current_cgroup_id: 2023 return &bpf_get_current_cgroup_id_proto; 2024 case BPF_FUNC_get_current_ancestor_cgroup_id: 2025 return &bpf_get_current_ancestor_cgroup_id_proto; 2026 #endif 2027 default: 2028 break; 2029 } 2030 2031 if (!bpf_token_capable(prog->aux->token, CAP_PERFMON)) 2032 return NULL; 2033 2034 switch (func_id) { 2035 case BPF_FUNC_trace_printk: 2036 return bpf_get_trace_printk_proto(); 2037 case BPF_FUNC_get_current_task: 2038 return &bpf_get_current_task_proto; 2039 case BPF_FUNC_get_current_task_btf: 2040 return &bpf_get_current_task_btf_proto; 2041 case BPF_FUNC_probe_read_user: 2042 return &bpf_probe_read_user_proto; 2043 case BPF_FUNC_probe_read_kernel: 2044 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 2045 NULL : &bpf_probe_read_kernel_proto; 2046 case BPF_FUNC_probe_read_user_str: 2047 return &bpf_probe_read_user_str_proto; 2048 case BPF_FUNC_probe_read_kernel_str: 2049 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 2050 NULL : &bpf_probe_read_kernel_str_proto; 2051 case BPF_FUNC_snprintf_btf: 2052 return &bpf_snprintf_btf_proto; 2053 case BPF_FUNC_snprintf: 2054 return &bpf_snprintf_proto; 2055 case BPF_FUNC_task_pt_regs: 2056 return &bpf_task_pt_regs_proto; 2057 case BPF_FUNC_trace_vprintk: 2058 return bpf_get_trace_vprintk_proto(); 2059 default: 2060 return NULL; 2061 } 2062 } 2063 EXPORT_SYMBOL_GPL(bpf_base_func_proto); 2064 2065 void bpf_list_head_free(const struct btf_field *field, void *list_head, 2066 struct bpf_spin_lock *spin_lock) 2067 { 2068 struct list_head *head = list_head, *orig_head = list_head; 2069 2070 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head)); 2071 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head)); 2072 2073 /* Do the actual list draining outside the lock to not hold the lock for 2074 * too long, and also prevent deadlocks if tracing programs end up 2075 * executing on entry/exit of functions called inside the critical 2076 * section, and end up doing map ops that call bpf_list_head_free for 2077 * the same map value again. 
2078 */ 2079 __bpf_spin_lock_irqsave(spin_lock); 2080 if (!head->next || list_empty(head)) 2081 goto unlock; 2082 head = head->next; 2083 unlock: 2084 INIT_LIST_HEAD(orig_head); 2085 __bpf_spin_unlock_irqrestore(spin_lock); 2086 2087 while (head != orig_head) { 2088 void *obj = head; 2089 2090 obj -= field->graph_root.node_offset; 2091 head = head->next; 2092 /* The contained type can also have resources, including a 2093 * bpf_list_head which needs to be freed. 2094 */ 2095 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); 2096 } 2097 } 2098 2099 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are 2100 * 'rb_node *', so field name of rb_node within containing struct is not 2101 * needed. 2102 * 2103 * Since bpf_rb_tree's node type has a corresponding struct btf_field with 2104 * graph_root.node_offset, it's not necessary to know field name 2105 * or type of node struct 2106 */ 2107 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \ 2108 for (pos = rb_first_postorder(root); \ 2109 pos && ({ n = rb_next_postorder(pos); 1; }); \ 2110 pos = n) 2111 2112 void bpf_rb_root_free(const struct btf_field *field, void *rb_root, 2113 struct bpf_spin_lock *spin_lock) 2114 { 2115 struct rb_root_cached orig_root, *root = rb_root; 2116 struct rb_node *pos, *n; 2117 void *obj; 2118 2119 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root)); 2120 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root)); 2121 2122 __bpf_spin_lock_irqsave(spin_lock); 2123 orig_root = *root; 2124 *root = RB_ROOT_CACHED; 2125 __bpf_spin_unlock_irqrestore(spin_lock); 2126 2127 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) { 2128 obj = pos; 2129 obj -= field->graph_root.node_offset; 2130 2131 2132 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); 2133 } 2134 } 2135 2136 __bpf_kfunc_start_defs(); 2137 2138 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) 2139 { 2140 struct btf_struct_meta *meta = meta__ign; 2141 u64 size = local_type_id__k; 2142 void *p; 2143 2144 p = bpf_mem_alloc(&bpf_global_ma, size); 2145 if (!p) 2146 return NULL; 2147 if (meta) 2148 bpf_obj_init(meta->record, p); 2149 return p; 2150 } 2151 2152 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) 2153 { 2154 u64 size = local_type_id__k; 2155 2156 /* The verifier has ensured that meta__ign must be NULL */ 2157 return bpf_mem_alloc(&bpf_global_percpu_ma, size); 2158 } 2159 2160 /* Must be called under migrate_disable(), as required by bpf_mem_free */ 2161 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu) 2162 { 2163 struct bpf_mem_alloc *ma; 2164 2165 if (rec && rec->refcount_off >= 0 && 2166 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { 2167 /* Object is refcounted and refcount_dec didn't result in 0 2168 * refcount. Return without freeing the object 2169 */ 2170 return; 2171 } 2172 2173 if (rec) 2174 bpf_obj_free_fields(rec, p); 2175 2176 if (percpu) 2177 ma = &bpf_global_percpu_ma; 2178 else 2179 ma = &bpf_global_ma; 2180 bpf_mem_free_rcu(ma, p); 2181 } 2182 2183 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) 2184 { 2185 struct btf_struct_meta *meta = meta__ign; 2186 void *p = p__alloc; 2187 2188 __bpf_obj_drop_impl(p, meta ? 
meta->record : NULL, false); 2189 } 2190 2191 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) 2192 { 2193 /* The verifier has ensured that meta__ign must be NULL */ 2194 bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc); 2195 } 2196 2197 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) 2198 { 2199 struct btf_struct_meta *meta = meta__ign; 2200 struct bpf_refcount *ref; 2201 2202 /* Could just cast directly to refcount_t *, but need some code using 2203 * bpf_refcount type so that it is emitted in vmlinux BTF 2204 */ 2205 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); 2206 if (!refcount_inc_not_zero((refcount_t *)ref)) 2207 return NULL; 2208 2209 /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null 2210 * in verifier.c 2211 */ 2212 return (void *)p__refcounted_kptr; 2213 } 2214 2215 static int __bpf_list_add(struct bpf_list_node_kern *node, 2216 struct bpf_list_head *head, 2217 bool tail, struct btf_record *rec, u64 off) 2218 { 2219 struct list_head *n = &node->list_head, *h = (void *)head; 2220 2221 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't 2222 * called on its fields, so init here 2223 */ 2224 if (unlikely(!h->next)) 2225 INIT_LIST_HEAD(h); 2226 2227 /* node->owner != NULL implies !list_empty(n), no need to separately 2228 * check the latter 2229 */ 2230 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { 2231 /* Only called from BPF prog, no need to migrate_disable */ 2232 __bpf_obj_drop_impl((void *)n - off, rec, false); 2233 return -EINVAL; 2234 } 2235 2236 tail ? list_add_tail(n, h) : list_add(n, h); 2237 WRITE_ONCE(node->owner, head); 2238 2239 return 0; 2240 } 2241 2242 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head, 2243 struct bpf_list_node *node, 2244 void *meta__ign, u64 off) 2245 { 2246 struct bpf_list_node_kern *n = (void *)node; 2247 struct btf_struct_meta *meta = meta__ign; 2248 2249 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); 2250 } 2251 2252 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head, 2253 struct bpf_list_node *node, 2254 void *meta__ign, u64 off) 2255 { 2256 struct bpf_list_node_kern *n = (void *)node; 2257 struct btf_struct_meta *meta = meta__ign; 2258 2259 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); 2260 } 2261 2262 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) 2263 { 2264 struct list_head *n, *h = (void *)head; 2265 struct bpf_list_node_kern *node; 2266 2267 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't 2268 * called on its fields, so init here 2269 */ 2270 if (unlikely(!h->next)) 2271 INIT_LIST_HEAD(h); 2272 if (list_empty(h)) 2273 return NULL; 2274 2275 n = tail ? 
h->prev : h->next; 2276 node = container_of(n, struct bpf_list_node_kern, list_head); 2277 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head)) 2278 return NULL; 2279 2280 list_del_init(n); 2281 WRITE_ONCE(node->owner, NULL); 2282 return (struct bpf_list_node *)n; 2283 } 2284 2285 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) 2286 { 2287 return __bpf_list_del(head, false); 2288 } 2289 2290 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) 2291 { 2292 return __bpf_list_del(head, true); 2293 } 2294 2295 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, 2296 struct bpf_rb_node *node) 2297 { 2298 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; 2299 struct rb_root_cached *r = (struct rb_root_cached *)root; 2300 struct rb_node *n = &node_internal->rb_node; 2301 2302 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or 2303 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n) 2304 */ 2305 if (READ_ONCE(node_internal->owner) != root) 2306 return NULL; 2307 2308 rb_erase_cached(n, r); 2309 RB_CLEAR_NODE(n); 2310 WRITE_ONCE(node_internal->owner, NULL); 2311 return (struct bpf_rb_node *)n; 2312 } 2313 2314 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF 2315 * program 2316 */ 2317 static int __bpf_rbtree_add(struct bpf_rb_root *root, 2318 struct bpf_rb_node_kern *node, 2319 void *less, struct btf_record *rec, u64 off) 2320 { 2321 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; 2322 struct rb_node *parent = NULL, *n = &node->rb_node; 2323 bpf_callback_t cb = (bpf_callback_t)less; 2324 bool leftmost = true; 2325 2326 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately 2327 * check the latter 2328 */ 2329 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { 2330 /* Only called from BPF prog, no need to migrate_disable */ 2331 __bpf_obj_drop_impl((void *)n - off, rec, false); 2332 return -EINVAL; 2333 } 2334 2335 while (*link) { 2336 parent = *link; 2337 if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) { 2338 link = &parent->rb_left; 2339 } else { 2340 link = &parent->rb_right; 2341 leftmost = false; 2342 } 2343 } 2344 2345 rb_link_node(n, parent, link); 2346 rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost); 2347 WRITE_ONCE(node->owner, root); 2348 return 0; 2349 } 2350 2351 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, 2352 bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), 2353 void *meta__ign, u64 off) 2354 { 2355 struct btf_struct_meta *meta = meta__ign; 2356 struct bpf_rb_node_kern *n = (void *)node; 2357 2358 return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off); 2359 } 2360 2361 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) 2362 { 2363 struct rb_root_cached *r = (struct rb_root_cached *)root; 2364 2365 return (struct bpf_rb_node *)rb_first_cached(r); 2366 } 2367 2368 /** 2369 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this 2370 * kfunc which is not stored in a map as a kptr, must be released by calling 2371 * bpf_task_release(). 2372 * @p: The task on which a reference is being acquired. 
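 *
 * An illustrative acquire/release pairing as it might appear in a BPF program
 * (a sketch only, not taken from this file; the local name 't' is an
 * assumption):
 *
 *	struct task_struct *t = bpf_task_acquire(p);
 *
 *	if (t) {
 *		// ... use t under the acquired reference ...
 *		bpf_task_release(t);
 *	}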
2373 */ 2374 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p) 2375 { 2376 if (refcount_inc_not_zero(&p->rcu_users)) 2377 return p; 2378 return NULL; 2379 } 2380 2381 /** 2382 * bpf_task_release - Release the reference acquired on a task. 2383 * @p: The task on which a reference is being released. 2384 */ 2385 __bpf_kfunc void bpf_task_release(struct task_struct *p) 2386 { 2387 put_task_struct_rcu_user(p); 2388 } 2389 2390 __bpf_kfunc void bpf_task_release_dtor(void *p) 2391 { 2392 put_task_struct_rcu_user(p); 2393 } 2394 CFI_NOSEAL(bpf_task_release_dtor); 2395 2396 #ifdef CONFIG_CGROUPS 2397 /** 2398 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by 2399 * this kfunc which is not stored in a map as a kptr, must be released by 2400 * calling bpf_cgroup_release(). 2401 * @cgrp: The cgroup on which a reference is being acquired. 2402 */ 2403 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) 2404 { 2405 return cgroup_tryget(cgrp) ? cgrp : NULL; 2406 } 2407 2408 /** 2409 * bpf_cgroup_release - Release the reference acquired on a cgroup. 2410 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to 2411 * not be freed until the current grace period has ended, even if its refcount 2412 * drops to 0. 2413 * @cgrp: The cgroup on which a reference is being released. 2414 */ 2415 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp) 2416 { 2417 cgroup_put(cgrp); 2418 } 2419 2420 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp) 2421 { 2422 cgroup_put(cgrp); 2423 } 2424 CFI_NOSEAL(bpf_cgroup_release_dtor); 2425 2426 /** 2427 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor 2428 * array. A cgroup returned by this kfunc which is not subsequently stored in a 2429 * map, must be released by calling bpf_cgroup_release(). 2430 * @cgrp: The cgroup for which we're performing a lookup. 2431 * @level: The level of ancestor to look up. 2432 */ 2433 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) 2434 { 2435 struct cgroup *ancestor; 2436 2437 if (level > cgrp->level || level < 0) 2438 return NULL; 2439 2440 /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */ 2441 ancestor = cgrp->ancestors[level]; 2442 if (!cgroup_tryget(ancestor)) 2443 return NULL; 2444 return ancestor; 2445 } 2446 2447 /** 2448 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this 2449 * kfunc which is not subsequently stored in a map, must be released by calling 2450 * bpf_cgroup_release(). 2451 * @cgid: cgroup id. 2452 */ 2453 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid) 2454 { 2455 struct cgroup *cgrp; 2456 2457 cgrp = cgroup_get_from_id(cgid); 2458 if (IS_ERR(cgrp)) 2459 return NULL; 2460 return cgrp; 2461 } 2462 2463 /** 2464 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test 2465 * task's membership of cgroup ancestry. 2466 * @task: the task to be tested 2467 * @ancestor: possible ancestor of @task's cgroup 2468 * 2469 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor. 2470 * It follows all the same rules as cgroup_is_descendant, and only applies 2471 * to the default hierarchy. 
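 *
 * Return: 1 if @task's cgroup on the default hierarchy is a descendant of
 * @ancestor, 0 otherwise.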
2472 */ 2473 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task, 2474 struct cgroup *ancestor) 2475 { 2476 long ret; 2477 2478 rcu_read_lock(); 2479 ret = task_under_cgroup_hierarchy(task, ancestor); 2480 rcu_read_unlock(); 2481 return ret; 2482 } 2483 2484 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) 2485 { 2486 struct bpf_array *array = container_of(map, struct bpf_array, map); 2487 struct cgroup *cgrp; 2488 2489 if (unlikely(idx >= array->map.max_entries)) 2490 return -E2BIG; 2491 2492 cgrp = READ_ONCE(array->ptrs[idx]); 2493 if (unlikely(!cgrp)) 2494 return -EAGAIN; 2495 2496 return task_under_cgroup_hierarchy(current, cgrp); 2497 } 2498 2499 const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { 2500 .func = bpf_current_task_under_cgroup, 2501 .gpl_only = false, 2502 .ret_type = RET_INTEGER, 2503 .arg1_type = ARG_CONST_MAP_PTR, 2504 .arg2_type = ARG_ANYTHING, 2505 }; 2506 2507 /** 2508 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a 2509 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its 2510 * hierarchy ID. 2511 * @task: The target task 2512 * @hierarchy_id: The ID of a cgroup1 hierarchy 2513 * 2514 * On success, the cgroup is returned. On failure, NULL is returned. 2515 */ 2516 __bpf_kfunc struct cgroup * 2517 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) 2518 { 2519 struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id); 2520 2521 if (IS_ERR(cgrp)) 2522 return NULL; 2523 return cgrp; 2524 } 2525 #endif /* CONFIG_CGROUPS */ 2526 2527 /** 2528 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up 2529 * in the root pid namespace idr. If a task is returned, it must either be 2530 * stored in a map, or released with bpf_task_release(). 2531 * @pid: The pid of the task being looked up. 2532 */ 2533 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) 2534 { 2535 struct task_struct *p; 2536 2537 rcu_read_lock(); 2538 p = find_task_by_pid_ns(pid, &init_pid_ns); 2539 if (p) 2540 p = bpf_task_acquire(p); 2541 rcu_read_unlock(); 2542 2543 return p; 2544 } 2545 2546 /** 2547 * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up 2548 * in the pid namespace of the current task. If a task is returned, it must 2549 * either be stored in a map, or released with bpf_task_release(). 2550 * @vpid: The vpid of the task being looked up. 2551 */ 2552 __bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid) 2553 { 2554 struct task_struct *p; 2555 2556 rcu_read_lock(); 2557 p = find_task_by_vpid(vpid); 2558 if (p) 2559 p = bpf_task_acquire(p); 2560 rcu_read_unlock(); 2561 2562 return p; 2563 } 2564 2565 /** 2566 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. 2567 * @p: The dynptr whose data slice to retrieve 2568 * @offset: Offset into the dynptr 2569 * @buffer__opt: User-provided buffer to copy contents into. May be NULL 2570 * @buffer__szk: Size (in bytes) of the buffer if present. This is the 2571 * length of the requested slice. This must be a constant. 2572 * 2573 * For non-skb and non-xdp type dynptrs, there is no difference between 2574 * bpf_dynptr_slice and bpf_dynptr_data. 2575 * 2576 * If buffer__opt is NULL, the call will fail if buffer_opt was needed. 2577 * 2578 * If the intention is to write to the data slice, please use 2579 * bpf_dynptr_slice_rdwr. 2580 * 2581 * The user must check that the returned pointer is not null before using it.
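 *
 * An illustrative read-only usage sketch, mirroring the bpf_dynptr_slice_rdwr()
 * example below (the 'eth' and 'buffer' names are assumptions, not part of
 * this API):
 *
 *	struct eth_hdr *eth = bpf_dynptr_slice(&dynptr, 0, buffer, sizeof(buffer));
 *	if (!eth)
 *		return TC_ACT_SHOT;
 *
 *	// inspect, but do not modify, the slice //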
2582 * 2583 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice 2584 * does not change the underlying packet data pointers, so a call to 2585 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in 2586 * the bpf program. 2587 * 2588 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only 2589 * data slice (can be either direct pointer to the data or a pointer to the user 2590 * provided buffer, with its contents containing the data, if unable to obtain 2591 * direct pointer) 2592 */ 2593 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset, 2594 void *buffer__opt, u32 buffer__szk) 2595 { 2596 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2597 enum bpf_dynptr_type type; 2598 u32 len = buffer__szk; 2599 int err; 2600 2601 if (!ptr->data) 2602 return NULL; 2603 2604 err = bpf_dynptr_check_off_len(ptr, offset, len); 2605 if (err) 2606 return NULL; 2607 2608 type = bpf_dynptr_get_type(ptr); 2609 2610 switch (type) { 2611 case BPF_DYNPTR_TYPE_LOCAL: 2612 case BPF_DYNPTR_TYPE_RINGBUF: 2613 return ptr->data + ptr->offset + offset; 2614 case BPF_DYNPTR_TYPE_SKB: 2615 if (buffer__opt) 2616 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt); 2617 else 2618 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len); 2619 case BPF_DYNPTR_TYPE_XDP: 2620 { 2621 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); 2622 if (!IS_ERR_OR_NULL(xdp_ptr)) 2623 return xdp_ptr; 2624 2625 if (!buffer__opt) 2626 return NULL; 2627 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false); 2628 return buffer__opt; 2629 } 2630 default: 2631 WARN_ONCE(true, "unknown dynptr type %d\n", type); 2632 return NULL; 2633 } 2634 } 2635 2636 /** 2637 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data. 2638 * @p: The dynptr whose data slice to retrieve 2639 * @offset: Offset into the dynptr 2640 * @buffer__opt: User-provided buffer to copy contents into. May be NULL 2641 * @buffer__szk: Size (in bytes) of the buffer if present. This is the 2642 * length of the requested slice. This must be a constant. 2643 * 2644 * For non-skb and non-xdp type dynptrs, there is no difference between 2645 * bpf_dynptr_slice and bpf_dynptr_data. 2646 * 2647 * If buffer__opt is NULL, the call will fail if buffer_opt was needed. 2648 * 2649 * The returned pointer is writable and may point to either directly the dynptr 2650 * data at the requested offset or to the buffer if unable to obtain a direct 2651 * data pointer to (example: the requested slice is to the paged area of an skb 2652 * packet). In the case where the returned pointer is to the buffer, the user 2653 * is responsible for persisting writes through calling bpf_dynptr_write(). This 2654 * usually looks something like this pattern: 2655 * 2656 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer)); 2657 * if (!eth) 2658 * return TC_ACT_SHOT; 2659 * 2660 * // mutate eth header // 2661 * 2662 * if (eth == buffer) 2663 * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); 2664 * 2665 * Please note that, as in the example above, the user must check that the 2666 * returned pointer is not null before using it. 
2667 * 2668 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr 2669 * does not change the underlying packet data pointers, so a call to 2670 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in 2671 * the bpf program. 2672 * 2673 * Return: NULL if the call failed (eg invalid dynptr), pointer to a 2674 * data slice (can be either direct pointer to the data or a pointer to the user 2675 * provided buffer, with its contents containing the data, if unable to obtain 2676 * direct pointer) 2677 */ 2678 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, 2679 void *buffer__opt, u32 buffer__szk) 2680 { 2681 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2682 2683 if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) 2684 return NULL; 2685 2686 /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice. 2687 * 2688 * For skb-type dynptrs, it is safe to write into the returned pointer 2689 * if the bpf program allows skb data writes. There are two possibilities 2690 * that may occur when calling bpf_dynptr_slice_rdwr: 2691 * 2692 * 1) The requested slice is in the head of the skb. In this case, the 2693 * returned pointer is directly to skb data, and if the skb is cloned, the 2694 * verifier will have uncloned it (see bpf_unclone_prologue()) already. 2695 * The pointer can be directly written into. 2696 * 2697 * 2) Some portion of the requested slice is in the paged buffer area. 2698 * In this case, the requested data will be copied out into the buffer 2699 * and the returned pointer will be a pointer to the buffer. The skb 2700 * will not be pulled. To persist the write, the user will need to call 2701 * bpf_dynptr_write(), which will pull the skb and commit the write. 2702 * 2703 * Similarly for xdp programs, if the requested slice is not across xdp 2704 * fragments, then a direct pointer will be returned, otherwise the data 2705 * will be copied out into the buffer and the user will need to call 2706 * bpf_dynptr_write() to commit changes. 
2707 */ 2708 return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk); 2709 } 2710 2711 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end) 2712 { 2713 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2714 u32 size; 2715 2716 if (!ptr->data || start > end) 2717 return -EINVAL; 2718 2719 size = __bpf_dynptr_size(ptr); 2720 2721 if (start > size || end > size) 2722 return -ERANGE; 2723 2724 ptr->offset += start; 2725 bpf_dynptr_set_size(ptr, end - start); 2726 2727 return 0; 2728 } 2729 2730 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p) 2731 { 2732 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2733 2734 return !ptr->data; 2735 } 2736 2737 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p) 2738 { 2739 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2740 2741 if (!ptr->data) 2742 return false; 2743 2744 return __bpf_dynptr_is_rdonly(ptr); 2745 } 2746 2747 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p) 2748 { 2749 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2750 2751 if (!ptr->data) 2752 return -EINVAL; 2753 2754 return __bpf_dynptr_size(ptr); 2755 } 2756 2757 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p, 2758 struct bpf_dynptr *clone__uninit) 2759 { 2760 struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit; 2761 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2762 2763 if (!ptr->data) { 2764 bpf_dynptr_set_null(clone); 2765 return -EINVAL; 2766 } 2767 2768 *clone = *ptr; 2769 2770 return 0; 2771 } 2772 2773 /** 2774 * bpf_dynptr_copy() - Copy data from one dynptr to another. 2775 * @dst_ptr: Destination dynptr - where data should be copied to 2776 * @dst_off: Offset into the destination dynptr 2777 * @src_ptr: Source dynptr - where data should be copied from 2778 * @src_off: Offset into the source dynptr 2779 * @size: Length of the data to copy from source to destination 2780 * 2781 * Copies data from source dynptr to destination dynptr. 2782 * Returns 0 on success; negative error, otherwise. 
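 *
 * A minimal usage sketch from a BPF program (illustrative only; 'src', 'dst'
 * and 'err' are assumed to be already declared, with both dynptrs
 * initialized):
 *
 *	err = bpf_dynptr_copy(&dst, 0, &src, 0, bpf_dynptr_size(&src));
 *	if (err)
 *		return err;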
2783 */ 2784 __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off, 2785 struct bpf_dynptr *src_ptr, u32 src_off, u32 size) 2786 { 2787 struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr; 2788 struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr; 2789 void *src_slice, *dst_slice; 2790 char buf[256]; 2791 u32 off; 2792 2793 src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size); 2794 dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size); 2795 2796 if (src_slice && dst_slice) { 2797 memmove(dst_slice, src_slice, size); 2798 return 0; 2799 } 2800 2801 if (src_slice) 2802 return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0); 2803 2804 if (dst_slice) 2805 return __bpf_dynptr_read(dst_slice, size, src, src_off, 0); 2806 2807 if (bpf_dynptr_check_off_len(dst, dst_off, size) || 2808 bpf_dynptr_check_off_len(src, src_off, size)) 2809 return -E2BIG; 2810 2811 off = 0; 2812 while (off < size) { 2813 u32 chunk_sz = min_t(u32, sizeof(buf), size - off); 2814 int err; 2815 2816 err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0); 2817 if (err) 2818 return err; 2819 err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0); 2820 if (err) 2821 return err; 2822 2823 off += chunk_sz; 2824 } 2825 return 0; 2826 } 2827 2828 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj) 2829 { 2830 return obj; 2831 } 2832 2833 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k) 2834 { 2835 return (void *)obj__ign; 2836 } 2837 2838 __bpf_kfunc void bpf_rcu_read_lock(void) 2839 { 2840 rcu_read_lock(); 2841 } 2842 2843 __bpf_kfunc void bpf_rcu_read_unlock(void) 2844 { 2845 rcu_read_unlock(); 2846 } 2847 2848 struct bpf_throw_ctx { 2849 struct bpf_prog_aux *aux; 2850 u64 sp; 2851 u64 bp; 2852 int cnt; 2853 }; 2854 2855 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp) 2856 { 2857 struct bpf_throw_ctx *ctx = cookie; 2858 struct bpf_prog *prog; 2859 2860 if (!is_bpf_text_address(ip)) 2861 return !ctx->cnt; 2862 prog = bpf_prog_ksym_find(ip); 2863 ctx->cnt++; 2864 if (bpf_is_subprog(prog)) 2865 return true; 2866 ctx->aux = prog->aux; 2867 ctx->sp = sp; 2868 ctx->bp = bp; 2869 return false; 2870 } 2871 2872 __bpf_kfunc void bpf_throw(u64 cookie) 2873 { 2874 struct bpf_throw_ctx ctx = {}; 2875 2876 arch_bpf_stack_walk(bpf_stack_walker, &ctx); 2877 WARN_ON_ONCE(!ctx.aux); 2878 if (ctx.aux) 2879 WARN_ON_ONCE(!ctx.aux->exception_boundary); 2880 WARN_ON_ONCE(!ctx.bp); 2881 WARN_ON_ONCE(!ctx.cnt); 2882 /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning 2883 * deeper stack depths than ctx.sp as we do not return from bpf_throw, 2884 * which skips compiler generated instrumentation to do the same. 
2885 */ 2886 kasan_unpoison_task_stack_below((void *)(long)ctx.sp); 2887 ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0); 2888 WARN(1, "A call to BPF exception callback should never return\n"); 2889 } 2890 2891 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) 2892 { 2893 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 2894 struct bpf_map *map = p__map; 2895 2896 BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq)); 2897 BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq)); 2898 2899 if (flags) 2900 return -EINVAL; 2901 2902 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ); 2903 } 2904 2905 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) 2906 { 2907 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 2908 struct bpf_work *w; 2909 2910 if (in_nmi()) 2911 return -EOPNOTSUPP; 2912 if (flags) 2913 return -EINVAL; 2914 w = READ_ONCE(async->work); 2915 if (!w || !READ_ONCE(w->cb.prog)) 2916 return -EINVAL; 2917 2918 schedule_work(&w->work); 2919 return 0; 2920 } 2921 2922 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, 2923 int (callback_fn)(void *map, int *key, void *value), 2924 unsigned int flags, 2925 void *aux__ign) 2926 { 2927 struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__ign; 2928 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 2929 2930 if (flags) 2931 return -EINVAL; 2932 2933 return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); 2934 } 2935 2936 __bpf_kfunc void bpf_preempt_disable(void) 2937 { 2938 preempt_disable(); 2939 } 2940 2941 __bpf_kfunc void bpf_preempt_enable(void) 2942 { 2943 preempt_enable(); 2944 } 2945 2946 struct bpf_iter_bits { 2947 __u64 __opaque[2]; 2948 } __aligned(8); 2949 2950 #define BITS_ITER_NR_WORDS_MAX 511 2951 2952 struct bpf_iter_bits_kern { 2953 union { 2954 __u64 *bits; 2955 __u64 bits_copy; 2956 }; 2957 int nr_bits; 2958 int bit; 2959 } __aligned(8); 2960 2961 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing 2962 * a u64 pointer and an unsigned long pointer to find_next_bit() will 2963 * return the same result, as both point to the same 8-byte area. 2964 * 2965 * For 32-bit little-endian hosts, using a u64 pointer or unsigned long 2966 * pointer also makes no difference. This is because the first iterated 2967 * unsigned long is composed of bits 0-31 of the u64 and the second unsigned 2968 * long is composed of bits 32-63 of the u64. 2969 * 2970 * However, for 32-bit big-endian hosts, this is not the case. The first 2971 * iterated unsigned long will be bits 32-63 of the u64, so swap these two 2972 * ulong values within the u64. 2973 */ 2974 static void swap_ulong_in_u64(u64 *bits, unsigned int nr) 2975 { 2976 #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN) 2977 unsigned int i; 2978 2979 for (i = 0; i < nr; i++) 2980 bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32); 2981 #endif 2982 } 2983 2984 /** 2985 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area 2986 * @it: The new bpf_iter_bits to be created 2987 * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over 2988 * @nr_words: The size of the specified memory area, measured in 8-byte units. 2989 * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be 2990 * further reduced by the BPF memory allocator implementation. 
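 *
 * In BPF programs this iterator is typically driven through libbpf's
 * bpf_for_each() convenience macro rather than by calling the new/next/destroy
 * kfuncs directly. An illustrative sketch (the 'mask' pointer and 'cnt'
 * counter are assumptions):
 *
 *	int *bit, cnt = 0;
 *
 *	bpf_for_each(bits, bit, mask, 1)
 *		cnt++;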
2991 * 2992 * This function initializes a new bpf_iter_bits structure for iterating over 2993 * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It 2994 * copies the data of the memory area to the newly created bpf_iter_bits @it for 2995 * subsequent iteration operations. 2996 * 2997 * On success, 0 is returned. On failure, ERR is returned. 2998 */ 2999 __bpf_kfunc int 3000 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) 3001 { 3002 struct bpf_iter_bits_kern *kit = (void *)it; 3003 u32 nr_bytes = nr_words * sizeof(u64); 3004 u32 nr_bits = BYTES_TO_BITS(nr_bytes); 3005 int err; 3006 3007 BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits)); 3008 BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) != 3009 __alignof__(struct bpf_iter_bits)); 3010 3011 kit->nr_bits = 0; 3012 kit->bits_copy = 0; 3013 kit->bit = -1; 3014 3015 if (!unsafe_ptr__ign || !nr_words) 3016 return -EINVAL; 3017 if (nr_words > BITS_ITER_NR_WORDS_MAX) 3018 return -E2BIG; 3019 3020 /* Optimization for u64 mask */ 3021 if (nr_bits == 64) { 3022 err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign); 3023 if (err) 3024 return -EFAULT; 3025 3026 swap_ulong_in_u64(&kit->bits_copy, nr_words); 3027 3028 kit->nr_bits = nr_bits; 3029 return 0; 3030 } 3031 3032 if (bpf_mem_alloc_check_size(false, nr_bytes)) 3033 return -E2BIG; 3034 3035 /* Fallback to memalloc */ 3036 kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes); 3037 if (!kit->bits) 3038 return -ENOMEM; 3039 3040 err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign); 3041 if (err) { 3042 bpf_mem_free(&bpf_global_ma, kit->bits); 3043 return err; 3044 } 3045 3046 swap_ulong_in_u64(kit->bits, nr_words); 3047 3048 kit->nr_bits = nr_bits; 3049 return 0; 3050 } 3051 3052 /** 3053 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits 3054 * @it: The bpf_iter_bits to be checked 3055 * 3056 * This function returns a pointer to a number representing the value of the 3057 * next bit in the bits. 3058 * 3059 * If there are no further bits available, it returns NULL. 3060 */ 3061 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it) 3062 { 3063 struct bpf_iter_bits_kern *kit = (void *)it; 3064 int bit = kit->bit, nr_bits = kit->nr_bits; 3065 const void *bits; 3066 3067 if (!nr_bits || bit >= nr_bits) 3068 return NULL; 3069 3070 bits = nr_bits == 64 ? &kit->bits_copy : kit->bits; 3071 bit = find_next_bit(bits, nr_bits, bit + 1); 3072 if (bit >= nr_bits) { 3073 kit->bit = bit; 3074 return NULL; 3075 } 3076 3077 kit->bit = bit; 3078 return &kit->bit; 3079 } 3080 3081 /** 3082 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits 3083 * @it: The bpf_iter_bits to be destroyed 3084 * 3085 * Destroy the resource associated with the bpf_iter_bits. 3086 */ 3087 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it) 3088 { 3089 struct bpf_iter_bits_kern *kit = (void *)it; 3090 3091 if (kit->nr_bits <= 64) 3092 return; 3093 bpf_mem_free(&bpf_global_ma, kit->bits); 3094 } 3095 3096 /** 3097 * bpf_copy_from_user_str() - Copy a string from an unsafe user address 3098 * @dst: Destination address, in kernel space. This buffer must be 3099 * at least @dst__sz bytes long. 3100 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL. 3101 * @unsafe_ptr__ign: Source address, in user space. 3102 * @flags: The only supported flag is BPF_F_PAD_ZEROS 3103 * 3104 * Copies a NUL-terminated string from userspace to BPF space. 
If user string is 3105 * too long this will still ensure zero termination in the dst buffer unless 3106 * buffer size is 0. 3107 * 3108 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and 3109 * memset all of @dst on failure. 3110 */ 3111 __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags) 3112 { 3113 int ret; 3114 3115 if (unlikely(flags & ~BPF_F_PAD_ZEROS)) 3116 return -EINVAL; 3117 3118 if (unlikely(!dst__sz)) 3119 return 0; 3120 3121 ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1); 3122 if (ret < 0) { 3123 if (flags & BPF_F_PAD_ZEROS) 3124 memset((char *)dst, 0, dst__sz); 3125 3126 return ret; 3127 } 3128 3129 if (flags & BPF_F_PAD_ZEROS) 3130 memset((char *)dst + ret, 0, dst__sz - ret); 3131 else 3132 ((char *)dst)[ret] = '\0'; 3133 3134 return ret + 1; 3135 } 3136 3137 /** 3138 * bpf_copy_from_user_task_str() - Copy a string from a task's address space 3139 * @dst: Destination address, in kernel space. This buffer must be 3140 * at least @dst__sz bytes long. 3141 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL. 3142 * @unsafe_ptr__ign: Source address in the task's address space. 3143 * @tsk: The task whose address space will be used 3144 * @flags: The only supported flag is BPF_F_PAD_ZEROS 3145 * 3146 * Copies a NUL-terminated string from a task's address space to the @dst 3147 * buffer. If user string is too long this will still ensure zero termination 3148 * in the @dst buffer unless buffer size is 0. 3149 * 3150 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success 3151 * and memset all of @dst on failure. 3152 * 3153 * Return: The number of copied bytes on success including the NUL terminator. 3154 * A negative error code on failure. 3155 */ 3156 __bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz, 3157 const void __user *unsafe_ptr__ign, 3158 struct task_struct *tsk, u64 flags) 3159 { 3160 int ret; 3161 3162 if (unlikely(flags & ~BPF_F_PAD_ZEROS)) 3163 return -EINVAL; 3164 3165 if (unlikely(dst__sz == 0)) 3166 return 0; 3167 3168 ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0); 3169 if (ret < 0) { 3170 if (flags & BPF_F_PAD_ZEROS) 3171 memset(dst, 0, dst__sz); 3172 return ret; 3173 } 3174 3175 if (flags & BPF_F_PAD_ZEROS) 3176 memset(dst + ret, 0, dst__sz - ret); 3177 3178 return ret + 1; 3179 } 3180 3181 /* Keep unsigned long in prototype so that kfunc is usable when emitted to 3182 * vmlinux.h in BPF programs directly, but note that while in BPF prog, the 3183 * unsigned long always points to 8-byte region on stack, the kernel may only 3184 * read and write the 4-bytes on 32-bit.
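 *
 * An illustrative (hypothetical) save/restore pairing from a BPF program:
 *
 *	unsigned long flags;
 *
 *	bpf_local_irq_save(&flags);
 *	// ... IRQ-disabled critical section ...
 *	bpf_local_irq_restore(&flags);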
3185 */ 3186 __bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag) 3187 { 3188 local_irq_save(*flags__irq_flag); 3189 } 3190 3191 __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag) 3192 { 3193 local_irq_restore(*flags__irq_flag); 3194 } 3195 3196 __bpf_kfunc_end_defs(); 3197 3198 BTF_KFUNCS_START(generic_btf_ids) 3199 #ifdef CONFIG_CRASH_DUMP 3200 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) 3201 #endif 3202 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) 3203 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) 3204 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) 3205 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE) 3206 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU) 3207 BTF_ID_FLAGS(func, bpf_list_push_front_impl) 3208 BTF_ID_FLAGS(func, bpf_list_push_back_impl) 3209 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) 3210 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) 3211 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3212 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) 3213 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL) 3214 BTF_ID_FLAGS(func, bpf_rbtree_add_impl) 3215 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) 3216 3217 #ifdef CONFIG_CGROUPS 3218 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3219 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) 3220 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3221 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) 3222 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU) 3223 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3224 #endif 3225 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) 3226 BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL) 3227 BTF_ID_FLAGS(func, bpf_throw) 3228 #ifdef CONFIG_BPF_EVENTS 3229 BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS) 3230 #endif 3231 BTF_KFUNCS_END(generic_btf_ids) 3232 3233 static const struct btf_kfunc_id_set generic_kfunc_set = { 3234 .owner = THIS_MODULE, 3235 .set = &generic_btf_ids, 3236 }; 3237 3238 3239 BTF_ID_LIST(generic_dtor_ids) 3240 BTF_ID(struct, task_struct) 3241 BTF_ID(func, bpf_task_release_dtor) 3242 #ifdef CONFIG_CGROUPS 3243 BTF_ID(struct, cgroup) 3244 BTF_ID(func, bpf_cgroup_release_dtor) 3245 #endif 3246 3247 BTF_KFUNCS_START(common_btf_ids) 3248 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL) 3249 BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL) 3250 BTF_ID_FLAGS(func, bpf_rcu_read_lock) 3251 BTF_ID_FLAGS(func, bpf_rcu_read_unlock) 3252 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) 3253 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL) 3254 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW) 3255 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL) 3256 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) 3257 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU) 3258 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL) 3259 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) 3260 #ifdef CONFIG_CGROUPS 3261 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) 3262 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) 3263 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) 3264 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS 
| KF_RCU_PROTECTED) 3265 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) 3266 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) 3267 #endif 3268 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) 3269 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) 3270 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) 3271 BTF_ID_FLAGS(func, bpf_dynptr_adjust) 3272 BTF_ID_FLAGS(func, bpf_dynptr_is_null) 3273 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) 3274 BTF_ID_FLAGS(func, bpf_dynptr_size) 3275 BTF_ID_FLAGS(func, bpf_dynptr_clone) 3276 BTF_ID_FLAGS(func, bpf_dynptr_copy) 3277 #ifdef CONFIG_NET 3278 BTF_ID_FLAGS(func, bpf_modify_return_test_tp) 3279 #endif 3280 BTF_ID_FLAGS(func, bpf_wq_init) 3281 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl) 3282 BTF_ID_FLAGS(func, bpf_wq_start) 3283 BTF_ID_FLAGS(func, bpf_preempt_disable) 3284 BTF_ID_FLAGS(func, bpf_preempt_enable) 3285 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW) 3286 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL) 3287 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY) 3288 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE) 3289 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE) 3290 BTF_ID_FLAGS(func, bpf_get_kmem_cache) 3291 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE) 3292 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) 3293 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) 3294 BTF_ID_FLAGS(func, bpf_local_irq_save) 3295 BTF_ID_FLAGS(func, bpf_local_irq_restore) 3296 BTF_KFUNCS_END(common_btf_ids) 3297 3298 static const struct btf_kfunc_id_set common_kfunc_set = { 3299 .owner = THIS_MODULE, 3300 .set = &common_btf_ids, 3301 }; 3302 3303 static int __init kfunc_init(void) 3304 { 3305 int ret; 3306 const struct btf_id_dtor_kfunc generic_dtors[] = { 3307 { 3308 .btf_id = generic_dtor_ids[0], 3309 .kfunc_btf_id = generic_dtor_ids[1] 3310 }, 3311 #ifdef CONFIG_CGROUPS 3312 { 3313 .btf_id = generic_dtor_ids[2], 3314 .kfunc_btf_id = generic_dtor_ids[3] 3315 }, 3316 #endif 3317 }; 3318 3319 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set); 3320 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set); 3321 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set); 3322 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set); 3323 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set); 3324 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set); 3325 ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors, 3326 ARRAY_SIZE(generic_dtors), 3327 THIS_MODULE); 3328 return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set); 3329 } 3330 3331 late_initcall(kfunc_init); 3332 3333 /* Get a pointer to dynptr data up to len bytes for read only access. If 3334 * the dynptr doesn't have continuous data up to len bytes, return NULL. 3335 */ 3336 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len) 3337 { 3338 const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr; 3339 3340 return bpf_dynptr_slice(p, 0, NULL, len); 3341 } 3342 3343 /* Get a pointer to dynptr data up to len bytes for read write access. If 3344 * the dynptr doesn't have continuous data up to len bytes, or the dynptr 3345 * is read only, return NULL. 
3346 */ 3347 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len) 3348 { 3349 if (__bpf_dynptr_is_rdonly(ptr)) 3350 return NULL; 3351 return (void *)__bpf_dynptr_data(ptr, len); 3352 } 3353