1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #include <linux/bpf.h> 5 #include <linux/btf.h> 6 #include <linux/bpf-cgroup.h> 7 #include <linux/cgroup.h> 8 #include <linux/rcupdate.h> 9 #include <linux/random.h> 10 #include <linux/smp.h> 11 #include <linux/topology.h> 12 #include <linux/ktime.h> 13 #include <linux/sched.h> 14 #include <linux/uidgid.h> 15 #include <linux/filter.h> 16 #include <linux/ctype.h> 17 #include <linux/jiffies.h> 18 #include <linux/pid_namespace.h> 19 #include <linux/poison.h> 20 #include <linux/proc_ns.h> 21 #include <linux/sched/task.h> 22 #include <linux/security.h> 23 #include <linux/btf_ids.h> 24 #include <linux/bpf_mem_alloc.h> 25 #include <linux/kasan.h> 26 #include <linux/bpf_verifier.h> 27 #include <linux/uaccess.h> 28 29 #include "../../lib/kstrtox.h" 30 31 /* If kernel subsystem is allowing eBPF programs to call this function, 32 * inside its own verifier_ops->get_func_proto() callback it should return 33 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments 34 * 35 * Different map implementations will rely on rcu in map methods 36 * lookup/update/delete, therefore eBPF programs must run under rcu lock 37 * if program is allowed to access maps, so check rcu_read_lock_held() or 38 * rcu_read_lock_trace_held() in all three functions. 39 */ 40 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) 41 { 42 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 43 !rcu_read_lock_bh_held()); 44 return (unsigned long) map->ops->map_lookup_elem(map, key); 45 } 46 47 const struct bpf_func_proto bpf_map_lookup_elem_proto = { 48 .func = bpf_map_lookup_elem, 49 .gpl_only = false, 50 .pkt_access = true, 51 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 52 .arg1_type = ARG_CONST_MAP_PTR, 53 .arg2_type = ARG_PTR_TO_MAP_KEY, 54 }; 55 56 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key, 57 void *, value, u64, flags) 58 { 59 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 60 !rcu_read_lock_bh_held()); 61 return map->ops->map_update_elem(map, key, value, flags); 62 } 63 64 const struct bpf_func_proto bpf_map_update_elem_proto = { 65 .func = bpf_map_update_elem, 66 .gpl_only = false, 67 .pkt_access = true, 68 .ret_type = RET_INTEGER, 69 .arg1_type = ARG_CONST_MAP_PTR, 70 .arg2_type = ARG_PTR_TO_MAP_KEY, 71 .arg3_type = ARG_PTR_TO_MAP_VALUE, 72 .arg4_type = ARG_ANYTHING, 73 }; 74 75 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key) 76 { 77 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 78 !rcu_read_lock_bh_held()); 79 return map->ops->map_delete_elem(map, key); 80 } 81 82 const struct bpf_func_proto bpf_map_delete_elem_proto = { 83 .func = bpf_map_delete_elem, 84 .gpl_only = false, 85 .pkt_access = true, 86 .ret_type = RET_INTEGER, 87 .arg1_type = ARG_CONST_MAP_PTR, 88 .arg2_type = ARG_PTR_TO_MAP_KEY, 89 }; 90 91 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags) 92 { 93 return map->ops->map_push_elem(map, value, flags); 94 } 95 96 const struct bpf_func_proto bpf_map_push_elem_proto = { 97 .func = bpf_map_push_elem, 98 .gpl_only = false, 99 .pkt_access = true, 100 .ret_type = RET_INTEGER, 101 .arg1_type = ARG_CONST_MAP_PTR, 102 .arg2_type = ARG_PTR_TO_MAP_VALUE, 103 .arg3_type = ARG_ANYTHING, 104 }; 105 106 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value) 107 { 108 return map->ops->map_pop_elem(map, value); 109 } 110 111 const struct 
bpf_func_proto bpf_map_pop_elem_proto = { 112 .func = bpf_map_pop_elem, 113 .gpl_only = false, 114 .ret_type = RET_INTEGER, 115 .arg1_type = ARG_CONST_MAP_PTR, 116 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE, 117 }; 118 119 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) 120 { 121 return map->ops->map_peek_elem(map, value); 122 } 123 124 const struct bpf_func_proto bpf_map_peek_elem_proto = { 125 .func = bpf_map_peek_elem, 126 .gpl_only = false, 127 .ret_type = RET_INTEGER, 128 .arg1_type = ARG_CONST_MAP_PTR, 129 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE, 130 }; 131 132 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) 133 { 134 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 135 !rcu_read_lock_bh_held()); 136 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); 137 } 138 139 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = { 140 .func = bpf_map_lookup_percpu_elem, 141 .gpl_only = false, 142 .pkt_access = true, 143 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 144 .arg1_type = ARG_CONST_MAP_PTR, 145 .arg2_type = ARG_PTR_TO_MAP_KEY, 146 .arg3_type = ARG_ANYTHING, 147 }; 148 149 const struct bpf_func_proto bpf_get_prandom_u32_proto = { 150 .func = bpf_user_rnd_u32, 151 .gpl_only = false, 152 .ret_type = RET_INTEGER, 153 }; 154 155 BPF_CALL_0(bpf_get_smp_processor_id) 156 { 157 return smp_processor_id(); 158 } 159 160 const struct bpf_func_proto bpf_get_smp_processor_id_proto = { 161 .func = bpf_get_smp_processor_id, 162 .gpl_only = false, 163 .ret_type = RET_INTEGER, 164 .allow_fastcall = true, 165 }; 166 167 BPF_CALL_0(bpf_get_numa_node_id) 168 { 169 return numa_node_id(); 170 } 171 172 const struct bpf_func_proto bpf_get_numa_node_id_proto = { 173 .func = bpf_get_numa_node_id, 174 .gpl_only = false, 175 .ret_type = RET_INTEGER, 176 }; 177 178 BPF_CALL_0(bpf_ktime_get_ns) 179 { 180 /* NMI safe access to clock monotonic */ 181 return ktime_get_mono_fast_ns(); 182 } 183 184 const struct bpf_func_proto bpf_ktime_get_ns_proto = { 185 .func = bpf_ktime_get_ns, 186 .gpl_only = false, 187 .ret_type = RET_INTEGER, 188 }; 189 190 BPF_CALL_0(bpf_ktime_get_boot_ns) 191 { 192 /* NMI safe access to clock boottime */ 193 return ktime_get_boot_fast_ns(); 194 } 195 196 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = { 197 .func = bpf_ktime_get_boot_ns, 198 .gpl_only = false, 199 .ret_type = RET_INTEGER, 200 }; 201 202 BPF_CALL_0(bpf_ktime_get_coarse_ns) 203 { 204 return ktime_get_coarse_ns(); 205 } 206 207 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = { 208 .func = bpf_ktime_get_coarse_ns, 209 .gpl_only = false, 210 .ret_type = RET_INTEGER, 211 }; 212 213 BPF_CALL_0(bpf_ktime_get_tai_ns) 214 { 215 /* NMI safe access to clock tai */ 216 return ktime_get_tai_fast_ns(); 217 } 218 219 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = { 220 .func = bpf_ktime_get_tai_ns, 221 .gpl_only = false, 222 .ret_type = RET_INTEGER, 223 }; 224 225 BPF_CALL_0(bpf_get_current_pid_tgid) 226 { 227 struct task_struct *task = current; 228 229 if (unlikely(!task)) 230 return -EINVAL; 231 232 return (u64) task->tgid << 32 | task->pid; 233 } 234 235 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = { 236 .func = bpf_get_current_pid_tgid, 237 .gpl_only = false, 238 .ret_type = RET_INTEGER, 239 }; 240 241 BPF_CALL_0(bpf_get_current_uid_gid) 242 { 243 struct task_struct *task = current; 244 kuid_t uid; 245 kgid_t gid; 246 247 if (unlikely(!task)) 248 return 
-EINVAL; 249 250 current_uid_gid(&uid, &gid); 251 return (u64) from_kgid(&init_user_ns, gid) << 32 | 252 from_kuid(&init_user_ns, uid); 253 } 254 255 const struct bpf_func_proto bpf_get_current_uid_gid_proto = { 256 .func = bpf_get_current_uid_gid, 257 .gpl_only = false, 258 .ret_type = RET_INTEGER, 259 }; 260 261 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size) 262 { 263 struct task_struct *task = current; 264 265 if (unlikely(!task)) 266 goto err_clear; 267 268 /* Verifier guarantees that size > 0 */ 269 strscpy_pad(buf, task->comm, size); 270 return 0; 271 err_clear: 272 memset(buf, 0, size); 273 return -EINVAL; 274 } 275 276 const struct bpf_func_proto bpf_get_current_comm_proto = { 277 .func = bpf_get_current_comm, 278 .gpl_only = false, 279 .ret_type = RET_INTEGER, 280 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 281 .arg2_type = ARG_CONST_SIZE, 282 }; 283 284 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK) 285 286 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) 287 { 288 arch_spinlock_t *l = (void *)lock; 289 union { 290 __u32 val; 291 arch_spinlock_t lock; 292 } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED }; 293 294 compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0"); 295 BUILD_BUG_ON(sizeof(*l) != sizeof(__u32)); 296 BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32)); 297 preempt_disable(); 298 arch_spin_lock(l); 299 } 300 301 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) 302 { 303 arch_spinlock_t *l = (void *)lock; 304 305 arch_spin_unlock(l); 306 preempt_enable(); 307 } 308 309 #else 310 311 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) 312 { 313 atomic_t *l = (void *)lock; 314 315 BUILD_BUG_ON(sizeof(*l) != sizeof(*lock)); 316 do { 317 atomic_cond_read_relaxed(l, !VAL); 318 } while (atomic_xchg(l, 1)); 319 } 320 321 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) 322 { 323 atomic_t *l = (void *)lock; 324 325 atomic_set_release(l, 0); 326 } 327 328 #endif 329 330 static DEFINE_PER_CPU(unsigned long, irqsave_flags); 331 332 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock) 333 { 334 unsigned long flags; 335 336 local_irq_save(flags); 337 __bpf_spin_lock(lock); 338 __this_cpu_write(irqsave_flags, flags); 339 } 340 341 NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) 342 { 343 __bpf_spin_lock_irqsave(lock); 344 return 0; 345 } 346 347 const struct bpf_func_proto bpf_spin_lock_proto = { 348 .func = bpf_spin_lock, 349 .gpl_only = false, 350 .ret_type = RET_VOID, 351 .arg1_type = ARG_PTR_TO_SPIN_LOCK, 352 .arg1_btf_id = BPF_PTR_POISON, 353 }; 354 355 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock) 356 { 357 unsigned long flags; 358 359 flags = __this_cpu_read(irqsave_flags); 360 __bpf_spin_unlock(lock); 361 local_irq_restore(flags); 362 } 363 364 NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) 365 { 366 __bpf_spin_unlock_irqrestore(lock); 367 return 0; 368 } 369 370 const struct bpf_func_proto bpf_spin_unlock_proto = { 371 .func = bpf_spin_unlock, 372 .gpl_only = false, 373 .ret_type = RET_VOID, 374 .arg1_type = ARG_PTR_TO_SPIN_LOCK, 375 .arg1_btf_id = BPF_PTR_POISON, 376 }; 377 378 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, 379 bool lock_src) 380 { 381 struct bpf_spin_lock *lock; 382 383 if (lock_src) 384 lock = src + map->record->spin_lock_off; 385 else 386 lock = dst + map->record->spin_lock_off; 387 preempt_disable(); 388 __bpf_spin_lock_irqsave(lock); 389 copy_map_value(map, 
dst, src); 390 __bpf_spin_unlock_irqrestore(lock); 391 preempt_enable(); 392 } 393 394 BPF_CALL_0(bpf_jiffies64) 395 { 396 return get_jiffies_64(); 397 } 398 399 const struct bpf_func_proto bpf_jiffies64_proto = { 400 .func = bpf_jiffies64, 401 .gpl_only = false, 402 .ret_type = RET_INTEGER, 403 }; 404 405 #ifdef CONFIG_CGROUPS 406 BPF_CALL_0(bpf_get_current_cgroup_id) 407 { 408 struct cgroup *cgrp; 409 u64 cgrp_id; 410 411 rcu_read_lock(); 412 cgrp = task_dfl_cgroup(current); 413 cgrp_id = cgroup_id(cgrp); 414 rcu_read_unlock(); 415 416 return cgrp_id; 417 } 418 419 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { 420 .func = bpf_get_current_cgroup_id, 421 .gpl_only = false, 422 .ret_type = RET_INTEGER, 423 }; 424 425 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level) 426 { 427 struct cgroup *cgrp; 428 struct cgroup *ancestor; 429 u64 cgrp_id; 430 431 rcu_read_lock(); 432 cgrp = task_dfl_cgroup(current); 433 ancestor = cgroup_ancestor(cgrp, ancestor_level); 434 cgrp_id = ancestor ? cgroup_id(ancestor) : 0; 435 rcu_read_unlock(); 436 437 return cgrp_id; 438 } 439 440 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = { 441 .func = bpf_get_current_ancestor_cgroup_id, 442 .gpl_only = false, 443 .ret_type = RET_INTEGER, 444 .arg1_type = ARG_ANYTHING, 445 }; 446 #endif /* CONFIG_CGROUPS */ 447 448 #define BPF_STRTOX_BASE_MASK 0x1F 449 450 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags, 451 unsigned long long *res, bool *is_negative) 452 { 453 unsigned int base = flags & BPF_STRTOX_BASE_MASK; 454 const char *cur_buf = buf; 455 size_t cur_len = buf_len; 456 unsigned int consumed; 457 size_t val_len; 458 char str[64]; 459 460 if (!buf || !buf_len || !res || !is_negative) 461 return -EINVAL; 462 463 if (base != 0 && base != 8 && base != 10 && base != 16) 464 return -EINVAL; 465 466 if (flags & ~BPF_STRTOX_BASE_MASK) 467 return -EINVAL; 468 469 while (cur_buf < buf + buf_len && isspace(*cur_buf)) 470 ++cur_buf; 471 472 *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-'); 473 if (*is_negative) 474 ++cur_buf; 475 476 consumed = cur_buf - buf; 477 cur_len -= consumed; 478 if (!cur_len) 479 return -EINVAL; 480 481 cur_len = min(cur_len, sizeof(str) - 1); 482 memcpy(str, cur_buf, cur_len); 483 str[cur_len] = '\0'; 484 cur_buf = str; 485 486 cur_buf = _parse_integer_fixup_radix(cur_buf, &base); 487 val_len = _parse_integer(cur_buf, base, res); 488 489 if (val_len & KSTRTOX_OVERFLOW) 490 return -ERANGE; 491 492 if (val_len == 0) 493 return -EINVAL; 494 495 cur_buf += val_len; 496 consumed += cur_buf - str; 497 498 return consumed; 499 } 500 501 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, 502 long long *res) 503 { 504 unsigned long long _res; 505 bool is_negative; 506 int err; 507 508 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); 509 if (err < 0) 510 return err; 511 if (is_negative) { 512 if ((long long)-_res > 0) 513 return -ERANGE; 514 *res = -_res; 515 } else { 516 if ((long long)_res < 0) 517 return -ERANGE; 518 *res = _res; 519 } 520 return err; 521 } 522 523 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, 524 s64 *, res) 525 { 526 long long _res; 527 int err; 528 529 *res = 0; 530 err = __bpf_strtoll(buf, buf_len, flags, &_res); 531 if (err < 0) 532 return err; 533 *res = _res; 534 return err; 535 } 536 537 const struct bpf_func_proto bpf_strtol_proto = { 538 .func = bpf_strtol, 539 .gpl_only = false, 540 .ret_type = RET_INTEGER, 541 .arg1_type = 
ARG_PTR_TO_MEM | MEM_RDONLY, 542 .arg2_type = ARG_CONST_SIZE, 543 .arg3_type = ARG_ANYTHING, 544 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, 545 .arg4_size = sizeof(s64), 546 }; 547 548 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, 549 u64 *, res) 550 { 551 unsigned long long _res; 552 bool is_negative; 553 int err; 554 555 *res = 0; 556 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); 557 if (err < 0) 558 return err; 559 if (is_negative) 560 return -EINVAL; 561 *res = _res; 562 return err; 563 } 564 565 const struct bpf_func_proto bpf_strtoul_proto = { 566 .func = bpf_strtoul, 567 .gpl_only = false, 568 .ret_type = RET_INTEGER, 569 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 570 .arg2_type = ARG_CONST_SIZE, 571 .arg3_type = ARG_ANYTHING, 572 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, 573 .arg4_size = sizeof(u64), 574 }; 575 576 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) 577 { 578 return strncmp(s1, s2, s1_sz); 579 } 580 581 static const struct bpf_func_proto bpf_strncmp_proto = { 582 .func = bpf_strncmp, 583 .gpl_only = false, 584 .ret_type = RET_INTEGER, 585 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 586 .arg2_type = ARG_CONST_SIZE, 587 .arg3_type = ARG_PTR_TO_CONST_STR, 588 }; 589 590 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino, 591 struct bpf_pidns_info *, nsdata, u32, size) 592 { 593 struct task_struct *task = current; 594 struct pid_namespace *pidns; 595 int err = -EINVAL; 596 597 if (unlikely(size != sizeof(struct bpf_pidns_info))) 598 goto clear; 599 600 if (unlikely((u64)(dev_t)dev != dev)) 601 goto clear; 602 603 if (unlikely(!task)) 604 goto clear; 605 606 pidns = task_active_pid_ns(task); 607 if (unlikely(!pidns)) { 608 err = -ENOENT; 609 goto clear; 610 } 611 612 if (!ns_match(&pidns->ns, (dev_t)dev, ino)) 613 goto clear; 614 615 nsdata->pid = task_pid_nr_ns(task, pidns); 616 nsdata->tgid = task_tgid_nr_ns(task, pidns); 617 return 0; 618 clear: 619 memset((void *)nsdata, 0, (size_t) size); 620 return err; 621 } 622 623 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = { 624 .func = bpf_get_ns_current_pid_tgid, 625 .gpl_only = false, 626 .ret_type = RET_INTEGER, 627 .arg1_type = ARG_ANYTHING, 628 .arg2_type = ARG_ANYTHING, 629 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 630 .arg4_type = ARG_CONST_SIZE, 631 }; 632 633 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { 634 .func = bpf_get_raw_cpu_id, 635 .gpl_only = false, 636 .ret_type = RET_INTEGER, 637 }; 638 639 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map, 640 u64, flags, void *, data, u64, size) 641 { 642 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 643 return -EINVAL; 644 645 return bpf_event_output(map, flags, data, size, NULL, 0, NULL); 646 } 647 648 const struct bpf_func_proto bpf_event_output_data_proto = { 649 .func = bpf_event_output_data, 650 .gpl_only = true, 651 .ret_type = RET_INTEGER, 652 .arg1_type = ARG_PTR_TO_CTX, 653 .arg2_type = ARG_CONST_MAP_PTR, 654 .arg3_type = ARG_ANYTHING, 655 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 656 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 657 }; 658 659 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size, 660 const void __user *, user_ptr) 661 { 662 int ret = copy_from_user(dst, user_ptr, size); 663 664 if (unlikely(ret)) { 665 memset(dst, 0, size); 666 ret = -EFAULT; 667 } 668 669 return ret; 670 } 671 672 const struct bpf_func_proto bpf_copy_from_user_proto = { 673 .func = 
bpf_copy_from_user, 674 .gpl_only = false, 675 .might_sleep = true, 676 .ret_type = RET_INTEGER, 677 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 678 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 679 .arg3_type = ARG_ANYTHING, 680 }; 681 682 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size, 683 const void __user *, user_ptr, struct task_struct *, tsk, u64, flags) 684 { 685 int ret; 686 687 /* flags is not used yet */ 688 if (unlikely(flags)) 689 return -EINVAL; 690 691 if (unlikely(!size)) 692 return 0; 693 694 ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0); 695 if (ret == size) 696 return 0; 697 698 memset(dst, 0, size); 699 /* Return -EFAULT for partial read */ 700 return ret < 0 ? ret : -EFAULT; 701 } 702 703 const struct bpf_func_proto bpf_copy_from_user_task_proto = { 704 .func = bpf_copy_from_user_task, 705 .gpl_only = true, 706 .might_sleep = true, 707 .ret_type = RET_INTEGER, 708 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 709 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 710 .arg3_type = ARG_ANYTHING, 711 .arg4_type = ARG_PTR_TO_BTF_ID, 712 .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], 713 .arg5_type = ARG_ANYTHING 714 }; 715 716 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) 717 { 718 if (cpu >= nr_cpu_ids) 719 return (unsigned long)NULL; 720 721 return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu); 722 } 723 724 const struct bpf_func_proto bpf_per_cpu_ptr_proto = { 725 .func = bpf_per_cpu_ptr, 726 .gpl_only = false, 727 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY, 728 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, 729 .arg2_type = ARG_ANYTHING, 730 }; 731 732 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr) 733 { 734 return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr); 735 } 736 737 const struct bpf_func_proto bpf_this_cpu_ptr_proto = { 738 .func = bpf_this_cpu_ptr, 739 .gpl_only = false, 740 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY, 741 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, 742 }; 743 744 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, 745 size_t bufsz) 746 { 747 void __user *user_ptr = (__force void __user *)unsafe_ptr; 748 749 buf[0] = 0; 750 751 switch (fmt_ptype) { 752 case 's': 753 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 754 if ((unsigned long)unsafe_ptr < TASK_SIZE) 755 return strncpy_from_user_nofault(buf, user_ptr, bufsz); 756 fallthrough; 757 #endif 758 case 'k': 759 return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz); 760 case 'u': 761 return strncpy_from_user_nofault(buf, user_ptr, bufsz); 762 } 763 764 return -EINVAL; 765 } 766 767 /* Support executing three nested bprintf helper calls on a given CPU */ 768 #define MAX_BPRINTF_NEST_LEVEL 3 769 770 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs); 771 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level); 772 773 int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs) 774 { 775 int nest_level; 776 777 preempt_disable(); 778 nest_level = this_cpu_inc_return(bpf_bprintf_nest_level); 779 if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) { 780 this_cpu_dec(bpf_bprintf_nest_level); 781 preempt_enable(); 782 return -EBUSY; 783 } 784 *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); 785 786 return 0; 787 } 788 789 void bpf_put_buffers(void) 790 { 791 if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0)) 792 return; 793 this_cpu_dec(bpf_bprintf_nest_level); 794 preempt_enable(); 795 } 796 797 void 
bpf_bprintf_cleanup(struct bpf_bprintf_data *data) 798 { 799 if (!data->bin_args && !data->buf) 800 return; 801 bpf_put_buffers(); 802 } 803 804 /* 805 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers 806 * 807 * Returns a negative value if fmt is an invalid format string or 0 otherwise. 808 * 809 * This can be used in two ways: 810 * - Format string verification only: when data->get_bin_args is false 811 * - Arguments preparation: in addition to the above verification, it writes in 812 * data->bin_args a binary representation of arguments usable by bstr_printf 813 * where pointers from BPF have been sanitized. 814 * 815 * In argument preparation mode, if 0 is returned, safe temporary buffers are 816 * allocated and bpf_bprintf_cleanup should be called to free them after use. 817 */ 818 int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args, 819 u32 num_args, struct bpf_bprintf_data *data) 820 { 821 bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; 822 char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end; 823 struct bpf_bprintf_buffers *buffers = NULL; 824 size_t sizeof_cur_arg, sizeof_cur_ip; 825 int err, i, num_spec = 0; 826 u64 cur_arg; 827 char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX"; 828 829 fmt_end = strnchr(fmt, fmt_size, 0); 830 if (!fmt_end) 831 return -EINVAL; 832 fmt_size = fmt_end - fmt; 833 834 if (get_buffers && bpf_try_get_buffers(&buffers)) 835 return -EBUSY; 836 837 if (data->get_bin_args) { 838 if (num_args) 839 tmp_buf = buffers->bin_args; 840 tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS; 841 data->bin_args = (u32 *)tmp_buf; 842 } 843 844 if (data->get_buf) 845 data->buf = buffers->buf; 846 847 for (i = 0; i < fmt_size; i++) { 848 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { 849 err = -EINVAL; 850 goto out; 851 } 852 853 if (fmt[i] != '%') 854 continue; 855 856 if (fmt[i + 1] == '%') { 857 i++; 858 continue; 859 } 860 861 if (num_spec >= num_args) { 862 err = -EINVAL; 863 goto out; 864 } 865 866 /* The string is zero-terminated so if fmt[i] != 0, we can 867 * always access fmt[i + 1], in the worst case it will be a 0 868 */ 869 i++; 870 871 /* skip optional "[0 +-][num]" width formatting field */ 872 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || 873 fmt[i] == ' ') 874 i++; 875 if (fmt[i] >= '1' && fmt[i] <= '9') { 876 i++; 877 while (fmt[i] >= '0' && fmt[i] <= '9') 878 i++; 879 } 880 881 if (fmt[i] == 'p') { 882 sizeof_cur_arg = sizeof(long); 883 884 if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) || 885 ispunct(fmt[i + 1])) { 886 if (tmp_buf) 887 cur_arg = raw_args[num_spec]; 888 goto nocopy_fmt; 889 } 890 891 if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') && 892 fmt[i + 2] == 's') { 893 fmt_ptype = fmt[i + 1]; 894 i += 2; 895 goto fmt_str; 896 } 897 898 if (fmt[i + 1] == 'K' || 899 fmt[i + 1] == 'x' || fmt[i + 1] == 's' || 900 fmt[i + 1] == 'S') { 901 if (tmp_buf) 902 cur_arg = raw_args[num_spec]; 903 i++; 904 goto nocopy_fmt; 905 } 906 907 if (fmt[i + 1] == 'B') { 908 if (tmp_buf) { 909 err = snprintf(tmp_buf, 910 (tmp_buf_end - tmp_buf), 911 "%pB", 912 (void *)(long)raw_args[num_spec]); 913 tmp_buf += (err + 1); 914 } 915 916 i++; 917 num_spec++; 918 continue; 919 } 920 921 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ 922 if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') || 923 (fmt[i + 2] != '4' && fmt[i + 2] != '6')) { 924 err = -EINVAL; 925 goto out; 926 } 927 928 i += 2; 929 if (!tmp_buf) 930 goto nocopy_fmt; 931 932 sizeof_cur_ip = (fmt[i] == '4') ? 
4 : 16; 933 if (tmp_buf_end - tmp_buf < sizeof_cur_ip) { 934 err = -ENOSPC; 935 goto out; 936 } 937 938 unsafe_ptr = (char *)(long)raw_args[num_spec]; 939 err = copy_from_kernel_nofault(cur_ip, unsafe_ptr, 940 sizeof_cur_ip); 941 if (err < 0) 942 memset(cur_ip, 0, sizeof_cur_ip); 943 944 /* hack: bstr_printf expects IP addresses to be 945 * pre-formatted as strings, ironically, the easiest way 946 * to do that is to call snprintf. 947 */ 948 ip_spec[2] = fmt[i - 1]; 949 ip_spec[3] = fmt[i]; 950 err = snprintf(tmp_buf, tmp_buf_end - tmp_buf, 951 ip_spec, &cur_ip); 952 953 tmp_buf += err + 1; 954 num_spec++; 955 956 continue; 957 } else if (fmt[i] == 's') { 958 fmt_ptype = fmt[i]; 959 fmt_str: 960 if (fmt[i + 1] != 0 && 961 !isspace(fmt[i + 1]) && 962 !ispunct(fmt[i + 1])) { 963 err = -EINVAL; 964 goto out; 965 } 966 967 if (!tmp_buf) 968 goto nocopy_fmt; 969 970 if (tmp_buf_end == tmp_buf) { 971 err = -ENOSPC; 972 goto out; 973 } 974 975 unsafe_ptr = (char *)(long)raw_args[num_spec]; 976 err = bpf_trace_copy_string(tmp_buf, unsafe_ptr, 977 fmt_ptype, 978 tmp_buf_end - tmp_buf); 979 if (err < 0) { 980 tmp_buf[0] = '\0'; 981 err = 1; 982 } 983 984 tmp_buf += err; 985 num_spec++; 986 987 continue; 988 } else if (fmt[i] == 'c') { 989 if (!tmp_buf) 990 goto nocopy_fmt; 991 992 if (tmp_buf_end == tmp_buf) { 993 err = -ENOSPC; 994 goto out; 995 } 996 997 *tmp_buf = raw_args[num_spec]; 998 tmp_buf++; 999 num_spec++; 1000 1001 continue; 1002 } 1003 1004 sizeof_cur_arg = sizeof(int); 1005 1006 if (fmt[i] == 'l') { 1007 sizeof_cur_arg = sizeof(long); 1008 i++; 1009 } 1010 if (fmt[i] == 'l') { 1011 sizeof_cur_arg = sizeof(long long); 1012 i++; 1013 } 1014 1015 if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' && 1016 fmt[i] != 'x' && fmt[i] != 'X') { 1017 err = -EINVAL; 1018 goto out; 1019 } 1020 1021 if (tmp_buf) 1022 cur_arg = raw_args[num_spec]; 1023 nocopy_fmt: 1024 if (tmp_buf) { 1025 tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32)); 1026 if (tmp_buf_end - tmp_buf < sizeof_cur_arg) { 1027 err = -ENOSPC; 1028 goto out; 1029 } 1030 1031 if (sizeof_cur_arg == 8) { 1032 *(u32 *)tmp_buf = *(u32 *)&cur_arg; 1033 *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1); 1034 } else { 1035 *(u32 *)tmp_buf = (u32)(long)cur_arg; 1036 } 1037 tmp_buf += sizeof_cur_arg; 1038 } 1039 num_spec++; 1040 } 1041 1042 err = 0; 1043 out: 1044 if (err) 1045 bpf_bprintf_cleanup(data); 1046 return err; 1047 } 1048 1049 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, 1050 const void *, args, u32, data_len) 1051 { 1052 struct bpf_bprintf_data data = { 1053 .get_bin_args = true, 1054 }; 1055 int err, num_args; 1056 1057 if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 || 1058 (data_len && !args)) 1059 return -EINVAL; 1060 num_args = data_len / 8; 1061 1062 /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we 1063 * can safely give an unbounded size. 
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func = bpf_snprintf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_PTR_TO_CONST_STR,
	.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

struct bpf_async_cb {
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
	union {
		struct rcu_head rcu;
		struct work_struct delete_work;
	};
	u64 flags;
};

/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_async_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point, the
 * ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing prog's refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref
 * frees the timers when an inner map is replaced or deleted by user space.
 */
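/* A minimal sketch of the lifecycle described above, as seen from a BPF
 * program (illustrative only, not code from this file; "timer_map",
 * "struct map_elem" and "timer_cb" are hypothetical names and all error
 * handling is omitted):
 *
 *	static int timer_cb(void *map, int *key, struct map_elem *val)
 *	{
 *		return 0;	(the verifier requires a zero return)
 *	}
 *
 *	struct map_elem *val = bpf_map_lookup_elem(&timer_map, &key);
 *
 *	if (val) {
 *		bpf_timer_init(&val->timer, &timer_map, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->timer, timer_cb);
 *		bpf_timer_start(&val->timer, 1000000, 0);	(~1ms from now)
 *	}
 */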
struct bpf_hrtimer {
	struct bpf_async_cb cb;
	struct hrtimer timer;
	atomic_t cancelling;
};

struct bpf_work {
	struct bpf_async_cb cb;
	struct work_struct work;
	struct work_struct delete_work;
};

/* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
struct bpf_async_kern {
	union {
		struct bpf_async_cb *cb;
		struct bpf_hrtimer *timer;
		struct bpf_work *work;
	};
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

enum bpf_async_type {
	BPF_ASYNC_TYPE_TIMER = 0,
	BPF_ASYNC_TYPE_WQ,
};

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->cb.map;
	void *value = t->cb.value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

static void bpf_wq_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, work);
	struct bpf_async_cb *cb = &w->cb;
	struct bpf_map *map = cb->map;
	bpf_callback_t callback_fn;
	void *value = cb->value;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_wq);

	callback_fn = READ_ONCE(cb->callback_fn);
	if (!callback_fn)
		return;

	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	rcu_read_lock_trace();
	migrate_disable();

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);

	migrate_enable();
	rcu_read_unlock_trace();
}

static void bpf_wq_delete_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);

	cancel_work_sync(&w->work);

	kfree_rcu(w, cb.rcu);
}

static void bpf_timer_delete_work(struct work_struct *work)
{
	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);

	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call
	 * kfree_rcu(t) right after for both preallocated and non-preallocated
	 * maps. The async->cb = NULL was already done and no code path can see
	 * address 't' anymore. A timer armed on this bpf_hrtimer before
	 * bpf_timer_cancel_and_free() will have been cancelled.
1241 */ 1242 hrtimer_cancel(&t->timer); 1243 kfree_rcu(t, cb.rcu); 1244 } 1245 1246 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags, 1247 enum bpf_async_type type) 1248 { 1249 struct bpf_async_cb *cb; 1250 struct bpf_hrtimer *t; 1251 struct bpf_work *w; 1252 clockid_t clockid; 1253 size_t size; 1254 int ret = 0; 1255 1256 if (in_nmi()) 1257 return -EOPNOTSUPP; 1258 1259 switch (type) { 1260 case BPF_ASYNC_TYPE_TIMER: 1261 size = sizeof(struct bpf_hrtimer); 1262 break; 1263 case BPF_ASYNC_TYPE_WQ: 1264 size = sizeof(struct bpf_work); 1265 break; 1266 default: 1267 return -EINVAL; 1268 } 1269 1270 __bpf_spin_lock_irqsave(&async->lock); 1271 t = async->timer; 1272 if (t) { 1273 ret = -EBUSY; 1274 goto out; 1275 } 1276 1277 /* allocate hrtimer via map_kmalloc to use memcg accounting */ 1278 cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node); 1279 if (!cb) { 1280 ret = -ENOMEM; 1281 goto out; 1282 } 1283 1284 switch (type) { 1285 case BPF_ASYNC_TYPE_TIMER: 1286 clockid = flags & (MAX_CLOCKS - 1); 1287 t = (struct bpf_hrtimer *)cb; 1288 1289 atomic_set(&t->cancelling, 0); 1290 INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work); 1291 hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT); 1292 cb->value = (void *)async - map->record->timer_off; 1293 break; 1294 case BPF_ASYNC_TYPE_WQ: 1295 w = (struct bpf_work *)cb; 1296 1297 INIT_WORK(&w->work, bpf_wq_work); 1298 INIT_WORK(&w->delete_work, bpf_wq_delete_work); 1299 cb->value = (void *)async - map->record->wq_off; 1300 break; 1301 } 1302 cb->map = map; 1303 cb->prog = NULL; 1304 cb->flags = flags; 1305 rcu_assign_pointer(cb->callback_fn, NULL); 1306 1307 WRITE_ONCE(async->cb, cb); 1308 /* Guarantee the order between async->cb and map->usercnt. So 1309 * when there are concurrent uref release and bpf timer init, either 1310 * bpf_timer_cancel_and_free() called by uref release reads a no-NULL 1311 * timer or atomic64_read() below returns a zero usercnt. 1312 */ 1313 smp_mb(); 1314 if (!atomic64_read(&map->usercnt)) { 1315 /* maps with timers must be either held by user space 1316 * or pinned in bpffs. 
1317 */ 1318 WRITE_ONCE(async->cb, NULL); 1319 kfree(cb); 1320 ret = -EPERM; 1321 } 1322 out: 1323 __bpf_spin_unlock_irqrestore(&async->lock); 1324 return ret; 1325 } 1326 1327 BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map, 1328 u64, flags) 1329 { 1330 clock_t clockid = flags & (MAX_CLOCKS - 1); 1331 1332 BUILD_BUG_ON(MAX_CLOCKS != 16); 1333 BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer)); 1334 BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer)); 1335 1336 if (flags >= MAX_CLOCKS || 1337 /* similar to timerfd except _ALARM variants are not supported */ 1338 (clockid != CLOCK_MONOTONIC && 1339 clockid != CLOCK_REALTIME && 1340 clockid != CLOCK_BOOTTIME)) 1341 return -EINVAL; 1342 1343 return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER); 1344 } 1345 1346 static const struct bpf_func_proto bpf_timer_init_proto = { 1347 .func = bpf_timer_init, 1348 .gpl_only = true, 1349 .ret_type = RET_INTEGER, 1350 .arg1_type = ARG_PTR_TO_TIMER, 1351 .arg2_type = ARG_CONST_MAP_PTR, 1352 .arg3_type = ARG_ANYTHING, 1353 }; 1354 1355 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn, 1356 struct bpf_prog_aux *aux, unsigned int flags, 1357 enum bpf_async_type type) 1358 { 1359 struct bpf_prog *prev, *prog = aux->prog; 1360 struct bpf_async_cb *cb; 1361 int ret = 0; 1362 1363 if (in_nmi()) 1364 return -EOPNOTSUPP; 1365 __bpf_spin_lock_irqsave(&async->lock); 1366 cb = async->cb; 1367 if (!cb) { 1368 ret = -EINVAL; 1369 goto out; 1370 } 1371 if (!atomic64_read(&cb->map->usercnt)) { 1372 /* maps with timers must be either held by user space 1373 * or pinned in bpffs. Otherwise timer might still be 1374 * running even when bpf prog is detached and user space 1375 * is gone, since map_release_uref won't ever be called. 1376 */ 1377 ret = -EPERM; 1378 goto out; 1379 } 1380 prev = cb->prog; 1381 if (prev != prog) { 1382 /* Bump prog refcnt once. Every bpf_timer_set_callback() 1383 * can pick different callback_fn-s within the same prog. 
1384 */ 1385 prog = bpf_prog_inc_not_zero(prog); 1386 if (IS_ERR(prog)) { 1387 ret = PTR_ERR(prog); 1388 goto out; 1389 } 1390 if (prev) 1391 /* Drop prev prog refcnt when swapping with new prog */ 1392 bpf_prog_put(prev); 1393 cb->prog = prog; 1394 } 1395 rcu_assign_pointer(cb->callback_fn, callback_fn); 1396 out: 1397 __bpf_spin_unlock_irqrestore(&async->lock); 1398 return ret; 1399 } 1400 1401 BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn, 1402 struct bpf_prog_aux *, aux) 1403 { 1404 return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER); 1405 } 1406 1407 static const struct bpf_func_proto bpf_timer_set_callback_proto = { 1408 .func = bpf_timer_set_callback, 1409 .gpl_only = true, 1410 .ret_type = RET_INTEGER, 1411 .arg1_type = ARG_PTR_TO_TIMER, 1412 .arg2_type = ARG_PTR_TO_FUNC, 1413 }; 1414 1415 BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags) 1416 { 1417 struct bpf_hrtimer *t; 1418 int ret = 0; 1419 enum hrtimer_mode mode; 1420 1421 if (in_nmi()) 1422 return -EOPNOTSUPP; 1423 if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN)) 1424 return -EINVAL; 1425 __bpf_spin_lock_irqsave(&timer->lock); 1426 t = timer->timer; 1427 if (!t || !t->cb.prog) { 1428 ret = -EINVAL; 1429 goto out; 1430 } 1431 1432 if (flags & BPF_F_TIMER_ABS) 1433 mode = HRTIMER_MODE_ABS_SOFT; 1434 else 1435 mode = HRTIMER_MODE_REL_SOFT; 1436 1437 if (flags & BPF_F_TIMER_CPU_PIN) 1438 mode |= HRTIMER_MODE_PINNED; 1439 1440 hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); 1441 out: 1442 __bpf_spin_unlock_irqrestore(&timer->lock); 1443 return ret; 1444 } 1445 1446 static const struct bpf_func_proto bpf_timer_start_proto = { 1447 .func = bpf_timer_start, 1448 .gpl_only = true, 1449 .ret_type = RET_INTEGER, 1450 .arg1_type = ARG_PTR_TO_TIMER, 1451 .arg2_type = ARG_ANYTHING, 1452 .arg3_type = ARG_ANYTHING, 1453 }; 1454 1455 static void drop_prog_refcnt(struct bpf_async_cb *async) 1456 { 1457 struct bpf_prog *prog = async->prog; 1458 1459 if (prog) { 1460 bpf_prog_put(prog); 1461 async->prog = NULL; 1462 rcu_assign_pointer(async->callback_fn, NULL); 1463 } 1464 } 1465 1466 BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer) 1467 { 1468 struct bpf_hrtimer *t, *cur_t; 1469 bool inc = false; 1470 int ret = 0; 1471 1472 if (in_nmi()) 1473 return -EOPNOTSUPP; 1474 rcu_read_lock(); 1475 __bpf_spin_lock_irqsave(&timer->lock); 1476 t = timer->timer; 1477 if (!t) { 1478 ret = -EINVAL; 1479 goto out; 1480 } 1481 1482 cur_t = this_cpu_read(hrtimer_running); 1483 if (cur_t == t) { 1484 /* If bpf callback_fn is trying to bpf_timer_cancel() 1485 * its own timer the hrtimer_cancel() will deadlock 1486 * since it waits for callback_fn to finish. 1487 */ 1488 ret = -EDEADLK; 1489 goto out; 1490 } 1491 1492 /* Only account in-flight cancellations when invoked from a timer 1493 * callback, since we want to avoid waiting only if other _callbacks_ 1494 * are waiting on us, to avoid introducing lockups. Non-callback paths 1495 * are ok, since nobody would synchronously wait for their completion. 1496 */ 1497 if (!cur_t) 1498 goto drop; 1499 atomic_inc(&t->cancelling); 1500 /* Need full barrier after relaxed atomic_inc */ 1501 smp_mb__after_atomic(); 1502 inc = true; 1503 if (atomic_read(&cur_t->cancelling)) { 1504 /* We're cancelling timer t, while some other timer callback is 1505 * attempting to cancel us. 
In such a case, it might be possible
		 * that timer t belongs to the other callback, or some other
		 * callback waiting upon it (creating transitive dependencies
		 * upon us), and we will enter a deadlock if we continue
		 * cancelling and waiting for it synchronously, since it might
		 * do the same. Bail!
		 */
		ret = -EDEADLK;
		goto out;
	}
drop:
	drop_prog_refcnt(&t->cb);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	if (inc)
		atomic_dec(&t->cancelling);
	rcu_read_unlock();
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func = bpf_timer_cancel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
};

static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
{
	struct bpf_async_cb *cb;

	/* Performance optimization: read async->cb without lock first. */
	if (!READ_ONCE(async->cb))
		return NULL;

	__bpf_spin_lock_irqsave(&async->lock);
	/* re-read it under lock */
	cb = async->cb;
	if (!cb)
		goto out;
	drop_prog_refcnt(cb);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	WRITE_ONCE(async->cb, NULL);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return cb;
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_hrtimer *t;

	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);

	if (!t)
		return;
	/* We check that bpf_map_delete/update_elem() was called from timer
	 * callback_fn. In such a case we don't call hrtimer_cancel() (since it
	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
	 * just return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since async->cb = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 *
	 * However, it is possible the timer callback_fn calling us armed the
	 * timer _before_ calling us, such that failing to cancel it here will
	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
	 * Therefore, we _need_ to cancel any outstanding timers before we do
	 * kfree_rcu, even though no more timers can be armed.
	 *
	 * Moreover, we need to schedule work even if the timer does not belong
	 * to the calling callback_fn, as on two different CPUs, we can end up
	 * in a situation where both sides run in parallel, try to cancel one
	 * another, and we end up waiting on both sides in hrtimer_cancel
	 * without making forward progress, since timer1 depends on timer2
	 * callback to finish, and vice versa.
	 *
	 * CPU 1 (timer1_cb)                    CPU 2 (timer2_cb)
	 * bpf_timer_cancel_and_free(timer2)    bpf_timer_cancel_and_free(timer1)
	 *
	 * To avoid these issues, punt to workqueue context when we are in a
	 * timer callback.
	 */
	if (this_cpu_read(hrtimer_running)) {
		queue_work(system_unbound_wq, &t->cb.delete_work);
		return;
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/* If the timer is running on other CPU, also use a kworker to
		 * wait for the completion of the timer instead of trying to
		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
		 * completion.
		 */
		if (hrtimer_try_to_cancel(&t->timer) >= 0)
			kfree_rcu(t, cb.rcu);
		else
			queue_work(system_unbound_wq, &t->cb.delete_work);
	} else {
		bpf_timer_delete_work(&t->cb.delete_work);
	}
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_wq_cancel_and_free(void *val)
{
	struct bpf_work *work;

	BTF_TYPE_EMIT(struct bpf_wq);

	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
	if (!work)
		return;
	/* Trigger cancel of the sleepable work, but *do not* wait for
	 * it to finish if it was running as we might not be in a
	 * sleepable context.
	 * kfree will be called once the work has finished.
	 */
	schedule_work(&work->delete_work);
}

BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
{
	unsigned long *kptr = dst;

	/* This helper may be inlined by verifier. */
	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote type that verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func = bpf_kptr_xchg,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id = BPF_PTR_POISON,
	.arg1_type = ARG_KPTR_XCHG_DEST,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id = BPF_PTR_POISON,
};
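/* A sketch of typical bpf_kptr_xchg() usage from a BPF program
 * (illustrative only, not code from this file; a map value "val" with a
 * "__kptr"-tagged field named "ptr" of hypothetical type "struct node"
 * is assumed):
 *
 *	struct node *new, *old;
 *
 *	new = bpf_obj_new(struct node);
 *	if (!new)
 *		return 0;
 *	old = bpf_kptr_xchg(&val->ptr, new);	(ownership of "new" moves
 *						 into the map value)
 *	if (old)
 *		bpf_obj_drop(old);		(release the displaced object)
 */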
/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
#define DYNPTR_TYPE_SHIFT	28
#define DYNPTR_SIZE_MASK	0xFFFFFF
#define DYNPTR_RDONLY_BIT	BIT(31)

bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_RDONLY_BIT;
}

void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
	ptr->size |= DYNPTR_RDONLY_BIT;
}

static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{
	ptr->size |= type << DYNPTR_TYPE_SHIFT;
}

static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{
	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
}

u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_SIZE_MASK;
}

static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
{
	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;

	ptr->size = new_size | metadata;
}

int bpf_dynptr_check_size(u32 size)
{
	return size > DYNPTR_MAX_SIZE ?
-E2BIG : 0; 1704 } 1705 1706 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, 1707 enum bpf_dynptr_type type, u32 offset, u32 size) 1708 { 1709 ptr->data = data; 1710 ptr->offset = offset; 1711 ptr->size = size; 1712 bpf_dynptr_set_type(ptr, type); 1713 } 1714 1715 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) 1716 { 1717 memset(ptr, 0, sizeof(*ptr)); 1718 } 1719 1720 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) 1721 { 1722 int err; 1723 1724 BTF_TYPE_EMIT(struct bpf_dynptr); 1725 1726 err = bpf_dynptr_check_size(size); 1727 if (err) 1728 goto error; 1729 1730 /* flags is currently unsupported */ 1731 if (flags) { 1732 err = -EINVAL; 1733 goto error; 1734 } 1735 1736 bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size); 1737 1738 return 0; 1739 1740 error: 1741 bpf_dynptr_set_null(ptr); 1742 return err; 1743 } 1744 1745 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = { 1746 .func = bpf_dynptr_from_mem, 1747 .gpl_only = false, 1748 .ret_type = RET_INTEGER, 1749 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1750 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1751 .arg3_type = ARG_ANYTHING, 1752 .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE, 1753 }; 1754 1755 static int __bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr_kern *src, 1756 u32 offset, u64 flags) 1757 { 1758 enum bpf_dynptr_type type; 1759 int err; 1760 1761 if (!src->data || flags) 1762 return -EINVAL; 1763 1764 err = bpf_dynptr_check_off_len(src, offset, len); 1765 if (err) 1766 return err; 1767 1768 type = bpf_dynptr_get_type(src); 1769 1770 switch (type) { 1771 case BPF_DYNPTR_TYPE_LOCAL: 1772 case BPF_DYNPTR_TYPE_RINGBUF: 1773 /* Source and destination may possibly overlap, hence use memmove to 1774 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr 1775 * pointing to overlapping PTR_TO_MAP_VALUE regions. 1776 */ 1777 memmove(dst, src->data + src->offset + offset, len); 1778 return 0; 1779 case BPF_DYNPTR_TYPE_SKB: 1780 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len); 1781 case BPF_DYNPTR_TYPE_XDP: 1782 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len); 1783 default: 1784 WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type); 1785 return -EFAULT; 1786 } 1787 } 1788 1789 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src, 1790 u32, offset, u64, flags) 1791 { 1792 return __bpf_dynptr_read(dst, len, src, offset, flags); 1793 } 1794 1795 static const struct bpf_func_proto bpf_dynptr_read_proto = { 1796 .func = bpf_dynptr_read, 1797 .gpl_only = false, 1798 .ret_type = RET_INTEGER, 1799 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1800 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1801 .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1802 .arg4_type = ARG_ANYTHING, 1803 .arg5_type = ARG_ANYTHING, 1804 }; 1805 1806 int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src, 1807 u32 len, u64 flags) 1808 { 1809 enum bpf_dynptr_type type; 1810 int err; 1811 1812 if (!dst->data || __bpf_dynptr_is_rdonly(dst)) 1813 return -EINVAL; 1814 1815 err = bpf_dynptr_check_off_len(dst, offset, len); 1816 if (err) 1817 return err; 1818 1819 type = bpf_dynptr_get_type(dst); 1820 1821 switch (type) { 1822 case BPF_DYNPTR_TYPE_LOCAL: 1823 case BPF_DYNPTR_TYPE_RINGBUF: 1824 if (flags) 1825 return -EINVAL; 1826 /* Source and destination may possibly overlap, hence use memmove to 1827 * copy the data. E.g. 
bpf_dynptr_from_mem may create two dynptr 1828 * pointing to overlapping PTR_TO_MAP_VALUE regions. 1829 */ 1830 memmove(dst->data + dst->offset + offset, src, len); 1831 return 0; 1832 case BPF_DYNPTR_TYPE_SKB: 1833 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len, 1834 flags); 1835 case BPF_DYNPTR_TYPE_XDP: 1836 if (flags) 1837 return -EINVAL; 1838 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len); 1839 default: 1840 WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type); 1841 return -EFAULT; 1842 } 1843 } 1844 1845 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src, 1846 u32, len, u64, flags) 1847 { 1848 return __bpf_dynptr_write(dst, offset, src, len, flags); 1849 } 1850 1851 static const struct bpf_func_proto bpf_dynptr_write_proto = { 1852 .func = bpf_dynptr_write, 1853 .gpl_only = false, 1854 .ret_type = RET_INTEGER, 1855 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1856 .arg2_type = ARG_ANYTHING, 1857 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1858 .arg4_type = ARG_CONST_SIZE_OR_ZERO, 1859 .arg5_type = ARG_ANYTHING, 1860 }; 1861 1862 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len) 1863 { 1864 enum bpf_dynptr_type type; 1865 int err; 1866 1867 if (!ptr->data) 1868 return 0; 1869 1870 err = bpf_dynptr_check_off_len(ptr, offset, len); 1871 if (err) 1872 return 0; 1873 1874 if (__bpf_dynptr_is_rdonly(ptr)) 1875 return 0; 1876 1877 type = bpf_dynptr_get_type(ptr); 1878 1879 switch (type) { 1880 case BPF_DYNPTR_TYPE_LOCAL: 1881 case BPF_DYNPTR_TYPE_RINGBUF: 1882 return (unsigned long)(ptr->data + ptr->offset + offset); 1883 case BPF_DYNPTR_TYPE_SKB: 1884 case BPF_DYNPTR_TYPE_XDP: 1885 /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */ 1886 return 0; 1887 default: 1888 WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type); 1889 return 0; 1890 } 1891 } 1892 1893 static const struct bpf_func_proto bpf_dynptr_data_proto = { 1894 .func = bpf_dynptr_data, 1895 .gpl_only = false, 1896 .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL, 1897 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1898 .arg2_type = ARG_ANYTHING, 1899 .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO, 1900 }; 1901 1902 const struct bpf_func_proto bpf_get_current_task_proto __weak; 1903 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak; 1904 const struct bpf_func_proto bpf_probe_read_user_proto __weak; 1905 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak; 1906 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak; 1907 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak; 1908 const struct bpf_func_proto bpf_task_pt_regs_proto __weak; 1909 const struct bpf_func_proto bpf_perf_event_read_proto __weak; 1910 const struct bpf_func_proto bpf_send_signal_proto __weak; 1911 const struct bpf_func_proto bpf_send_signal_thread_proto __weak; 1912 const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak; 1913 const struct bpf_func_proto bpf_get_task_stack_proto __weak; 1914 const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak; 1915 1916 const struct bpf_func_proto * 1917 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1918 { 1919 switch (func_id) { 1920 case BPF_FUNC_map_lookup_elem: 1921 return &bpf_map_lookup_elem_proto; 1922 case BPF_FUNC_map_update_elem: 1923 return &bpf_map_update_elem_proto; 1924 case BPF_FUNC_map_delete_elem: 1925 return &bpf_map_delete_elem_proto; 1926 
case BPF_FUNC_map_push_elem: 1927 return &bpf_map_push_elem_proto; 1928 case BPF_FUNC_map_pop_elem: 1929 return &bpf_map_pop_elem_proto; 1930 case BPF_FUNC_map_peek_elem: 1931 return &bpf_map_peek_elem_proto; 1932 case BPF_FUNC_map_lookup_percpu_elem: 1933 return &bpf_map_lookup_percpu_elem_proto; 1934 case BPF_FUNC_get_prandom_u32: 1935 return &bpf_get_prandom_u32_proto; 1936 case BPF_FUNC_get_smp_processor_id: 1937 return &bpf_get_raw_smp_processor_id_proto; 1938 case BPF_FUNC_get_numa_node_id: 1939 return &bpf_get_numa_node_id_proto; 1940 case BPF_FUNC_tail_call: 1941 return &bpf_tail_call_proto; 1942 case BPF_FUNC_ktime_get_ns: 1943 return &bpf_ktime_get_ns_proto; 1944 case BPF_FUNC_ktime_get_boot_ns: 1945 return &bpf_ktime_get_boot_ns_proto; 1946 case BPF_FUNC_ktime_get_tai_ns: 1947 return &bpf_ktime_get_tai_ns_proto; 1948 case BPF_FUNC_ringbuf_output: 1949 return &bpf_ringbuf_output_proto; 1950 case BPF_FUNC_ringbuf_reserve: 1951 return &bpf_ringbuf_reserve_proto; 1952 case BPF_FUNC_ringbuf_submit: 1953 return &bpf_ringbuf_submit_proto; 1954 case BPF_FUNC_ringbuf_discard: 1955 return &bpf_ringbuf_discard_proto; 1956 case BPF_FUNC_ringbuf_query: 1957 return &bpf_ringbuf_query_proto; 1958 case BPF_FUNC_strncmp: 1959 return &bpf_strncmp_proto; 1960 case BPF_FUNC_strtol: 1961 return &bpf_strtol_proto; 1962 case BPF_FUNC_strtoul: 1963 return &bpf_strtoul_proto; 1964 case BPF_FUNC_get_current_pid_tgid: 1965 return &bpf_get_current_pid_tgid_proto; 1966 case BPF_FUNC_get_ns_current_pid_tgid: 1967 return &bpf_get_ns_current_pid_tgid_proto; 1968 case BPF_FUNC_get_current_uid_gid: 1969 return &bpf_get_current_uid_gid_proto; 1970 default: 1971 break; 1972 } 1973 1974 if (!bpf_token_capable(prog->aux->token, CAP_BPF)) 1975 return NULL; 1976 1977 switch (func_id) { 1978 case BPF_FUNC_spin_lock: 1979 return &bpf_spin_lock_proto; 1980 case BPF_FUNC_spin_unlock: 1981 return &bpf_spin_unlock_proto; 1982 case BPF_FUNC_jiffies64: 1983 return &bpf_jiffies64_proto; 1984 case BPF_FUNC_per_cpu_ptr: 1985 return &bpf_per_cpu_ptr_proto; 1986 case BPF_FUNC_this_cpu_ptr: 1987 return &bpf_this_cpu_ptr_proto; 1988 case BPF_FUNC_timer_init: 1989 return &bpf_timer_init_proto; 1990 case BPF_FUNC_timer_set_callback: 1991 return &bpf_timer_set_callback_proto; 1992 case BPF_FUNC_timer_start: 1993 return &bpf_timer_start_proto; 1994 case BPF_FUNC_timer_cancel: 1995 return &bpf_timer_cancel_proto; 1996 case BPF_FUNC_kptr_xchg: 1997 return &bpf_kptr_xchg_proto; 1998 case BPF_FUNC_for_each_map_elem: 1999 return &bpf_for_each_map_elem_proto; 2000 case BPF_FUNC_loop: 2001 return &bpf_loop_proto; 2002 case BPF_FUNC_user_ringbuf_drain: 2003 return &bpf_user_ringbuf_drain_proto; 2004 case BPF_FUNC_ringbuf_reserve_dynptr: 2005 return &bpf_ringbuf_reserve_dynptr_proto; 2006 case BPF_FUNC_ringbuf_submit_dynptr: 2007 return &bpf_ringbuf_submit_dynptr_proto; 2008 case BPF_FUNC_ringbuf_discard_dynptr: 2009 return &bpf_ringbuf_discard_dynptr_proto; 2010 case BPF_FUNC_dynptr_from_mem: 2011 return &bpf_dynptr_from_mem_proto; 2012 case BPF_FUNC_dynptr_read: 2013 return &bpf_dynptr_read_proto; 2014 case BPF_FUNC_dynptr_write: 2015 return &bpf_dynptr_write_proto; 2016 case BPF_FUNC_dynptr_data: 2017 return &bpf_dynptr_data_proto; 2018 #ifdef CONFIG_CGROUPS 2019 case BPF_FUNC_cgrp_storage_get: 2020 return &bpf_cgrp_storage_get_proto; 2021 case BPF_FUNC_cgrp_storage_delete: 2022 return &bpf_cgrp_storage_delete_proto; 2023 case BPF_FUNC_get_current_cgroup_id: 2024 return &bpf_get_current_cgroup_id_proto; 2025 case 
BPF_FUNC_get_current_ancestor_cgroup_id: 2026 return &bpf_get_current_ancestor_cgroup_id_proto; 2027 case BPF_FUNC_current_task_under_cgroup: 2028 return &bpf_current_task_under_cgroup_proto; 2029 #endif 2030 #ifdef CONFIG_CGROUP_NET_CLASSID 2031 case BPF_FUNC_get_cgroup_classid: 2032 return &bpf_get_cgroup_classid_curr_proto; 2033 #endif 2034 case BPF_FUNC_task_storage_get: 2035 if (bpf_prog_check_recur(prog)) 2036 return &bpf_task_storage_get_recur_proto; 2037 return &bpf_task_storage_get_proto; 2038 case BPF_FUNC_task_storage_delete: 2039 if (bpf_prog_check_recur(prog)) 2040 return &bpf_task_storage_delete_recur_proto; 2041 return &bpf_task_storage_delete_proto; 2042 default: 2043 break; 2044 } 2045 2046 if (!bpf_token_capable(prog->aux->token, CAP_PERFMON)) 2047 return NULL; 2048 2049 switch (func_id) { 2050 case BPF_FUNC_trace_printk: 2051 return bpf_get_trace_printk_proto(); 2052 case BPF_FUNC_get_current_task: 2053 return &bpf_get_current_task_proto; 2054 case BPF_FUNC_get_current_task_btf: 2055 return &bpf_get_current_task_btf_proto; 2056 case BPF_FUNC_get_current_comm: 2057 return &bpf_get_current_comm_proto; 2058 case BPF_FUNC_probe_read_user: 2059 return &bpf_probe_read_user_proto; 2060 case BPF_FUNC_probe_read_kernel: 2061 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 2062 NULL : &bpf_probe_read_kernel_proto; 2063 case BPF_FUNC_probe_read_user_str: 2064 return &bpf_probe_read_user_str_proto; 2065 case BPF_FUNC_probe_read_kernel_str: 2066 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 2067 NULL : &bpf_probe_read_kernel_str_proto; 2068 case BPF_FUNC_copy_from_user: 2069 return &bpf_copy_from_user_proto; 2070 case BPF_FUNC_copy_from_user_task: 2071 return &bpf_copy_from_user_task_proto; 2072 case BPF_FUNC_snprintf_btf: 2073 return &bpf_snprintf_btf_proto; 2074 case BPF_FUNC_snprintf: 2075 return &bpf_snprintf_proto; 2076 case BPF_FUNC_task_pt_regs: 2077 return &bpf_task_pt_regs_proto; 2078 case BPF_FUNC_trace_vprintk: 2079 return bpf_get_trace_vprintk_proto(); 2080 case BPF_FUNC_perf_event_read_value: 2081 return bpf_get_perf_event_read_value_proto(); 2082 case BPF_FUNC_perf_event_read: 2083 return &bpf_perf_event_read_proto; 2084 case BPF_FUNC_send_signal: 2085 return &bpf_send_signal_proto; 2086 case BPF_FUNC_send_signal_thread: 2087 return &bpf_send_signal_thread_proto; 2088 case BPF_FUNC_get_task_stack: 2089 return prog->sleepable ? &bpf_get_task_stack_sleepable_proto 2090 : &bpf_get_task_stack_proto; 2091 case BPF_FUNC_get_branch_snapshot: 2092 return &bpf_get_branch_snapshot_proto; 2093 case BPF_FUNC_find_vma: 2094 return &bpf_find_vma_proto; 2095 default: 2096 return NULL; 2097 } 2098 } 2099 EXPORT_SYMBOL_GPL(bpf_base_func_proto); 2100 2101 void bpf_list_head_free(const struct btf_field *field, void *list_head, 2102 struct bpf_spin_lock *spin_lock) 2103 { 2104 struct list_head *head = list_head, *orig_head = list_head; 2105 2106 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head)); 2107 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head)); 2108 2109 /* Do the actual list draining outside the lock to not hold the lock for 2110 * too long, and also prevent deadlocks if tracing programs end up 2111 * executing on entry/exit of functions called inside the critical 2112 * section, and end up doing map ops that call bpf_list_head_free for 2113 * the same map value again. 
2114 */ 2115 __bpf_spin_lock_irqsave(spin_lock); 2116 if (!head->next || list_empty(head)) 2117 goto unlock; 2118 head = head->next; 2119 unlock: 2120 INIT_LIST_HEAD(orig_head); 2121 __bpf_spin_unlock_irqrestore(spin_lock); 2122 2123 while (head != orig_head) { 2124 void *obj = head; 2125 2126 obj -= field->graph_root.node_offset; 2127 head = head->next; 2128 /* The contained type can also have resources, including a 2129 * bpf_list_head which needs to be freed. 2130 */ 2131 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); 2132 } 2133 } 2134 2135 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are 2136 * 'rb_node *', so field name of rb_node within containing struct is not 2137 * needed. 2138 * 2139 * Since bpf_rb_tree's node type has a corresponding struct btf_field with 2140 * graph_root.node_offset, it's not necessary to know field name 2141 * or type of node struct 2142 */ 2143 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \ 2144 for (pos = rb_first_postorder(root); \ 2145 pos && ({ n = rb_next_postorder(pos); 1; }); \ 2146 pos = n) 2147 2148 void bpf_rb_root_free(const struct btf_field *field, void *rb_root, 2149 struct bpf_spin_lock *spin_lock) 2150 { 2151 struct rb_root_cached orig_root, *root = rb_root; 2152 struct rb_node *pos, *n; 2153 void *obj; 2154 2155 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root)); 2156 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root)); 2157 2158 __bpf_spin_lock_irqsave(spin_lock); 2159 orig_root = *root; 2160 *root = RB_ROOT_CACHED; 2161 __bpf_spin_unlock_irqrestore(spin_lock); 2162 2163 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) { 2164 obj = pos; 2165 obj -= field->graph_root.node_offset; 2166 2167 2168 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); 2169 } 2170 } 2171 2172 __bpf_kfunc_start_defs(); 2173 2174 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) 2175 { 2176 struct btf_struct_meta *meta = meta__ign; 2177 u64 size = local_type_id__k; 2178 void *p; 2179 2180 p = bpf_mem_alloc(&bpf_global_ma, size); 2181 if (!p) 2182 return NULL; 2183 if (meta) 2184 bpf_obj_init(meta->record, p); 2185 return p; 2186 } 2187 2188 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) 2189 { 2190 u64 size = local_type_id__k; 2191 2192 /* The verifier has ensured that meta__ign must be NULL */ 2193 return bpf_mem_alloc(&bpf_global_percpu_ma, size); 2194 } 2195 2196 /* Must be called under migrate_disable(), as required by bpf_mem_free */ 2197 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu) 2198 { 2199 struct bpf_mem_alloc *ma; 2200 2201 if (rec && rec->refcount_off >= 0 && 2202 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { 2203 /* Object is refcounted and refcount_dec didn't result in 0 2204 * refcount. Return without freeing the object 2205 */ 2206 return; 2207 } 2208 2209 if (rec) 2210 bpf_obj_free_fields(rec, p); 2211 2212 if (percpu) 2213 ma = &bpf_global_percpu_ma; 2214 else 2215 ma = &bpf_global_ma; 2216 bpf_mem_free_rcu(ma, p); 2217 } 2218 2219 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) 2220 { 2221 struct btf_struct_meta *meta = meta__ign; 2222 void *p = p__alloc; 2223 2224 __bpf_obj_drop_impl(p, meta ? 
meta->record : NULL, false); 2225 } 2226 2227 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) 2228 { 2229 /* The verifier has ensured that meta__ign must be NULL */ 2230 bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc); 2231 } 2232 2233 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) 2234 { 2235 struct btf_struct_meta *meta = meta__ign; 2236 struct bpf_refcount *ref; 2237 2238 /* Could just cast directly to refcount_t *, but need some code using 2239 * bpf_refcount type so that it is emitted in vmlinux BTF 2240 */ 2241 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); 2242 if (!refcount_inc_not_zero((refcount_t *)ref)) 2243 return NULL; 2244 2245 /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null 2246 * in verifier.c 2247 */ 2248 return (void *)p__refcounted_kptr; 2249 } 2250 2251 static int __bpf_list_add(struct bpf_list_node_kern *node, 2252 struct bpf_list_head *head, 2253 bool tail, struct btf_record *rec, u64 off) 2254 { 2255 struct list_head *n = &node->list_head, *h = (void *)head; 2256 2257 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't 2258 * called on its fields, so init here 2259 */ 2260 if (unlikely(!h->next)) 2261 INIT_LIST_HEAD(h); 2262 2263 /* node->owner != NULL implies !list_empty(n), no need to separately 2264 * check the latter 2265 */ 2266 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { 2267 /* Only called from BPF prog, no need to migrate_disable */ 2268 __bpf_obj_drop_impl((void *)n - off, rec, false); 2269 return -EINVAL; 2270 } 2271 2272 tail ? list_add_tail(n, h) : list_add(n, h); 2273 WRITE_ONCE(node->owner, head); 2274 2275 return 0; 2276 } 2277 2278 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head, 2279 struct bpf_list_node *node, 2280 void *meta__ign, u64 off) 2281 { 2282 struct bpf_list_node_kern *n = (void *)node; 2283 struct btf_struct_meta *meta = meta__ign; 2284 2285 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); 2286 } 2287 2288 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head, 2289 struct bpf_list_node *node, 2290 void *meta__ign, u64 off) 2291 { 2292 struct bpf_list_node_kern *n = (void *)node; 2293 struct btf_struct_meta *meta = meta__ign; 2294 2295 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); 2296 } 2297 2298 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) 2299 { 2300 struct list_head *n, *h = (void *)head; 2301 struct bpf_list_node_kern *node; 2302 2303 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't 2304 * called on its fields, so init here 2305 */ 2306 if (unlikely(!h->next)) 2307 INIT_LIST_HEAD(h); 2308 if (list_empty(h)) 2309 return NULL; 2310 2311 n = tail ? 
h->prev : h->next; 2312 node = container_of(n, struct bpf_list_node_kern, list_head); 2313 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head)) 2314 return NULL; 2315 2316 list_del_init(n); 2317 WRITE_ONCE(node->owner, NULL); 2318 return (struct bpf_list_node *)n; 2319 } 2320 2321 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) 2322 { 2323 return __bpf_list_del(head, false); 2324 } 2325 2326 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) 2327 { 2328 return __bpf_list_del(head, true); 2329 } 2330 2331 __bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head) 2332 { 2333 struct list_head *h = (struct list_head *)head; 2334 2335 if (list_empty(h) || unlikely(!h->next)) 2336 return NULL; 2337 2338 return (struct bpf_list_node *)h->next; 2339 } 2340 2341 __bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head) 2342 { 2343 struct list_head *h = (struct list_head *)head; 2344 2345 if (list_empty(h) || unlikely(!h->next)) 2346 return NULL; 2347 2348 return (struct bpf_list_node *)h->prev; 2349 } 2350 2351 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, 2352 struct bpf_rb_node *node) 2353 { 2354 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; 2355 struct rb_root_cached *r = (struct rb_root_cached *)root; 2356 struct rb_node *n = &node_internal->rb_node; 2357 2358 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or 2359 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n) 2360 */ 2361 if (READ_ONCE(node_internal->owner) != root) 2362 return NULL; 2363 2364 rb_erase_cached(n, r); 2365 RB_CLEAR_NODE(n); 2366 WRITE_ONCE(node_internal->owner, NULL); 2367 return (struct bpf_rb_node *)n; 2368 } 2369 2370 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF 2371 * program 2372 */ 2373 static int __bpf_rbtree_add(struct bpf_rb_root *root, 2374 struct bpf_rb_node_kern *node, 2375 void *less, struct btf_record *rec, u64 off) 2376 { 2377 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; 2378 struct rb_node *parent = NULL, *n = &node->rb_node; 2379 bpf_callback_t cb = (bpf_callback_t)less; 2380 bool leftmost = true; 2381 2382 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately 2383 * check the latter 2384 */ 2385 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { 2386 /* Only called from BPF prog, no need to migrate_disable */ 2387 __bpf_obj_drop_impl((void *)n - off, rec, false); 2388 return -EINVAL; 2389 } 2390 2391 while (*link) { 2392 parent = *link; 2393 if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) { 2394 link = &parent->rb_left; 2395 } else { 2396 link = &parent->rb_right; 2397 leftmost = false; 2398 } 2399 } 2400 2401 rb_link_node(n, parent, link); 2402 rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost); 2403 WRITE_ONCE(node->owner, root); 2404 return 0; 2405 } 2406 2407 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, 2408 bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), 2409 void *meta__ign, u64 off) 2410 { 2411 struct btf_struct_meta *meta = meta__ign; 2412 struct bpf_rb_node_kern *n = (void *)node; 2413 2414 return __bpf_rbtree_add(root, n, (void *)less, meta ? 
meta->record : NULL, off); 2415 } 2416 2417 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) 2418 { 2419 struct rb_root_cached *r = (struct rb_root_cached *)root; 2420 2421 return (struct bpf_rb_node *)rb_first_cached(r); 2422 } 2423 2424 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root) 2425 { 2426 struct rb_root_cached *r = (struct rb_root_cached *)root; 2427 2428 return (struct bpf_rb_node *)r->rb_root.rb_node; 2429 } 2430 2431 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node) 2432 { 2433 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; 2434 2435 if (READ_ONCE(node_internal->owner) != root) 2436 return NULL; 2437 2438 return (struct bpf_rb_node *)node_internal->rb_node.rb_left; 2439 } 2440 2441 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node) 2442 { 2443 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; 2444 2445 if (READ_ONCE(node_internal->owner) != root) 2446 return NULL; 2447 2448 return (struct bpf_rb_node *)node_internal->rb_node.rb_right; 2449 } 2450 2451 /** 2452 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this 2453 * kfunc which is not stored in a map as a kptr, must be released by calling 2454 * bpf_task_release(). 2455 * @p: The task on which a reference is being acquired. 2456 */ 2457 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p) 2458 { 2459 if (refcount_inc_not_zero(&p->rcu_users)) 2460 return p; 2461 return NULL; 2462 } 2463 2464 /** 2465 * bpf_task_release - Release the reference acquired on a task. 2466 * @p: The task on which a reference is being released. 2467 */ 2468 __bpf_kfunc void bpf_task_release(struct task_struct *p) 2469 { 2470 put_task_struct_rcu_user(p); 2471 } 2472 2473 __bpf_kfunc void bpf_task_release_dtor(void *p) 2474 { 2475 put_task_struct_rcu_user(p); 2476 } 2477 CFI_NOSEAL(bpf_task_release_dtor); 2478 2479 #ifdef CONFIG_CGROUPS 2480 /** 2481 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by 2482 * this kfunc which is not stored in a map as a kptr, must be released by 2483 * calling bpf_cgroup_release(). 2484 * @cgrp: The cgroup on which a reference is being acquired. 2485 */ 2486 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) 2487 { 2488 return cgroup_tryget(cgrp) ? cgrp : NULL; 2489 } 2490 2491 /** 2492 * bpf_cgroup_release - Release the reference acquired on a cgroup. 2493 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to 2494 * not be freed until the current grace period has ended, even if its refcount 2495 * drops to 0. 2496 * @cgrp: The cgroup on which a reference is being released. 2497 */ 2498 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp) 2499 { 2500 cgroup_put(cgrp); 2501 } 2502 2503 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp) 2504 { 2505 cgroup_put(cgrp); 2506 } 2507 CFI_NOSEAL(bpf_cgroup_release_dtor); 2508 2509 /** 2510 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor 2511 * array. A cgroup returned by this kfunc which is not subsequently stored in a 2512 * map, must be released by calling bpf_cgroup_release(). 2513 * @cgrp: The cgroup for which we're performing a lookup. 2514 * @level: The level of ancestor to look up. 
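 *
 * A minimal BPF-side usage sketch (illustrative only; how @cgrp was acquired
 * and the read of its ->level field are assumptions of the example, not
 * requirements of this kfunc):
 *
 *	struct cgroup *parent = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
 *
 *	if (parent)
 *		bpf_cgroup_release(parent);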
2515 */ 2516 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) 2517 { 2518 struct cgroup *ancestor; 2519 2520 if (level > cgrp->level || level < 0) 2521 return NULL; 2522 2523 /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */ 2524 ancestor = cgrp->ancestors[level]; 2525 if (!cgroup_tryget(ancestor)) 2526 return NULL; 2527 return ancestor; 2528 } 2529 2530 /** 2531 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this 2532 * kfunc which is not subsequently stored in a map must be released by calling 2533 * bpf_cgroup_release(). 2534 * @cgid: cgroup id. 2535 */ 2536 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid) 2537 { 2538 struct cgroup *cgrp; 2539 2540 cgrp = cgroup_get_from_id(cgid); 2541 if (IS_ERR(cgrp)) 2542 return NULL; 2543 return cgrp; 2544 } 2545 2546 /** 2547 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc to 2548 * test a task's membership of a cgroup's ancestry. 2549 * @task: the task to be tested 2550 * @ancestor: possible ancestor of @task's cgroup 2551 * 2552 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor. 2553 * It follows all the same rules as cgroup_is_descendant, and only applies 2554 * to the default hierarchy. 2555 */ 2556 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task, 2557 struct cgroup *ancestor) 2558 { 2559 long ret; 2560 2561 rcu_read_lock(); 2562 ret = task_under_cgroup_hierarchy(task, ancestor); 2563 rcu_read_unlock(); 2564 return ret; 2565 } 2566 2567 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) 2568 { 2569 struct bpf_array *array = container_of(map, struct bpf_array, map); 2570 struct cgroup *cgrp; 2571 2572 if (unlikely(idx >= array->map.max_entries)) 2573 return -E2BIG; 2574 2575 cgrp = READ_ONCE(array->ptrs[idx]); 2576 if (unlikely(!cgrp)) 2577 return -EAGAIN; 2578 2579 return task_under_cgroup_hierarchy(current, cgrp); 2580 } 2581 2582 const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { 2583 .func = bpf_current_task_under_cgroup, 2584 .gpl_only = false, 2585 .ret_type = RET_INTEGER, 2586 .arg1_type = ARG_CONST_MAP_PTR, 2587 .arg2_type = ARG_ANYTHING, 2588 }; 2589 2590 /** 2591 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a 2592 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its 2593 * hierarchy ID. 2594 * @task: The target task 2595 * @hierarchy_id: The ID of a cgroup1 hierarchy 2596 * 2597 * On success, the cgroup is returned. On failure, NULL is returned. 2598 */ 2599 __bpf_kfunc struct cgroup * 2600 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) 2601 { 2602 struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id); 2603 2604 if (IS_ERR(cgrp)) 2605 return NULL; 2606 return cgrp; 2607 } 2608 #endif /* CONFIG_CGROUPS */ 2609 2610 /** 2611 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up 2612 * in the root pid namespace idr. If a task is returned, it must either be 2613 * stored in a map, or released with bpf_task_release(). 2614 * @pid: The pid of the task being looked up.
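 *
 * A minimal BPF-side usage sketch (illustrative only; the pid value, the read
 * of ->comm and the use of bpf_printk() are assumptions of the example):
 *
 *	struct task_struct *p = bpf_task_from_pid(1);
 *
 *	if (p) {
 *		bpf_printk("pid 1 comm: %s", p->comm);
 *		bpf_task_release(p);
 *	}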
2615 */ 2616 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) 2617 { 2618 struct task_struct *p; 2619 2620 rcu_read_lock(); 2621 p = find_task_by_pid_ns(pid, &init_pid_ns); 2622 if (p) 2623 p = bpf_task_acquire(p); 2624 rcu_read_unlock(); 2625 2626 return p; 2627 } 2628 2629 /** 2630 * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up 2631 * in the pid namespace of the current task. If a task is returned, it must 2632 * either be stored in a map, or released with bpf_task_release(). 2633 * @vpid: The vpid of the task being looked up. 2634 */ 2635 __bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid) 2636 { 2637 struct task_struct *p; 2638 2639 rcu_read_lock(); 2640 p = find_task_by_vpid(vpid); 2641 if (p) 2642 p = bpf_task_acquire(p); 2643 rcu_read_unlock(); 2644 2645 return p; 2646 } 2647 2648 /** 2649 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. 2650 * @p: The dynptr whose data slice to retrieve 2651 * @offset: Offset into the dynptr 2652 * @buffer__opt: User-provided buffer to copy contents into. May be NULL 2653 * @buffer__szk: Size (in bytes) of the buffer if present. This is the 2654 * length of the requested slice. This must be a constant. 2655 * 2656 * For non-skb and non-xdp type dynptrs, there is no difference between 2657 * bpf_dynptr_slice and bpf_dynptr_data. 2658 * 2659 * If buffer__opt is NULL, the call will fail if buffer_opt was needed. 2660 * 2661 * If the intention is to write to the data slice, please use 2662 * bpf_dynptr_slice_rdwr. 2663 * 2664 * The user must check that the returned pointer is not null before using it. 2665 * 2666 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice 2667 * does not change the underlying packet data pointers, so a call to 2668 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in 2669 * the bpf program. 2670 * 2671 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only 2672 * data slice (can be either direct pointer to the data or a pointer to the user 2673 * provided buffer, with its contents containing the data, if unable to obtain 2674 * direct pointer) 2675 */ 2676 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset, 2677 void *buffer__opt, u32 buffer__szk) 2678 { 2679 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2680 enum bpf_dynptr_type type; 2681 u32 len = buffer__szk; 2682 int err; 2683 2684 if (!ptr->data) 2685 return NULL; 2686 2687 err = bpf_dynptr_check_off_len(ptr, offset, len); 2688 if (err) 2689 return NULL; 2690 2691 type = bpf_dynptr_get_type(ptr); 2692 2693 switch (type) { 2694 case BPF_DYNPTR_TYPE_LOCAL: 2695 case BPF_DYNPTR_TYPE_RINGBUF: 2696 return ptr->data + ptr->offset + offset; 2697 case BPF_DYNPTR_TYPE_SKB: 2698 if (buffer__opt) 2699 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt); 2700 else 2701 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len); 2702 case BPF_DYNPTR_TYPE_XDP: 2703 { 2704 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); 2705 if (!IS_ERR_OR_NULL(xdp_ptr)) 2706 return xdp_ptr; 2707 2708 if (!buffer__opt) 2709 return NULL; 2710 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false); 2711 return buffer__opt; 2712 } 2713 default: 2714 WARN_ONCE(true, "unknown dynptr type %d\n", type); 2715 return NULL; 2716 } 2717 } 2718 2719 /** 2720 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data. 
2721 * @p: The dynptr whose data slice to retrieve 2722 * @offset: Offset into the dynptr 2723 * @buffer__opt: User-provided buffer to copy contents into. May be NULL 2724 * @buffer__szk: Size (in bytes) of the buffer if present. This is the 2725 * length of the requested slice. This must be a constant. 2726 * 2727 * For non-skb and non-xdp type dynptrs, there is no difference between 2728 * bpf_dynptr_slice and bpf_dynptr_data. 2729 * 2730 * If buffer__opt is NULL, the call will fail if buffer_opt was needed. 2731 * 2732 * The returned pointer is writable and may point to either directly the dynptr 2733 * data at the requested offset or to the buffer if unable to obtain a direct 2734 * data pointer to (example: the requested slice is to the paged area of an skb 2735 * packet). In the case where the returned pointer is to the buffer, the user 2736 * is responsible for persisting writes through calling bpf_dynptr_write(). This 2737 * usually looks something like this pattern: 2738 * 2739 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer)); 2740 * if (!eth) 2741 * return TC_ACT_SHOT; 2742 * 2743 * // mutate eth header // 2744 * 2745 * if (eth == buffer) 2746 * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); 2747 * 2748 * Please note that, as in the example above, the user must check that the 2749 * returned pointer is not null before using it. 2750 * 2751 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr 2752 * does not change the underlying packet data pointers, so a call to 2753 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in 2754 * the bpf program. 2755 * 2756 * Return: NULL if the call failed (eg invalid dynptr), pointer to a 2757 * data slice (can be either direct pointer to the data or a pointer to the user 2758 * provided buffer, with its contents containing the data, if unable to obtain 2759 * direct pointer) 2760 */ 2761 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, 2762 void *buffer__opt, u32 buffer__szk) 2763 { 2764 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2765 2766 if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) 2767 return NULL; 2768 2769 /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice. 2770 * 2771 * For skb-type dynptrs, it is safe to write into the returned pointer 2772 * if the bpf program allows skb data writes. There are two possibilities 2773 * that may occur when calling bpf_dynptr_slice_rdwr: 2774 * 2775 * 1) The requested slice is in the head of the skb. In this case, the 2776 * returned pointer is directly to skb data, and if the skb is cloned, the 2777 * verifier will have uncloned it (see bpf_unclone_prologue()) already. 2778 * The pointer can be directly written into. 2779 * 2780 * 2) Some portion of the requested slice is in the paged buffer area. 2781 * In this case, the requested data will be copied out into the buffer 2782 * and the returned pointer will be a pointer to the buffer. The skb 2783 * will not be pulled. To persist the write, the user will need to call 2784 * bpf_dynptr_write(), which will pull the skb and commit the write. 2785 * 2786 * Similarly for xdp programs, if the requested slice is not across xdp 2787 * fragments, then a direct pointer will be returned, otherwise the data 2788 * will be copied out into the buffer and the user will need to call 2789 * bpf_dynptr_write() to commit changes. 
2790 */ 2791 return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk); 2792 } 2793 2794 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end) 2795 { 2796 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2797 u32 size; 2798 2799 if (!ptr->data || start > end) 2800 return -EINVAL; 2801 2802 size = __bpf_dynptr_size(ptr); 2803 2804 if (start > size || end > size) 2805 return -ERANGE; 2806 2807 ptr->offset += start; 2808 bpf_dynptr_set_size(ptr, end - start); 2809 2810 return 0; 2811 } 2812 2813 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p) 2814 { 2815 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2816 2817 return !ptr->data; 2818 } 2819 2820 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p) 2821 { 2822 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2823 2824 if (!ptr->data) 2825 return false; 2826 2827 return __bpf_dynptr_is_rdonly(ptr); 2828 } 2829 2830 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p) 2831 { 2832 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2833 2834 if (!ptr->data) 2835 return -EINVAL; 2836 2837 return __bpf_dynptr_size(ptr); 2838 } 2839 2840 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p, 2841 struct bpf_dynptr *clone__uninit) 2842 { 2843 struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit; 2844 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2845 2846 if (!ptr->data) { 2847 bpf_dynptr_set_null(clone); 2848 return -EINVAL; 2849 } 2850 2851 *clone = *ptr; 2852 2853 return 0; 2854 } 2855 2856 /** 2857 * bpf_dynptr_copy() - Copy data from one dynptr to another. 2858 * @dst_ptr: Destination dynptr - where data should be copied to 2859 * @dst_off: Offset into the destination dynptr 2860 * @src_ptr: Source dynptr - where data should be copied from 2861 * @src_off: Offset into the source dynptr 2862 * @size: Length of the data to copy from source to destination 2863 * 2864 * Copies data from source dynptr to destination dynptr. 2865 * Returns 0 on success; negative error, otherwise. 2866 */ 2867 __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off, 2868 struct bpf_dynptr *src_ptr, u32 src_off, u32 size) 2869 { 2870 struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr; 2871 struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr; 2872 void *src_slice, *dst_slice; 2873 char buf[256]; 2874 u32 off; 2875 2876 src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size); 2877 dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size); 2878 2879 if (src_slice && dst_slice) { 2880 memmove(dst_slice, src_slice, size); 2881 return 0; 2882 } 2883 2884 if (src_slice) 2885 return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0); 2886 2887 if (dst_slice) 2888 return __bpf_dynptr_read(dst_slice, size, src, src_off, 0); 2889 2890 if (bpf_dynptr_check_off_len(dst, dst_off, size) || 2891 bpf_dynptr_check_off_len(src, src_off, size)) 2892 return -E2BIG; 2893 2894 off = 0; 2895 while (off < size) { 2896 u32 chunk_sz = min_t(u32, sizeof(buf), size - off); 2897 int err; 2898 2899 err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0); 2900 if (err) 2901 return err; 2902 err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0); 2903 if (err) 2904 return err; 2905 2906 off += chunk_sz; 2907 } 2908 return 0; 2909 } 2910 2911 /** 2912 * bpf_dynptr_memset() - Fill dynptr memory with a constant byte. 
2913 * @p: Destination dynptr - where data will be filled 2914 * @offset: Offset into the dynptr to start filling from 2915 * @size: Number of bytes to fill 2916 * @val: Constant byte to fill the memory with 2917 * 2918 * Fills the @size bytes of the memory area pointed to by @p 2919 * at @offset with the constant byte @val. 2920 * Returns 0 on success; negative error, otherwise. 2921 */ 2922 __bpf_kfunc int bpf_dynptr_memset(struct bpf_dynptr *p, u32 offset, u32 size, u8 val) 2923 { 2924 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2925 u32 chunk_sz, write_off; 2926 char buf[256]; 2927 void* slice; 2928 int err; 2929 2930 slice = bpf_dynptr_slice_rdwr(p, offset, NULL, size); 2931 if (likely(slice)) { 2932 memset(slice, val, size); 2933 return 0; 2934 } 2935 2936 if (__bpf_dynptr_is_rdonly(ptr)) 2937 return -EINVAL; 2938 2939 err = bpf_dynptr_check_off_len(ptr, offset, size); 2940 if (err) 2941 return err; 2942 2943 /* Non-linear data under the dynptr, write from a local buffer */ 2944 chunk_sz = min_t(u32, sizeof(buf), size); 2945 memset(buf, val, chunk_sz); 2946 2947 for (write_off = 0; write_off < size; write_off += chunk_sz) { 2948 chunk_sz = min_t(u32, sizeof(buf), size - write_off); 2949 err = __bpf_dynptr_write(ptr, offset + write_off, buf, chunk_sz, 0); 2950 if (err) 2951 return err; 2952 } 2953 2954 return 0; 2955 } 2956 2957 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj) 2958 { 2959 return obj; 2960 } 2961 2962 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k) 2963 { 2964 return (void *)obj__ign; 2965 } 2966 2967 __bpf_kfunc void bpf_rcu_read_lock(void) 2968 { 2969 rcu_read_lock(); 2970 } 2971 2972 __bpf_kfunc void bpf_rcu_read_unlock(void) 2973 { 2974 rcu_read_unlock(); 2975 } 2976 2977 struct bpf_throw_ctx { 2978 struct bpf_prog_aux *aux; 2979 u64 sp; 2980 u64 bp; 2981 int cnt; 2982 }; 2983 2984 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp) 2985 { 2986 struct bpf_throw_ctx *ctx = cookie; 2987 struct bpf_prog *prog; 2988 2989 /* 2990 * The RCU read lock is held to safely traverse the latch tree, but we 2991 * don't need its protection when accessing the prog, since it has an 2992 * active stack frame on the current stack trace, and won't disappear. 2993 */ 2994 rcu_read_lock(); 2995 prog = bpf_prog_ksym_find(ip); 2996 rcu_read_unlock(); 2997 if (!prog) 2998 return !ctx->cnt; 2999 ctx->cnt++; 3000 if (bpf_is_subprog(prog)) 3001 return true; 3002 ctx->aux = prog->aux; 3003 ctx->sp = sp; 3004 ctx->bp = bp; 3005 return false; 3006 } 3007 3008 __bpf_kfunc void bpf_throw(u64 cookie) 3009 { 3010 struct bpf_throw_ctx ctx = {}; 3011 3012 arch_bpf_stack_walk(bpf_stack_walker, &ctx); 3013 WARN_ON_ONCE(!ctx.aux); 3014 if (ctx.aux) 3015 WARN_ON_ONCE(!ctx.aux->exception_boundary); 3016 WARN_ON_ONCE(!ctx.bp); 3017 WARN_ON_ONCE(!ctx.cnt); 3018 /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning 3019 * deeper stack depths than ctx.sp as we do not return from bpf_throw, 3020 * which skips compiler generated instrumentation to do the same. 
3021 */ 3022 kasan_unpoison_task_stack_below((void *)(long)ctx.sp); 3023 ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0); 3024 WARN(1, "A call to BPF exception callback should never return\n"); 3025 } 3026 3027 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) 3028 { 3029 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 3030 struct bpf_map *map = p__map; 3031 3032 BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq)); 3033 BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq)); 3034 3035 if (flags) 3036 return -EINVAL; 3037 3038 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ); 3039 } 3040 3041 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) 3042 { 3043 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 3044 struct bpf_work *w; 3045 3046 if (in_nmi()) 3047 return -EOPNOTSUPP; 3048 if (flags) 3049 return -EINVAL; 3050 w = READ_ONCE(async->work); 3051 if (!w || !READ_ONCE(w->cb.prog)) 3052 return -EINVAL; 3053 3054 schedule_work(&w->work); 3055 return 0; 3056 } 3057 3058 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, 3059 int (callback_fn)(void *map, int *key, void *value), 3060 unsigned int flags, 3061 void *aux__prog) 3062 { 3063 struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog; 3064 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 3065 3066 if (flags) 3067 return -EINVAL; 3068 3069 return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); 3070 } 3071 3072 __bpf_kfunc void bpf_preempt_disable(void) 3073 { 3074 preempt_disable(); 3075 } 3076 3077 __bpf_kfunc void bpf_preempt_enable(void) 3078 { 3079 preempt_enable(); 3080 } 3081 3082 struct bpf_iter_bits { 3083 __u64 __opaque[2]; 3084 } __aligned(8); 3085 3086 #define BITS_ITER_NR_WORDS_MAX 511 3087 3088 struct bpf_iter_bits_kern { 3089 union { 3090 __u64 *bits; 3091 __u64 bits_copy; 3092 }; 3093 int nr_bits; 3094 int bit; 3095 } __aligned(8); 3096 3097 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing 3098 * a u64 pointer and an unsigned long pointer to find_next_bit() will 3099 * return the same result, as both point to the same 8-byte area. 3100 * 3101 * For 32-bit little-endian hosts, using a u64 pointer or unsigned long 3102 * pointer also makes no difference. This is because the first iterated 3103 * unsigned long is composed of bits 0-31 of the u64 and the second unsigned 3104 * long is composed of bits 32-63 of the u64. 3105 * 3106 * However, for 32-bit big-endian hosts, this is not the case. The first 3107 * iterated unsigned long will be bits 32-63 of the u64, so swap these two 3108 * ulong values within the u64. 3109 */ 3110 static void swap_ulong_in_u64(u64 *bits, unsigned int nr) 3111 { 3112 #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN) 3113 unsigned int i; 3114 3115 for (i = 0; i < nr; i++) 3116 bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32); 3117 #endif 3118 } 3119 3120 /** 3121 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area 3122 * @it: The new bpf_iter_bits to be created 3123 * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over 3124 * @nr_words: The size of the specified memory area, measured in 8-byte units. 3125 * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be 3126 * further reduced by the BPF memory allocator implementation. 
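 *
 * A rough BPF-side usage sketch of the iterator trio (illustrative only; the
 * mask pointer below, one u64 worth of bitmap data readable by the program,
 * is an assumption of the example):
 *
 *	struct bpf_iter_bits it;
 *	int *bit;
 *
 *	bpf_iter_bits_new(&it, mask, 1);
 *	while ((bit = bpf_iter_bits_next(&it)))
 *		bpf_printk("bit %d is set", *bit);
 *	bpf_iter_bits_destroy(&it);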
3127 * 3128 * This function initializes a new bpf_iter_bits structure for iterating over 3129 * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It 3130 * copies the data of the memory area to the newly created bpf_iter_bits @it for 3131 * subsequent iteration operations. 3132 * 3133 * On success, 0 is returned. On failure, ERR is returned. 3134 */ 3135 __bpf_kfunc int 3136 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) 3137 { 3138 struct bpf_iter_bits_kern *kit = (void *)it; 3139 u32 nr_bytes = nr_words * sizeof(u64); 3140 u32 nr_bits = BYTES_TO_BITS(nr_bytes); 3141 int err; 3142 3143 BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits)); 3144 BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) != 3145 __alignof__(struct bpf_iter_bits)); 3146 3147 kit->nr_bits = 0; 3148 kit->bits_copy = 0; 3149 kit->bit = -1; 3150 3151 if (!unsafe_ptr__ign || !nr_words) 3152 return -EINVAL; 3153 if (nr_words > BITS_ITER_NR_WORDS_MAX) 3154 return -E2BIG; 3155 3156 /* Optimization for u64 mask */ 3157 if (nr_bits == 64) { 3158 err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign); 3159 if (err) 3160 return -EFAULT; 3161 3162 swap_ulong_in_u64(&kit->bits_copy, nr_words); 3163 3164 kit->nr_bits = nr_bits; 3165 return 0; 3166 } 3167 3168 if (bpf_mem_alloc_check_size(false, nr_bytes)) 3169 return -E2BIG; 3170 3171 /* Fallback to memalloc */ 3172 kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes); 3173 if (!kit->bits) 3174 return -ENOMEM; 3175 3176 err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign); 3177 if (err) { 3178 bpf_mem_free(&bpf_global_ma, kit->bits); 3179 return err; 3180 } 3181 3182 swap_ulong_in_u64(kit->bits, nr_words); 3183 3184 kit->nr_bits = nr_bits; 3185 return 0; 3186 } 3187 3188 /** 3189 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits 3190 * @it: The bpf_iter_bits to be checked 3191 * 3192 * This function returns a pointer to a number representing the value of the 3193 * next bit in the bits. 3194 * 3195 * If there are no further bits available, it returns NULL. 3196 */ 3197 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it) 3198 { 3199 struct bpf_iter_bits_kern *kit = (void *)it; 3200 int bit = kit->bit, nr_bits = kit->nr_bits; 3201 const void *bits; 3202 3203 if (!nr_bits || bit >= nr_bits) 3204 return NULL; 3205 3206 bits = nr_bits == 64 ? &kit->bits_copy : kit->bits; 3207 bit = find_next_bit(bits, nr_bits, bit + 1); 3208 if (bit >= nr_bits) { 3209 kit->bit = bit; 3210 return NULL; 3211 } 3212 3213 kit->bit = bit; 3214 return &kit->bit; 3215 } 3216 3217 /** 3218 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits 3219 * @it: The bpf_iter_bits to be destroyed 3220 * 3221 * Destroy the resource associated with the bpf_iter_bits. 3222 */ 3223 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it) 3224 { 3225 struct bpf_iter_bits_kern *kit = (void *)it; 3226 3227 if (kit->nr_bits <= 64) 3228 return; 3229 bpf_mem_free(&bpf_global_ma, kit->bits); 3230 } 3231 3232 /** 3233 * bpf_copy_from_user_str() - Copy a string from an unsafe user address 3234 * @dst: Destination address, in kernel space. This buffer must be 3235 * at least @dst__sz bytes long. 3236 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL. 3237 * @unsafe_ptr__ign: Source address, in user space. 3238 * @flags: The only supported flag is BPF_F_PAD_ZEROS 3239 * 3240 * Copies a NUL-terminated string from userspace to BPF space. 
If user string is 3241 * too long this will still ensure zero termination in the dst buffer unless 3242 * buffer size is 0. 3243 * 3244 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and 3245 * memset all of @dst on failure. 3246 */ 3247 __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags) 3248 { 3249 int ret; 3250 3251 if (unlikely(flags & ~BPF_F_PAD_ZEROS)) 3252 return -EINVAL; 3253 3254 if (unlikely(!dst__sz)) 3255 return 0; 3256 3257 ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1); 3258 if (ret < 0) { 3259 if (flags & BPF_F_PAD_ZEROS) 3260 memset((char *)dst, 0, dst__sz); 3261 3262 return ret; 3263 } 3264 3265 if (flags & BPF_F_PAD_ZEROS) 3266 memset((char *)dst + ret, 0, dst__sz - ret); 3267 else 3268 ((char *)dst)[ret] = '\0'; 3269 3270 return ret + 1; 3271 } 3272 3273 /** 3274 * bpf_copy_from_user_task_str() - Copy a string from a task's address space 3275 * @dst: Destination address, in kernel space. This buffer must be 3276 * at least @dst__sz bytes long. 3277 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL. 3278 * @unsafe_ptr__ign: Source address in the task's address space. 3279 * @tsk: The task whose address space will be used 3280 * @flags: The only supported flag is BPF_F_PAD_ZEROS 3281 * 3282 * Copies a NUL-terminated string from a task's address space to the @dst 3283 * buffer. If the user string is too long this will still ensure zero termination 3284 * in the @dst buffer unless buffer size is 0. 3285 * 3286 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success 3287 * and memset all of @dst on failure. 3288 * 3289 * Return: The number of copied bytes on success including the NUL terminator. 3290 * A negative error code on failure. 3291 */ 3292 __bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz, 3293 const void __user *unsafe_ptr__ign, 3294 struct task_struct *tsk, u64 flags) 3295 { 3296 int ret; 3297 3298 if (unlikely(flags & ~BPF_F_PAD_ZEROS)) 3299 return -EINVAL; 3300 3301 if (unlikely(dst__sz == 0)) 3302 return 0; 3303 3304 ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0); 3305 if (ret < 0) { 3306 if (flags & BPF_F_PAD_ZEROS) 3307 memset(dst, 0, dst__sz); 3308 return ret; 3309 } 3310 3311 if (flags & BPF_F_PAD_ZEROS) 3312 memset(dst + ret, 0, dst__sz - ret); 3313 3314 return ret + 1; 3315 } 3316 3317 /* Keep unsigned long in prototype so that kfunc is usable when emitted to 3318 * vmlinux.h in BPF programs directly, but note that while in BPF prog, the 3319 * unsigned long always points to an 8-byte region on the stack, the kernel may only 3320 * read and write 4 of those bytes on 32-bit. 3321 */ 3322 __bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag) 3323 { 3324 local_irq_save(*flags__irq_flag); 3325 } 3326 3327 __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag) 3328 { 3329 local_irq_restore(*flags__irq_flag); 3330 } 3331 3332 __bpf_kfunc void __bpf_trap(void) 3333 { 3334 } 3335 3336 /* 3337 * Kfuncs for string operations. 3338 * 3339 * Since strings are not necessarily %NUL-terminated, we cannot directly call 3340 * in-kernel implementations. Instead, we open-code the implementations using 3341 * __get_kernel_nofault instead of plain dereference to make them safe.
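 *
 * A rough BPF-side usage sketch (illustrative only; filling the buffer via
 * bpf_get_current_comm() is an assumption of the example):
 *
 *	char comm[16];
 *
 *	bpf_get_current_comm(comm, sizeof(comm));
 *	if (bpf_strstr(comm, "kworker") == 0)
 *		return 0;	// comm starts with "kworker"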
3342 */ 3343 3344 /** 3345 * bpf_strcmp - Compare two strings 3346 * @s1__ign: One string 3347 * @s2__ign: Another string 3348 * 3349 * Return: 3350 * * %0 - Strings are equal 3351 * * %-1 - @s1__ign is smaller 3352 * * %1 - @s2__ign is smaller 3353 * * %-EFAULT - Cannot read one of the strings 3354 * * %-E2BIG - One of strings is too large 3355 * * %-ERANGE - One of strings is outside of kernel address space 3356 */ 3357 __bpf_kfunc int bpf_strcmp(const char *s1__ign, const char *s2__ign) 3358 { 3359 char c1, c2; 3360 int i; 3361 3362 if (!copy_from_kernel_nofault_allowed(s1__ign, 1) || 3363 !copy_from_kernel_nofault_allowed(s2__ign, 1)) { 3364 return -ERANGE; 3365 } 3366 3367 guard(pagefault)(); 3368 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3369 __get_kernel_nofault(&c1, s1__ign, char, err_out); 3370 __get_kernel_nofault(&c2, s2__ign, char, err_out); 3371 if (c1 != c2) 3372 return c1 < c2 ? -1 : 1; 3373 if (c1 == '\0') 3374 return 0; 3375 s1__ign++; 3376 s2__ign++; 3377 } 3378 return -E2BIG; 3379 err_out: 3380 return -EFAULT; 3381 } 3382 3383 /** 3384 * bpf_strnchr - Find a character in a length limited string 3385 * @s__ign: The string to be searched 3386 * @count: The number of characters to be searched 3387 * @c: The character to search for 3388 * 3389 * Note that the %NUL-terminator is considered part of the string, and can 3390 * be searched for. 3391 * 3392 * Return: 3393 * * >=0 - Index of the first occurrence of @c within @s__ign 3394 * * %-ENOENT - @c not found in the first @count characters of @s__ign 3395 * * %-EFAULT - Cannot read @s__ign 3396 * * %-E2BIG - @s__ign is too large 3397 * * %-ERANGE - @s__ign is outside of kernel address space 3398 */ 3399 __bpf_kfunc int bpf_strnchr(const char *s__ign, size_t count, char c) 3400 { 3401 char sc; 3402 int i; 3403 3404 if (!copy_from_kernel_nofault_allowed(s__ign, 1)) 3405 return -ERANGE; 3406 3407 guard(pagefault)(); 3408 for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) { 3409 __get_kernel_nofault(&sc, s__ign, char, err_out); 3410 if (sc == c) 3411 return i; 3412 if (sc == '\0') 3413 return -ENOENT; 3414 s__ign++; 3415 } 3416 return i == XATTR_SIZE_MAX ? -E2BIG : -ENOENT; 3417 err_out: 3418 return -EFAULT; 3419 } 3420 3421 /** 3422 * bpf_strchr - Find the first occurrence of a character in a string 3423 * @s__ign: The string to be searched 3424 * @c: The character to search for 3425 * 3426 * Note that the %NUL-terminator is considered part of the string, and can 3427 * be searched for. 
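 *
 * For example, bpf_strchr("bpf", 'p') returns 1, while bpf_strchr("bpf", '\0')
 * returns 3 (the index of the terminating NUL).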
3428 * 3429 * Return: 3430 * * >=0 - The index of the first occurrence of @c within @s__ign 3431 * * %-ENOENT - @c not found in @s__ign 3432 * * %-EFAULT - Cannot read @s__ign 3433 * * %-E2BIG - @s__ign is too large 3434 * * %-ERANGE - @s__ign is outside of kernel address space 3435 */ 3436 __bpf_kfunc int bpf_strchr(const char *s__ign, char c) 3437 { 3438 return bpf_strnchr(s__ign, XATTR_SIZE_MAX, c); 3439 } 3440 3441 /** 3442 * bpf_strchrnul - Find and return a character in a string, or end of string 3443 * @s__ign: The string to be searched 3444 * @c: The character to search for 3445 * 3446 * Return: 3447 * * >=0 - Index of the first occurrence of @c within @s__ign or index of 3448 * the null byte at the end of @s__ign when @c is not found 3449 * * %-EFAULT - Cannot read @s__ign 3450 * * %-E2BIG - @s__ign is too large 3451 * * %-ERANGE - @s__ign is outside of kernel address space 3452 */ 3453 __bpf_kfunc int bpf_strchrnul(const char *s__ign, char c) 3454 { 3455 char sc; 3456 int i; 3457 3458 if (!copy_from_kernel_nofault_allowed(s__ign, 1)) 3459 return -ERANGE; 3460 3461 guard(pagefault)(); 3462 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3463 __get_kernel_nofault(&sc, s__ign, char, err_out); 3464 if (sc == '\0' || sc == c) 3465 return i; 3466 s__ign++; 3467 } 3468 return -E2BIG; 3469 err_out: 3470 return -EFAULT; 3471 } 3472 3473 /** 3474 * bpf_strrchr - Find the last occurrence of a character in a string 3475 * @s__ign: The string to be searched 3476 * @c: The character to search for 3477 * 3478 * Return: 3479 * * >=0 - Index of the last occurrence of @c within @s__ign 3480 * * %-ENOENT - @c not found in @s__ign 3481 * * %-EFAULT - Cannot read @s__ign 3482 * * %-E2BIG - @s__ign is too large 3483 * * %-ERANGE - @s__ign is outside of kernel address space 3484 */ 3485 __bpf_kfunc int bpf_strrchr(const char *s__ign, int c) 3486 { 3487 char sc; 3488 int i, last = -ENOENT; 3489 3490 if (!copy_from_kernel_nofault_allowed(s__ign, 1)) 3491 return -ERANGE; 3492 3493 guard(pagefault)(); 3494 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3495 __get_kernel_nofault(&sc, s__ign, char, err_out); 3496 if (sc == c) 3497 last = i; 3498 if (sc == '\0') 3499 return last; 3500 s__ign++; 3501 } 3502 return -E2BIG; 3503 err_out: 3504 return -EFAULT; 3505 } 3506 3507 /** 3508 * bpf_strnlen - Calculate the length of a length-limited string 3509 * @s__ign: The string 3510 * @count: The maximum number of characters to count 3511 * 3512 * Return: 3513 * * >=0 - The length of @s__ign 3514 * * %-EFAULT - Cannot read @s__ign 3515 * * %-E2BIG - @s__ign is too large 3516 * * %-ERANGE - @s__ign is outside of kernel address space 3517 */ 3518 __bpf_kfunc int bpf_strnlen(const char *s__ign, size_t count) 3519 { 3520 char c; 3521 int i; 3522 3523 if (!copy_from_kernel_nofault_allowed(s__ign, 1)) 3524 return -ERANGE; 3525 3526 guard(pagefault)(); 3527 for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) { 3528 __get_kernel_nofault(&c, s__ign, char, err_out); 3529 if (c == '\0') 3530 return i; 3531 s__ign++; 3532 } 3533 return i == XATTR_SIZE_MAX ? 
-E2BIG : i; 3534 err_out: 3535 return -EFAULT; 3536 } 3537 3538 /** 3539 * bpf_strlen - Calculate the length of a string 3540 * @s__ign: The string 3541 * 3542 * Return: 3543 * * >=0 - The length of @s__ign 3544 * * %-EFAULT - Cannot read @s__ign 3545 * * %-E2BIG - @s__ign is too large 3546 * * %-ERANGE - @s__ign is outside of kernel address space 3547 */ 3548 __bpf_kfunc int bpf_strlen(const char *s__ign) 3549 { 3550 return bpf_strnlen(s__ign, XATTR_SIZE_MAX); 3551 } 3552 3553 /** 3554 * bpf_strspn - Calculate the length of the initial substring of @s__ign which 3555 * only contains letters in @accept__ign 3556 * @s__ign: The string to be searched 3557 * @accept__ign: The string to search for 3558 * 3559 * Return: 3560 * * >=0 - The length of the initial substring of @s__ign which only 3561 * contains letters from @accept__ign 3562 * * %-EFAULT - Cannot read one of the strings 3563 * * %-E2BIG - One of the strings is too large 3564 * * %-ERANGE - One of the strings is outside of kernel address space 3565 */ 3566 __bpf_kfunc int bpf_strspn(const char *s__ign, const char *accept__ign) 3567 { 3568 char cs, ca; 3569 int i, j; 3570 3571 if (!copy_from_kernel_nofault_allowed(s__ign, 1) || 3572 !copy_from_kernel_nofault_allowed(accept__ign, 1)) { 3573 return -ERANGE; 3574 } 3575 3576 guard(pagefault)(); 3577 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3578 __get_kernel_nofault(&cs, s__ign, char, err_out); 3579 if (cs == '\0') 3580 return i; 3581 for (j = 0; j < XATTR_SIZE_MAX; j++) { 3582 __get_kernel_nofault(&ca, accept__ign + j, char, err_out); 3583 if (cs == ca || ca == '\0') 3584 break; 3585 } 3586 if (j == XATTR_SIZE_MAX) 3587 return -E2BIG; 3588 if (ca == '\0') 3589 return i; 3590 s__ign++; 3591 } 3592 return -E2BIG; 3593 err_out: 3594 return -EFAULT; 3595 } 3596 3597 /** 3598 * bpf_strcspn - Calculate the length of the initial substring of @s__ign which 3599 * does not contain letters in @reject__ign 3600 * @s__ign: The string to be searched 3601 * @reject__ign: The string to search for 3602 * 3603 * Return: 3604 * * >=0 - The length of the initial substring of @s__ign which does not 3605 * contain letters from @reject__ign 3606 * * %-EFAULT - Cannot read one of the strings 3607 * * %-E2BIG - One of the strings is too large 3608 * * %-ERANGE - One of the strings is outside of kernel address space 3609 */ 3610 __bpf_kfunc int bpf_strcspn(const char *s__ign, const char *reject__ign) 3611 { 3612 char cs, cr; 3613 int i, j; 3614 3615 if (!copy_from_kernel_nofault_allowed(s__ign, 1) || 3616 !copy_from_kernel_nofault_allowed(reject__ign, 1)) { 3617 return -ERANGE; 3618 } 3619 3620 guard(pagefault)(); 3621 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3622 __get_kernel_nofault(&cs, s__ign, char, err_out); 3623 if (cs == '\0') 3624 return i; 3625 for (j = 0; j < XATTR_SIZE_MAX; j++) { 3626 __get_kernel_nofault(&cr, reject__ign + j, char, err_out); 3627 if (cs == cr || cr == '\0') 3628 break; 3629 } 3630 if (j == XATTR_SIZE_MAX) 3631 return -E2BIG; 3632 if (cr != '\0') 3633 return i; 3634 s__ign++; 3635 } 3636 return -E2BIG; 3637 err_out: 3638 return -EFAULT; 3639 } 3640 3641 /** 3642 * bpf_strnstr - Find the first substring in a length-limited string 3643 * @s1__ign: The string to be searched 3644 * @s2__ign: The string to search for 3645 * @len: the maximum number of characters to search 3646 * 3647 * Return: 3648 * * >=0 - Index of the first character of the first occurrence of @s2__ign 3649 * within the first @len characters of @s1__ign 3650 * * %-ENOENT - @s2__ign not found in the first @len characters 
of @s1__ign 3651 * * %-EFAULT - Cannot read one of the strings 3652 * * %-E2BIG - One of the strings is too large 3653 * * %-ERANGE - One of the strings is outside of kernel address space 3654 */ 3655 __bpf_kfunc int bpf_strnstr(const char *s1__ign, const char *s2__ign, size_t len) 3656 { 3657 char c1, c2; 3658 int i, j; 3659 3660 if (!copy_from_kernel_nofault_allowed(s1__ign, 1) || 3661 !copy_from_kernel_nofault_allowed(s2__ign, 1)) { 3662 return -ERANGE; 3663 } 3664 3665 guard(pagefault)(); 3666 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3667 for (j = 0; i + j < len && j < XATTR_SIZE_MAX; j++) { 3668 __get_kernel_nofault(&c2, s2__ign + j, char, err_out); 3669 if (c2 == '\0') 3670 return i; 3671 __get_kernel_nofault(&c1, s1__ign + j, char, err_out); 3672 if (c1 == '\0') 3673 return -ENOENT; 3674 if (c1 != c2) 3675 break; 3676 } 3677 if (j == XATTR_SIZE_MAX) 3678 return -E2BIG; 3679 if (i + j == len) 3680 return -ENOENT; 3681 s1__ign++; 3682 } 3683 return -E2BIG; 3684 err_out: 3685 return -EFAULT; 3686 } 3687 3688 /** 3689 * bpf_strstr - Find the first substring in a string 3690 * @s1__ign: The string to be searched 3691 * @s2__ign: The string to search for 3692 * 3693 * Return: 3694 * * >=0 - Index of the first character of the first occurrence of @s2__ign 3695 * within @s1__ign 3696 * * %-ENOENT - @s2__ign is not a substring of @s1__ign 3697 * * %-EFAULT - Cannot read one of the strings 3698 * * %-E2BIG - One of the strings is too large 3699 * * %-ERANGE - One of the strings is outside of kernel address space 3700 */ 3701 __bpf_kfunc int bpf_strstr(const char *s1__ign, const char *s2__ign) 3702 { 3703 return bpf_strnstr(s1__ign, s2__ign, XATTR_SIZE_MAX); 3704 } 3705 3706 __bpf_kfunc_end_defs(); 3707 3708 BTF_KFUNCS_START(generic_btf_ids) 3709 #ifdef CONFIG_CRASH_DUMP 3710 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) 3711 #endif 3712 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) 3713 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) 3714 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) 3715 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE) 3716 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU) 3717 BTF_ID_FLAGS(func, bpf_list_push_front_impl) 3718 BTF_ID_FLAGS(func, bpf_list_push_back_impl) 3719 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) 3720 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) 3721 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL) 3722 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL) 3723 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3724 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) 3725 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL) 3726 BTF_ID_FLAGS(func, bpf_rbtree_add_impl) 3727 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) 3728 BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL) 3729 BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL) 3730 BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL) 3731 3732 #ifdef CONFIG_CGROUPS 3733 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3734 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) 3735 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3736 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) 3737 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU) 3738 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3739 #endif 3740 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) 3741 
BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL) 3742 BTF_ID_FLAGS(func, bpf_throw) 3743 #ifdef CONFIG_BPF_EVENTS 3744 BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS) 3745 #endif 3746 BTF_KFUNCS_END(generic_btf_ids) 3747 3748 static const struct btf_kfunc_id_set generic_kfunc_set = { 3749 .owner = THIS_MODULE, 3750 .set = &generic_btf_ids, 3751 }; 3752 3753 3754 BTF_ID_LIST(generic_dtor_ids) 3755 BTF_ID(struct, task_struct) 3756 BTF_ID(func, bpf_task_release_dtor) 3757 #ifdef CONFIG_CGROUPS 3758 BTF_ID(struct, cgroup) 3759 BTF_ID(func, bpf_cgroup_release_dtor) 3760 #endif 3761 3762 BTF_KFUNCS_START(common_btf_ids) 3763 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL) 3764 BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL) 3765 BTF_ID_FLAGS(func, bpf_rcu_read_lock) 3766 BTF_ID_FLAGS(func, bpf_rcu_read_unlock) 3767 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) 3768 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL) 3769 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW) 3770 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL) 3771 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) 3772 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU) 3773 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL) 3774 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) 3775 #ifdef CONFIG_CGROUPS 3776 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) 3777 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) 3778 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) 3779 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) 3780 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) 3781 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) 3782 #endif 3783 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) 3784 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) 3785 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) 3786 BTF_ID_FLAGS(func, bpf_dynptr_adjust) 3787 BTF_ID_FLAGS(func, bpf_dynptr_is_null) 3788 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) 3789 BTF_ID_FLAGS(func, bpf_dynptr_size) 3790 BTF_ID_FLAGS(func, bpf_dynptr_clone) 3791 BTF_ID_FLAGS(func, bpf_dynptr_copy) 3792 BTF_ID_FLAGS(func, bpf_dynptr_memset) 3793 #ifdef CONFIG_NET 3794 BTF_ID_FLAGS(func, bpf_modify_return_test_tp) 3795 #endif 3796 BTF_ID_FLAGS(func, bpf_wq_init) 3797 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl) 3798 BTF_ID_FLAGS(func, bpf_wq_start) 3799 BTF_ID_FLAGS(func, bpf_preempt_disable) 3800 BTF_ID_FLAGS(func, bpf_preempt_enable) 3801 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW) 3802 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL) 3803 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY) 3804 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE) 3805 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE) 3806 BTF_ID_FLAGS(func, bpf_get_kmem_cache) 3807 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE) 3808 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) 3809 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) 3810 BTF_ID_FLAGS(func, bpf_local_irq_save) 3811 BTF_ID_FLAGS(func, bpf_local_irq_restore) 3812 BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr) 3813 BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr) 3814 BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr) 
3815 BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr) 3816 BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE) 3817 BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE) 3818 BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) 3819 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) 3820 #ifdef CONFIG_DMA_SHARED_BUFFER 3821 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE) 3822 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) 3823 BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) 3824 #endif 3825 BTF_ID_FLAGS(func, __bpf_trap) 3826 BTF_ID_FLAGS(func, bpf_strcmp); 3827 BTF_ID_FLAGS(func, bpf_strchr); 3828 BTF_ID_FLAGS(func, bpf_strchrnul); 3829 BTF_ID_FLAGS(func, bpf_strnchr); 3830 BTF_ID_FLAGS(func, bpf_strrchr); 3831 BTF_ID_FLAGS(func, bpf_strlen); 3832 BTF_ID_FLAGS(func, bpf_strnlen); 3833 BTF_ID_FLAGS(func, bpf_strspn); 3834 BTF_ID_FLAGS(func, bpf_strcspn); 3835 BTF_ID_FLAGS(func, bpf_strstr); 3836 BTF_ID_FLAGS(func, bpf_strnstr); 3837 #if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS) 3838 BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU) 3839 #endif 3840 BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_TRUSTED_ARGS) 3841 BTF_KFUNCS_END(common_btf_ids) 3842 3843 static const struct btf_kfunc_id_set common_kfunc_set = { 3844 .owner = THIS_MODULE, 3845 .set = &common_btf_ids, 3846 }; 3847 3848 static int __init kfunc_init(void) 3849 { 3850 int ret; 3851 const struct btf_id_dtor_kfunc generic_dtors[] = { 3852 { 3853 .btf_id = generic_dtor_ids[0], 3854 .kfunc_btf_id = generic_dtor_ids[1] 3855 }, 3856 #ifdef CONFIG_CGROUPS 3857 { 3858 .btf_id = generic_dtor_ids[2], 3859 .kfunc_btf_id = generic_dtor_ids[3] 3860 }, 3861 #endif 3862 }; 3863 3864 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set); 3865 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set); 3866 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set); 3867 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set); 3868 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set); 3869 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set); 3870 ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors, 3871 ARRAY_SIZE(generic_dtors), 3872 THIS_MODULE); 3873 return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set); 3874 } 3875 3876 late_initcall(kfunc_init); 3877 3878 /* Get a pointer to dynptr data up to len bytes for read only access. If 3879 * the dynptr doesn't have continuous data up to len bytes, return NULL. 3880 */ 3881 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len) 3882 { 3883 const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr; 3884 3885 return bpf_dynptr_slice(p, 0, NULL, len); 3886 } 3887 3888 /* Get a pointer to dynptr data up to len bytes for read write access. If 3889 * the dynptr doesn't have continuous data up to len bytes, or the dynptr 3890 * is read only, return NULL. 3891 */ 3892 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len) 3893 { 3894 if (__bpf_dynptr_is_rdonly(ptr)) 3895 return NULL; 3896 return (void *)__bpf_dynptr_data(ptr, len); 3897 } 3898
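
/* Illustrative sketch (an assumption of this comment, not an existing in-tree
 * caller): a kernel-side consumer holding a struct bpf_dynptr_kern could use
 * the accessors above to read the leading bytes only when the data is
 * contiguous:
 *
 *	const u32 *tag = __bpf_dynptr_data(ptr, sizeof(*tag));
 *
 *	if (tag)
 *		pr_debug("dynptr tag: %u\n", *tag);
 */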