1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #include <linux/bpf.h> 5 #include <linux/btf.h> 6 #include <linux/bpf-cgroup.h> 7 #include <linux/cgroup.h> 8 #include <linux/rcupdate.h> 9 #include <linux/random.h> 10 #include <linux/smp.h> 11 #include <linux/topology.h> 12 #include <linux/ktime.h> 13 #include <linux/sched.h> 14 #include <linux/uidgid.h> 15 #include <linux/filter.h> 16 #include <linux/ctype.h> 17 #include <linux/jiffies.h> 18 #include <linux/pid_namespace.h> 19 #include <linux/poison.h> 20 #include <linux/proc_ns.h> 21 #include <linux/sched/task.h> 22 #include <linux/security.h> 23 #include <linux/btf_ids.h> 24 #include <linux/bpf_mem_alloc.h> 25 #include <linux/kasan.h> 26 #include <linux/bpf_verifier.h> 27 #include <linux/uaccess.h> 28 29 #include "../../lib/kstrtox.h" 30 31 /* If kernel subsystem is allowing eBPF programs to call this function, 32 * inside its own verifier_ops->get_func_proto() callback it should return 33 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments 34 * 35 * Different map implementations will rely on rcu in map methods 36 * lookup/update/delete, therefore eBPF programs must run under rcu lock 37 * if program is allowed to access maps, so check rcu_read_lock_held() or 38 * rcu_read_lock_trace_held() in all three functions. 39 */ 40 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) 41 { 42 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 43 !rcu_read_lock_bh_held()); 44 return (unsigned long) map->ops->map_lookup_elem(map, key); 45 } 46 47 const struct bpf_func_proto bpf_map_lookup_elem_proto = { 48 .func = bpf_map_lookup_elem, 49 .gpl_only = false, 50 .pkt_access = true, 51 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 52 .arg1_type = ARG_CONST_MAP_PTR, 53 .arg2_type = ARG_PTR_TO_MAP_KEY, 54 }; 55 56 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key, 57 void *, value, u64, flags) 58 { 59 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 60 !rcu_read_lock_bh_held()); 61 return map->ops->map_update_elem(map, key, value, flags); 62 } 63 64 const struct bpf_func_proto bpf_map_update_elem_proto = { 65 .func = bpf_map_update_elem, 66 .gpl_only = false, 67 .pkt_access = true, 68 .ret_type = RET_INTEGER, 69 .arg1_type = ARG_CONST_MAP_PTR, 70 .arg2_type = ARG_PTR_TO_MAP_KEY, 71 .arg3_type = ARG_PTR_TO_MAP_VALUE, 72 .arg4_type = ARG_ANYTHING, 73 }; 74 75 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key) 76 { 77 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 78 !rcu_read_lock_bh_held()); 79 return map->ops->map_delete_elem(map, key); 80 } 81 82 const struct bpf_func_proto bpf_map_delete_elem_proto = { 83 .func = bpf_map_delete_elem, 84 .gpl_only = false, 85 .pkt_access = true, 86 .ret_type = RET_INTEGER, 87 .arg1_type = ARG_CONST_MAP_PTR, 88 .arg2_type = ARG_PTR_TO_MAP_KEY, 89 }; 90 91 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags) 92 { 93 return map->ops->map_push_elem(map, value, flags); 94 } 95 96 const struct bpf_func_proto bpf_map_push_elem_proto = { 97 .func = bpf_map_push_elem, 98 .gpl_only = false, 99 .pkt_access = true, 100 .ret_type = RET_INTEGER, 101 .arg1_type = ARG_CONST_MAP_PTR, 102 .arg2_type = ARG_PTR_TO_MAP_VALUE, 103 .arg3_type = ARG_ANYTHING, 104 }; 105 106 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value) 107 { 108 return map->ops->map_pop_elem(map, value); 109 } 110 111 const struct 
bpf_func_proto bpf_map_pop_elem_proto = { 112 .func = bpf_map_pop_elem, 113 .gpl_only = false, 114 .ret_type = RET_INTEGER, 115 .arg1_type = ARG_CONST_MAP_PTR, 116 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE, 117 }; 118 119 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) 120 { 121 return map->ops->map_peek_elem(map, value); 122 } 123 124 const struct bpf_func_proto bpf_map_peek_elem_proto = { 125 .func = bpf_map_peek_elem, 126 .gpl_only = false, 127 .ret_type = RET_INTEGER, 128 .arg1_type = ARG_CONST_MAP_PTR, 129 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE, 130 }; 131 132 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) 133 { 134 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 135 !rcu_read_lock_bh_held()); 136 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); 137 } 138 139 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = { 140 .func = bpf_map_lookup_percpu_elem, 141 .gpl_only = false, 142 .pkt_access = true, 143 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 144 .arg1_type = ARG_CONST_MAP_PTR, 145 .arg2_type = ARG_PTR_TO_MAP_KEY, 146 .arg3_type = ARG_ANYTHING, 147 }; 148 149 const struct bpf_func_proto bpf_get_prandom_u32_proto = { 150 .func = bpf_user_rnd_u32, 151 .gpl_only = false, 152 .ret_type = RET_INTEGER, 153 }; 154 155 BPF_CALL_0(bpf_get_smp_processor_id) 156 { 157 return smp_processor_id(); 158 } 159 160 const struct bpf_func_proto bpf_get_smp_processor_id_proto = { 161 .func = bpf_get_smp_processor_id, 162 .gpl_only = false, 163 .ret_type = RET_INTEGER, 164 .allow_fastcall = true, 165 }; 166 167 BPF_CALL_0(bpf_get_numa_node_id) 168 { 169 return numa_node_id(); 170 } 171 172 const struct bpf_func_proto bpf_get_numa_node_id_proto = { 173 .func = bpf_get_numa_node_id, 174 .gpl_only = false, 175 .ret_type = RET_INTEGER, 176 }; 177 178 BPF_CALL_0(bpf_ktime_get_ns) 179 { 180 /* NMI safe access to clock monotonic */ 181 return ktime_get_mono_fast_ns(); 182 } 183 184 const struct bpf_func_proto bpf_ktime_get_ns_proto = { 185 .func = bpf_ktime_get_ns, 186 .gpl_only = false, 187 .ret_type = RET_INTEGER, 188 }; 189 190 BPF_CALL_0(bpf_ktime_get_boot_ns) 191 { 192 /* NMI safe access to clock boottime */ 193 return ktime_get_boot_fast_ns(); 194 } 195 196 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = { 197 .func = bpf_ktime_get_boot_ns, 198 .gpl_only = false, 199 .ret_type = RET_INTEGER, 200 }; 201 202 BPF_CALL_0(bpf_ktime_get_coarse_ns) 203 { 204 return ktime_get_coarse_ns(); 205 } 206 207 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = { 208 .func = bpf_ktime_get_coarse_ns, 209 .gpl_only = false, 210 .ret_type = RET_INTEGER, 211 }; 212 213 BPF_CALL_0(bpf_ktime_get_tai_ns) 214 { 215 /* NMI safe access to clock tai */ 216 return ktime_get_tai_fast_ns(); 217 } 218 219 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = { 220 .func = bpf_ktime_get_tai_ns, 221 .gpl_only = false, 222 .ret_type = RET_INTEGER, 223 }; 224 225 BPF_CALL_0(bpf_get_current_pid_tgid) 226 { 227 struct task_struct *task = current; 228 229 if (unlikely(!task)) 230 return -EINVAL; 231 232 return (u64) task->tgid << 32 | task->pid; 233 } 234 235 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = { 236 .func = bpf_get_current_pid_tgid, 237 .gpl_only = false, 238 .ret_type = RET_INTEGER, 239 }; 240 241 BPF_CALL_0(bpf_get_current_uid_gid) 242 { 243 struct task_struct *task = current; 244 kuid_t uid; 245 kgid_t gid; 246 247 if (unlikely(!task)) 248 return 
-EINVAL; 249 250 current_uid_gid(&uid, &gid); 251 return (u64) from_kgid(&init_user_ns, gid) << 32 | 252 from_kuid(&init_user_ns, uid); 253 } 254 255 const struct bpf_func_proto bpf_get_current_uid_gid_proto = { 256 .func = bpf_get_current_uid_gid, 257 .gpl_only = false, 258 .ret_type = RET_INTEGER, 259 }; 260 261 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size) 262 { 263 struct task_struct *task = current; 264 265 if (unlikely(!task)) 266 goto err_clear; 267 268 /* Verifier guarantees that size > 0 */ 269 strscpy_pad(buf, task->comm, size); 270 return 0; 271 err_clear: 272 memset(buf, 0, size); 273 return -EINVAL; 274 } 275 276 const struct bpf_func_proto bpf_get_current_comm_proto = { 277 .func = bpf_get_current_comm, 278 .gpl_only = false, 279 .ret_type = RET_INTEGER, 280 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 281 .arg2_type = ARG_CONST_SIZE, 282 }; 283 284 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK) 285 286 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) 287 { 288 arch_spinlock_t *l = (void *)lock; 289 union { 290 __u32 val; 291 arch_spinlock_t lock; 292 } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED }; 293 294 compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0"); 295 BUILD_BUG_ON(sizeof(*l) != sizeof(__u32)); 296 BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32)); 297 preempt_disable(); 298 arch_spin_lock(l); 299 } 300 301 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) 302 { 303 arch_spinlock_t *l = (void *)lock; 304 305 arch_spin_unlock(l); 306 preempt_enable(); 307 } 308 309 #else 310 311 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) 312 { 313 atomic_t *l = (void *)lock; 314 315 BUILD_BUG_ON(sizeof(*l) != sizeof(*lock)); 316 do { 317 atomic_cond_read_relaxed(l, !VAL); 318 } while (atomic_xchg(l, 1)); 319 } 320 321 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) 322 { 323 atomic_t *l = (void *)lock; 324 325 atomic_set_release(l, 0); 326 } 327 328 #endif 329 330 static DEFINE_PER_CPU(unsigned long, irqsave_flags); 331 332 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock) 333 { 334 unsigned long flags; 335 336 local_irq_save(flags); 337 __bpf_spin_lock(lock); 338 __this_cpu_write(irqsave_flags, flags); 339 } 340 341 NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) 342 { 343 __bpf_spin_lock_irqsave(lock); 344 return 0; 345 } 346 347 const struct bpf_func_proto bpf_spin_lock_proto = { 348 .func = bpf_spin_lock, 349 .gpl_only = false, 350 .ret_type = RET_VOID, 351 .arg1_type = ARG_PTR_TO_SPIN_LOCK, 352 .arg1_btf_id = BPF_PTR_POISON, 353 }; 354 355 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock) 356 { 357 unsigned long flags; 358 359 flags = __this_cpu_read(irqsave_flags); 360 __bpf_spin_unlock(lock); 361 local_irq_restore(flags); 362 } 363 364 NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) 365 { 366 __bpf_spin_unlock_irqrestore(lock); 367 return 0; 368 } 369 370 const struct bpf_func_proto bpf_spin_unlock_proto = { 371 .func = bpf_spin_unlock, 372 .gpl_only = false, 373 .ret_type = RET_VOID, 374 .arg1_type = ARG_PTR_TO_SPIN_LOCK, 375 .arg1_btf_id = BPF_PTR_POISON, 376 }; 377 378 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, 379 bool lock_src) 380 { 381 struct bpf_spin_lock *lock; 382 383 if (lock_src) 384 lock = src + map->record->spin_lock_off; 385 else 386 lock = dst + map->record->spin_lock_off; 387 preempt_disable(); 388 __bpf_spin_lock_irqsave(lock); 389 copy_map_value(map, 
dst, src); 390 __bpf_spin_unlock_irqrestore(lock); 391 preempt_enable(); 392 } 393 394 BPF_CALL_0(bpf_jiffies64) 395 { 396 return get_jiffies_64(); 397 } 398 399 const struct bpf_func_proto bpf_jiffies64_proto = { 400 .func = bpf_jiffies64, 401 .gpl_only = false, 402 .ret_type = RET_INTEGER, 403 }; 404 405 #ifdef CONFIG_CGROUPS 406 BPF_CALL_0(bpf_get_current_cgroup_id) 407 { 408 struct cgroup *cgrp; 409 u64 cgrp_id; 410 411 rcu_read_lock(); 412 cgrp = task_dfl_cgroup(current); 413 cgrp_id = cgroup_id(cgrp); 414 rcu_read_unlock(); 415 416 return cgrp_id; 417 } 418 419 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { 420 .func = bpf_get_current_cgroup_id, 421 .gpl_only = false, 422 .ret_type = RET_INTEGER, 423 }; 424 425 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level) 426 { 427 struct cgroup *cgrp; 428 struct cgroup *ancestor; 429 u64 cgrp_id; 430 431 rcu_read_lock(); 432 cgrp = task_dfl_cgroup(current); 433 ancestor = cgroup_ancestor(cgrp, ancestor_level); 434 cgrp_id = ancestor ? cgroup_id(ancestor) : 0; 435 rcu_read_unlock(); 436 437 return cgrp_id; 438 } 439 440 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = { 441 .func = bpf_get_current_ancestor_cgroup_id, 442 .gpl_only = false, 443 .ret_type = RET_INTEGER, 444 .arg1_type = ARG_ANYTHING, 445 }; 446 #endif /* CONFIG_CGROUPS */ 447 448 #define BPF_STRTOX_BASE_MASK 0x1F 449 450 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags, 451 unsigned long long *res, bool *is_negative) 452 { 453 unsigned int base = flags & BPF_STRTOX_BASE_MASK; 454 const char *cur_buf = buf; 455 size_t cur_len = buf_len; 456 unsigned int consumed; 457 size_t val_len; 458 char str[64]; 459 460 if (!buf || !buf_len || !res || !is_negative) 461 return -EINVAL; 462 463 if (base != 0 && base != 8 && base != 10 && base != 16) 464 return -EINVAL; 465 466 if (flags & ~BPF_STRTOX_BASE_MASK) 467 return -EINVAL; 468 469 while (cur_buf < buf + buf_len && isspace(*cur_buf)) 470 ++cur_buf; 471 472 *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-'); 473 if (*is_negative) 474 ++cur_buf; 475 476 consumed = cur_buf - buf; 477 cur_len -= consumed; 478 if (!cur_len) 479 return -EINVAL; 480 481 cur_len = min(cur_len, sizeof(str) - 1); 482 memcpy(str, cur_buf, cur_len); 483 str[cur_len] = '\0'; 484 cur_buf = str; 485 486 cur_buf = _parse_integer_fixup_radix(cur_buf, &base); 487 val_len = _parse_integer(cur_buf, base, res); 488 489 if (val_len & KSTRTOX_OVERFLOW) 490 return -ERANGE; 491 492 if (val_len == 0) 493 return -EINVAL; 494 495 cur_buf += val_len; 496 consumed += cur_buf - str; 497 498 return consumed; 499 } 500 501 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, 502 long long *res) 503 { 504 unsigned long long _res; 505 bool is_negative; 506 int err; 507 508 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); 509 if (err < 0) 510 return err; 511 if (is_negative) { 512 if ((long long)-_res > 0) 513 return -ERANGE; 514 *res = -_res; 515 } else { 516 if ((long long)_res < 0) 517 return -ERANGE; 518 *res = _res; 519 } 520 return err; 521 } 522 523 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, 524 s64 *, res) 525 { 526 long long _res; 527 int err; 528 529 *res = 0; 530 err = __bpf_strtoll(buf, buf_len, flags, &_res); 531 if (err < 0) 532 return err; 533 *res = _res; 534 return err; 535 } 536 537 const struct bpf_func_proto bpf_strtol_proto = { 538 .func = bpf_strtol, 539 .gpl_only = false, 540 .ret_type = RET_INTEGER, 541 .arg1_type = 
ARG_PTR_TO_MEM | MEM_RDONLY, 542 .arg2_type = ARG_CONST_SIZE, 543 .arg3_type = ARG_ANYTHING, 544 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, 545 .arg4_size = sizeof(s64), 546 }; 547 548 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, 549 u64 *, res) 550 { 551 unsigned long long _res; 552 bool is_negative; 553 int err; 554 555 *res = 0; 556 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); 557 if (err < 0) 558 return err; 559 if (is_negative) 560 return -EINVAL; 561 *res = _res; 562 return err; 563 } 564 565 const struct bpf_func_proto bpf_strtoul_proto = { 566 .func = bpf_strtoul, 567 .gpl_only = false, 568 .ret_type = RET_INTEGER, 569 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 570 .arg2_type = ARG_CONST_SIZE, 571 .arg3_type = ARG_ANYTHING, 572 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, 573 .arg4_size = sizeof(u64), 574 }; 575 576 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) 577 { 578 return strncmp(s1, s2, s1_sz); 579 } 580 581 static const struct bpf_func_proto bpf_strncmp_proto = { 582 .func = bpf_strncmp, 583 .gpl_only = false, 584 .ret_type = RET_INTEGER, 585 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 586 .arg2_type = ARG_CONST_SIZE, 587 .arg3_type = ARG_PTR_TO_CONST_STR, 588 }; 589 590 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino, 591 struct bpf_pidns_info *, nsdata, u32, size) 592 { 593 struct task_struct *task = current; 594 struct pid_namespace *pidns; 595 int err = -EINVAL; 596 597 if (unlikely(size != sizeof(struct bpf_pidns_info))) 598 goto clear; 599 600 if (unlikely((u64)(dev_t)dev != dev)) 601 goto clear; 602 603 if (unlikely(!task)) 604 goto clear; 605 606 pidns = task_active_pid_ns(task); 607 if (unlikely(!pidns)) { 608 err = -ENOENT; 609 goto clear; 610 } 611 612 if (!ns_match(&pidns->ns, (dev_t)dev, ino)) 613 goto clear; 614 615 nsdata->pid = task_pid_nr_ns(task, pidns); 616 nsdata->tgid = task_tgid_nr_ns(task, pidns); 617 return 0; 618 clear: 619 memset((void *)nsdata, 0, (size_t) size); 620 return err; 621 } 622 623 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = { 624 .func = bpf_get_ns_current_pid_tgid, 625 .gpl_only = false, 626 .ret_type = RET_INTEGER, 627 .arg1_type = ARG_ANYTHING, 628 .arg2_type = ARG_ANYTHING, 629 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 630 .arg4_type = ARG_CONST_SIZE, 631 }; 632 633 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { 634 .func = bpf_get_raw_cpu_id, 635 .gpl_only = false, 636 .ret_type = RET_INTEGER, 637 }; 638 639 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map, 640 u64, flags, void *, data, u64, size) 641 { 642 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 643 return -EINVAL; 644 645 return bpf_event_output(map, flags, data, size, NULL, 0, NULL); 646 } 647 648 const struct bpf_func_proto bpf_event_output_data_proto = { 649 .func = bpf_event_output_data, 650 .gpl_only = true, 651 .ret_type = RET_INTEGER, 652 .arg1_type = ARG_PTR_TO_CTX, 653 .arg2_type = ARG_CONST_MAP_PTR, 654 .arg3_type = ARG_ANYTHING, 655 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 656 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 657 }; 658 659 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size, 660 const void __user *, user_ptr) 661 { 662 int ret = copy_from_user(dst, user_ptr, size); 663 664 if (unlikely(ret)) { 665 memset(dst, 0, size); 666 ret = -EFAULT; 667 } 668 669 return ret; 670 } 671 672 const struct bpf_func_proto bpf_copy_from_user_proto = { 673 .func = 
bpf_copy_from_user, 674 .gpl_only = false, 675 .might_sleep = true, 676 .ret_type = RET_INTEGER, 677 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 678 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 679 .arg3_type = ARG_ANYTHING, 680 }; 681 682 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size, 683 const void __user *, user_ptr, struct task_struct *, tsk, u64, flags) 684 { 685 int ret; 686 687 /* flags is not used yet */ 688 if (unlikely(flags)) 689 return -EINVAL; 690 691 if (unlikely(!size)) 692 return 0; 693 694 ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0); 695 if (ret == size) 696 return 0; 697 698 memset(dst, 0, size); 699 /* Return -EFAULT for partial read */ 700 return ret < 0 ? ret : -EFAULT; 701 } 702 703 const struct bpf_func_proto bpf_copy_from_user_task_proto = { 704 .func = bpf_copy_from_user_task, 705 .gpl_only = true, 706 .might_sleep = true, 707 .ret_type = RET_INTEGER, 708 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 709 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 710 .arg3_type = ARG_ANYTHING, 711 .arg4_type = ARG_PTR_TO_BTF_ID, 712 .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], 713 .arg5_type = ARG_ANYTHING 714 }; 715 716 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) 717 { 718 if (cpu >= nr_cpu_ids) 719 return (unsigned long)NULL; 720 721 return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu); 722 } 723 724 const struct bpf_func_proto bpf_per_cpu_ptr_proto = { 725 .func = bpf_per_cpu_ptr, 726 .gpl_only = false, 727 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY, 728 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, 729 .arg2_type = ARG_ANYTHING, 730 }; 731 732 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr) 733 { 734 return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr); 735 } 736 737 const struct bpf_func_proto bpf_this_cpu_ptr_proto = { 738 .func = bpf_this_cpu_ptr, 739 .gpl_only = false, 740 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY, 741 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, 742 }; 743 744 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, 745 size_t bufsz) 746 { 747 void __user *user_ptr = (__force void __user *)unsafe_ptr; 748 749 buf[0] = 0; 750 751 switch (fmt_ptype) { 752 case 's': 753 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 754 if ((unsigned long)unsafe_ptr < TASK_SIZE) 755 return strncpy_from_user_nofault(buf, user_ptr, bufsz); 756 fallthrough; 757 #endif 758 case 'k': 759 return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz); 760 case 'u': 761 return strncpy_from_user_nofault(buf, user_ptr, bufsz); 762 } 763 764 return -EINVAL; 765 } 766 767 /* Support executing three nested bprintf helper calls on a given CPU */ 768 #define MAX_BPRINTF_NEST_LEVEL 3 769 770 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs); 771 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level); 772 773 int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs) 774 { 775 int nest_level; 776 777 preempt_disable(); 778 nest_level = this_cpu_inc_return(bpf_bprintf_nest_level); 779 if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) { 780 this_cpu_dec(bpf_bprintf_nest_level); 781 preempt_enable(); 782 return -EBUSY; 783 } 784 *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); 785 786 return 0; 787 } 788 789 void bpf_put_buffers(void) 790 { 791 if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0)) 792 return; 793 this_cpu_dec(bpf_bprintf_nest_level); 794 preempt_enable(); 795 } 796 797 void 
bpf_bprintf_cleanup(struct bpf_bprintf_data *data) 798 { 799 if (!data->bin_args && !data->buf) 800 return; 801 bpf_put_buffers(); 802 } 803 804 /* 805 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers 806 * 807 * Returns a negative value if fmt is an invalid format string or 0 otherwise. 808 * 809 * This can be used in two ways: 810 * - Format string verification only: when data->get_bin_args is false 811 * - Arguments preparation: in addition to the above verification, it writes in 812 * data->bin_args a binary representation of arguments usable by bstr_printf 813 * where pointers from BPF have been sanitized. 814 * 815 * In argument preparation mode, if 0 is returned, safe temporary buffers are 816 * allocated and bpf_bprintf_cleanup should be called to free them after use. 817 */ 818 int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args, 819 u32 num_args, struct bpf_bprintf_data *data) 820 { 821 bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; 822 char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end; 823 struct bpf_bprintf_buffers *buffers = NULL; 824 size_t sizeof_cur_arg, sizeof_cur_ip; 825 int err, i, num_spec = 0; 826 u64 cur_arg; 827 char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX"; 828 829 fmt_end = strnchr(fmt, fmt_size, 0); 830 if (!fmt_end) 831 return -EINVAL; 832 fmt_size = fmt_end - fmt; 833 834 if (get_buffers && bpf_try_get_buffers(&buffers)) 835 return -EBUSY; 836 837 if (data->get_bin_args) { 838 if (num_args) 839 tmp_buf = buffers->bin_args; 840 tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS; 841 data->bin_args = (u32 *)tmp_buf; 842 } 843 844 if (data->get_buf) 845 data->buf = buffers->buf; 846 847 for (i = 0; i < fmt_size; i++) { 848 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { 849 err = -EINVAL; 850 goto out; 851 } 852 853 if (fmt[i] != '%') 854 continue; 855 856 if (fmt[i + 1] == '%') { 857 i++; 858 continue; 859 } 860 861 if (num_spec >= num_args) { 862 err = -EINVAL; 863 goto out; 864 } 865 866 /* The string is zero-terminated so if fmt[i] != 0, we can 867 * always access fmt[i + 1], in the worst case it will be a 0 868 */ 869 i++; 870 871 /* skip optional "[0 +-][num]" width formatting field */ 872 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || 873 fmt[i] == ' ') 874 i++; 875 if (fmt[i] >= '1' && fmt[i] <= '9') { 876 i++; 877 while (fmt[i] >= '0' && fmt[i] <= '9') 878 i++; 879 } 880 881 if (fmt[i] == 'p') { 882 sizeof_cur_arg = sizeof(long); 883 884 if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') && 885 fmt[i + 2] == 's') { 886 fmt_ptype = fmt[i + 1]; 887 i += 2; 888 goto fmt_str; 889 } 890 891 if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) || 892 ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' || 893 fmt[i + 1] == 'x' || fmt[i + 1] == 's' || 894 fmt[i + 1] == 'S') { 895 /* just kernel pointers */ 896 if (tmp_buf) 897 cur_arg = raw_args[num_spec]; 898 i++; 899 goto nocopy_fmt; 900 } 901 902 if (fmt[i + 1] == 'B') { 903 if (tmp_buf) { 904 err = snprintf(tmp_buf, 905 (tmp_buf_end - tmp_buf), 906 "%pB", 907 (void *)(long)raw_args[num_spec]); 908 tmp_buf += (err + 1); 909 } 910 911 i++; 912 num_spec++; 913 continue; 914 } 915 916 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ 917 if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') || 918 (fmt[i + 2] != '4' && fmt[i + 2] != '6')) { 919 err = -EINVAL; 920 goto out; 921 } 922 923 i += 2; 924 if (!tmp_buf) 925 goto nocopy_fmt; 926 927 sizeof_cur_ip = (fmt[i] == '4') ? 
4 : 16; 928 if (tmp_buf_end - tmp_buf < sizeof_cur_ip) { 929 err = -ENOSPC; 930 goto out; 931 } 932 933 unsafe_ptr = (char *)(long)raw_args[num_spec]; 934 err = copy_from_kernel_nofault(cur_ip, unsafe_ptr, 935 sizeof_cur_ip); 936 if (err < 0) 937 memset(cur_ip, 0, sizeof_cur_ip); 938 939 /* hack: bstr_printf expects IP addresses to be 940 * pre-formatted as strings, ironically, the easiest way 941 * to do that is to call snprintf. 942 */ 943 ip_spec[2] = fmt[i - 1]; 944 ip_spec[3] = fmt[i]; 945 err = snprintf(tmp_buf, tmp_buf_end - tmp_buf, 946 ip_spec, &cur_ip); 947 948 tmp_buf += err + 1; 949 num_spec++; 950 951 continue; 952 } else if (fmt[i] == 's') { 953 fmt_ptype = fmt[i]; 954 fmt_str: 955 if (fmt[i + 1] != 0 && 956 !isspace(fmt[i + 1]) && 957 !ispunct(fmt[i + 1])) { 958 err = -EINVAL; 959 goto out; 960 } 961 962 if (!tmp_buf) 963 goto nocopy_fmt; 964 965 if (tmp_buf_end == tmp_buf) { 966 err = -ENOSPC; 967 goto out; 968 } 969 970 unsafe_ptr = (char *)(long)raw_args[num_spec]; 971 err = bpf_trace_copy_string(tmp_buf, unsafe_ptr, 972 fmt_ptype, 973 tmp_buf_end - tmp_buf); 974 if (err < 0) { 975 tmp_buf[0] = '\0'; 976 err = 1; 977 } 978 979 tmp_buf += err; 980 num_spec++; 981 982 continue; 983 } else if (fmt[i] == 'c') { 984 if (!tmp_buf) 985 goto nocopy_fmt; 986 987 if (tmp_buf_end == tmp_buf) { 988 err = -ENOSPC; 989 goto out; 990 } 991 992 *tmp_buf = raw_args[num_spec]; 993 tmp_buf++; 994 num_spec++; 995 996 continue; 997 } 998 999 sizeof_cur_arg = sizeof(int); 1000 1001 if (fmt[i] == 'l') { 1002 sizeof_cur_arg = sizeof(long); 1003 i++; 1004 } 1005 if (fmt[i] == 'l') { 1006 sizeof_cur_arg = sizeof(long long); 1007 i++; 1008 } 1009 1010 if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' && 1011 fmt[i] != 'x' && fmt[i] != 'X') { 1012 err = -EINVAL; 1013 goto out; 1014 } 1015 1016 if (tmp_buf) 1017 cur_arg = raw_args[num_spec]; 1018 nocopy_fmt: 1019 if (tmp_buf) { 1020 tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32)); 1021 if (tmp_buf_end - tmp_buf < sizeof_cur_arg) { 1022 err = -ENOSPC; 1023 goto out; 1024 } 1025 1026 if (sizeof_cur_arg == 8) { 1027 *(u32 *)tmp_buf = *(u32 *)&cur_arg; 1028 *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1); 1029 } else { 1030 *(u32 *)tmp_buf = (u32)(long)cur_arg; 1031 } 1032 tmp_buf += sizeof_cur_arg; 1033 } 1034 num_spec++; 1035 } 1036 1037 err = 0; 1038 out: 1039 if (err) 1040 bpf_bprintf_cleanup(data); 1041 return err; 1042 } 1043 1044 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, 1045 const void *, args, u32, data_len) 1046 { 1047 struct bpf_bprintf_data data = { 1048 .get_bin_args = true, 1049 }; 1050 int err, num_args; 1051 1052 if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 || 1053 (data_len && !args)) 1054 return -EINVAL; 1055 num_args = data_len / 8; 1056 1057 /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we 1058 * can safely give an unbounded size. 
1059 */ 1060 err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data); 1061 if (err < 0) 1062 return err; 1063 1064 err = bstr_printf(str, str_size, fmt, data.bin_args); 1065 1066 bpf_bprintf_cleanup(&data); 1067 1068 return err + 1; 1069 } 1070 1071 const struct bpf_func_proto bpf_snprintf_proto = { 1072 .func = bpf_snprintf, 1073 .gpl_only = true, 1074 .ret_type = RET_INTEGER, 1075 .arg1_type = ARG_PTR_TO_MEM_OR_NULL, 1076 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1077 .arg3_type = ARG_PTR_TO_CONST_STR, 1078 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 1079 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1080 }; 1081 1082 struct bpf_async_cb { 1083 struct bpf_map *map; 1084 struct bpf_prog *prog; 1085 void __rcu *callback_fn; 1086 void *value; 1087 union { 1088 struct rcu_head rcu; 1089 struct work_struct delete_work; 1090 }; 1091 u64 flags; 1092 }; 1093 1094 /* BPF map elements can contain 'struct bpf_timer'. 1095 * Such map owns all of its BPF timers. 1096 * 'struct bpf_timer' is allocated as part of map element allocation 1097 * and it's zero initialized. 1098 * That space is used to keep 'struct bpf_async_kern'. 1099 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and 1100 * remembers 'struct bpf_map *' pointer it's part of. 1101 * bpf_timer_set_callback() increments prog refcnt and assign bpf callback_fn. 1102 * bpf_timer_start() arms the timer. 1103 * If user space reference to a map goes to zero at this point 1104 * ops->map_release_uref callback is responsible for cancelling the timers, 1105 * freeing their memory, and decrementing prog's refcnts. 1106 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt. 1107 * Inner maps can contain bpf timers as well. ops->map_release_uref is 1108 * freeing the timers when inner map is replaced or deleted by user space. 1109 */ 1110 struct bpf_hrtimer { 1111 struct bpf_async_cb cb; 1112 struct hrtimer timer; 1113 atomic_t cancelling; 1114 }; 1115 1116 struct bpf_work { 1117 struct bpf_async_cb cb; 1118 struct work_struct work; 1119 struct work_struct delete_work; 1120 }; 1121 1122 /* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */ 1123 struct bpf_async_kern { 1124 union { 1125 struct bpf_async_cb *cb; 1126 struct bpf_hrtimer *timer; 1127 struct bpf_work *work; 1128 }; 1129 /* bpf_spin_lock is used here instead of spinlock_t to make 1130 * sure that it always fits into space reserved by struct bpf_timer 1131 * regardless of LOCKDEP and spinlock debug flags. 1132 */ 1133 struct bpf_spin_lock lock; 1134 } __attribute__((aligned(8))); 1135 1136 enum bpf_async_type { 1137 BPF_ASYNC_TYPE_TIMER = 0, 1138 BPF_ASYNC_TYPE_WQ, 1139 }; 1140 1141 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running); 1142 1143 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) 1144 { 1145 struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer); 1146 struct bpf_map *map = t->cb.map; 1147 void *value = t->cb.value; 1148 bpf_callback_t callback_fn; 1149 void *key; 1150 u32 idx; 1151 1152 BTF_TYPE_EMIT(struct bpf_timer); 1153 callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held()); 1154 if (!callback_fn) 1155 goto out; 1156 1157 /* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and 1158 * cannot be preempted by another bpf_timer_cb() on the same cpu. 1159 * Remember the timer this callback is servicing to prevent 1160 * deadlock if callback_fn() calls bpf_timer_cancel() or 1161 * bpf_map_delete_elem() on the same timer. 
1162 */ 1163 this_cpu_write(hrtimer_running, t); 1164 if (map->map_type == BPF_MAP_TYPE_ARRAY) { 1165 struct bpf_array *array = container_of(map, struct bpf_array, map); 1166 1167 /* compute the key */ 1168 idx = ((char *)value - array->value) / array->elem_size; 1169 key = &idx; 1170 } else { /* hash or lru */ 1171 key = value - round_up(map->key_size, 8); 1172 } 1173 1174 callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0); 1175 /* The verifier checked that return value is zero. */ 1176 1177 this_cpu_write(hrtimer_running, NULL); 1178 out: 1179 return HRTIMER_NORESTART; 1180 } 1181 1182 static void bpf_wq_work(struct work_struct *work) 1183 { 1184 struct bpf_work *w = container_of(work, struct bpf_work, work); 1185 struct bpf_async_cb *cb = &w->cb; 1186 struct bpf_map *map = cb->map; 1187 bpf_callback_t callback_fn; 1188 void *value = cb->value; 1189 void *key; 1190 u32 idx; 1191 1192 BTF_TYPE_EMIT(struct bpf_wq); 1193 1194 callback_fn = READ_ONCE(cb->callback_fn); 1195 if (!callback_fn) 1196 return; 1197 1198 if (map->map_type == BPF_MAP_TYPE_ARRAY) { 1199 struct bpf_array *array = container_of(map, struct bpf_array, map); 1200 1201 /* compute the key */ 1202 idx = ((char *)value - array->value) / array->elem_size; 1203 key = &idx; 1204 } else { /* hash or lru */ 1205 key = value - round_up(map->key_size, 8); 1206 } 1207 1208 rcu_read_lock_trace(); 1209 migrate_disable(); 1210 1211 callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0); 1212 1213 migrate_enable(); 1214 rcu_read_unlock_trace(); 1215 } 1216 1217 static void bpf_wq_delete_work(struct work_struct *work) 1218 { 1219 struct bpf_work *w = container_of(work, struct bpf_work, delete_work); 1220 1221 cancel_work_sync(&w->work); 1222 1223 kfree_rcu(w, cb.rcu); 1224 } 1225 1226 static void bpf_timer_delete_work(struct work_struct *work) 1227 { 1228 struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work); 1229 1230 /* Cancel the timer and wait for callback to complete if it was running. 1231 * If hrtimer_cancel() can be safely called it's safe to call 1232 * kfree_rcu(t) right after for both preallocated and non-preallocated 1233 * maps. The async->cb = NULL was already done and no code path can see 1234 * address 't' anymore. Timer if armed for existing bpf_hrtimer before 1235 * bpf_timer_cancel_and_free will have been cancelled. 
1236 */ 1237 hrtimer_cancel(&t->timer); 1238 kfree_rcu(t, cb.rcu); 1239 } 1240 1241 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags, 1242 enum bpf_async_type type) 1243 { 1244 struct bpf_async_cb *cb; 1245 struct bpf_hrtimer *t; 1246 struct bpf_work *w; 1247 clockid_t clockid; 1248 size_t size; 1249 int ret = 0; 1250 1251 if (in_nmi()) 1252 return -EOPNOTSUPP; 1253 1254 switch (type) { 1255 case BPF_ASYNC_TYPE_TIMER: 1256 size = sizeof(struct bpf_hrtimer); 1257 break; 1258 case BPF_ASYNC_TYPE_WQ: 1259 size = sizeof(struct bpf_work); 1260 break; 1261 default: 1262 return -EINVAL; 1263 } 1264 1265 __bpf_spin_lock_irqsave(&async->lock); 1266 t = async->timer; 1267 if (t) { 1268 ret = -EBUSY; 1269 goto out; 1270 } 1271 1272 /* allocate hrtimer via map_kmalloc to use memcg accounting */ 1273 cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node); 1274 if (!cb) { 1275 ret = -ENOMEM; 1276 goto out; 1277 } 1278 1279 switch (type) { 1280 case BPF_ASYNC_TYPE_TIMER: 1281 clockid = flags & (MAX_CLOCKS - 1); 1282 t = (struct bpf_hrtimer *)cb; 1283 1284 atomic_set(&t->cancelling, 0); 1285 INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work); 1286 hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT); 1287 cb->value = (void *)async - map->record->timer_off; 1288 break; 1289 case BPF_ASYNC_TYPE_WQ: 1290 w = (struct bpf_work *)cb; 1291 1292 INIT_WORK(&w->work, bpf_wq_work); 1293 INIT_WORK(&w->delete_work, bpf_wq_delete_work); 1294 cb->value = (void *)async - map->record->wq_off; 1295 break; 1296 } 1297 cb->map = map; 1298 cb->prog = NULL; 1299 cb->flags = flags; 1300 rcu_assign_pointer(cb->callback_fn, NULL); 1301 1302 WRITE_ONCE(async->cb, cb); 1303 /* Guarantee the order between async->cb and map->usercnt. So 1304 * when there are concurrent uref release and bpf timer init, either 1305 * bpf_timer_cancel_and_free() called by uref release reads a no-NULL 1306 * timer or atomic64_read() below returns a zero usercnt. 1307 */ 1308 smp_mb(); 1309 if (!atomic64_read(&map->usercnt)) { 1310 /* maps with timers must be either held by user space 1311 * or pinned in bpffs. 
1312 */ 1313 WRITE_ONCE(async->cb, NULL); 1314 kfree(cb); 1315 ret = -EPERM; 1316 } 1317 out: 1318 __bpf_spin_unlock_irqrestore(&async->lock); 1319 return ret; 1320 } 1321 1322 BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map, 1323 u64, flags) 1324 { 1325 clock_t clockid = flags & (MAX_CLOCKS - 1); 1326 1327 BUILD_BUG_ON(MAX_CLOCKS != 16); 1328 BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer)); 1329 BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer)); 1330 1331 if (flags >= MAX_CLOCKS || 1332 /* similar to timerfd except _ALARM variants are not supported */ 1333 (clockid != CLOCK_MONOTONIC && 1334 clockid != CLOCK_REALTIME && 1335 clockid != CLOCK_BOOTTIME)) 1336 return -EINVAL; 1337 1338 return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER); 1339 } 1340 1341 static const struct bpf_func_proto bpf_timer_init_proto = { 1342 .func = bpf_timer_init, 1343 .gpl_only = true, 1344 .ret_type = RET_INTEGER, 1345 .arg1_type = ARG_PTR_TO_TIMER, 1346 .arg2_type = ARG_CONST_MAP_PTR, 1347 .arg3_type = ARG_ANYTHING, 1348 }; 1349 1350 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn, 1351 struct bpf_prog_aux *aux, unsigned int flags, 1352 enum bpf_async_type type) 1353 { 1354 struct bpf_prog *prev, *prog = aux->prog; 1355 struct bpf_async_cb *cb; 1356 int ret = 0; 1357 1358 if (in_nmi()) 1359 return -EOPNOTSUPP; 1360 __bpf_spin_lock_irqsave(&async->lock); 1361 cb = async->cb; 1362 if (!cb) { 1363 ret = -EINVAL; 1364 goto out; 1365 } 1366 if (!atomic64_read(&cb->map->usercnt)) { 1367 /* maps with timers must be either held by user space 1368 * or pinned in bpffs. Otherwise timer might still be 1369 * running even when bpf prog is detached and user space 1370 * is gone, since map_release_uref won't ever be called. 1371 */ 1372 ret = -EPERM; 1373 goto out; 1374 } 1375 prev = cb->prog; 1376 if (prev != prog) { 1377 /* Bump prog refcnt once. Every bpf_timer_set_callback() 1378 * can pick different callback_fn-s within the same prog. 
1379 */ 1380 prog = bpf_prog_inc_not_zero(prog); 1381 if (IS_ERR(prog)) { 1382 ret = PTR_ERR(prog); 1383 goto out; 1384 } 1385 if (prev) 1386 /* Drop prev prog refcnt when swapping with new prog */ 1387 bpf_prog_put(prev); 1388 cb->prog = prog; 1389 } 1390 rcu_assign_pointer(cb->callback_fn, callback_fn); 1391 out: 1392 __bpf_spin_unlock_irqrestore(&async->lock); 1393 return ret; 1394 } 1395 1396 BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn, 1397 struct bpf_prog_aux *, aux) 1398 { 1399 return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER); 1400 } 1401 1402 static const struct bpf_func_proto bpf_timer_set_callback_proto = { 1403 .func = bpf_timer_set_callback, 1404 .gpl_only = true, 1405 .ret_type = RET_INTEGER, 1406 .arg1_type = ARG_PTR_TO_TIMER, 1407 .arg2_type = ARG_PTR_TO_FUNC, 1408 }; 1409 1410 BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags) 1411 { 1412 struct bpf_hrtimer *t; 1413 int ret = 0; 1414 enum hrtimer_mode mode; 1415 1416 if (in_nmi()) 1417 return -EOPNOTSUPP; 1418 if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN)) 1419 return -EINVAL; 1420 __bpf_spin_lock_irqsave(&timer->lock); 1421 t = timer->timer; 1422 if (!t || !t->cb.prog) { 1423 ret = -EINVAL; 1424 goto out; 1425 } 1426 1427 if (flags & BPF_F_TIMER_ABS) 1428 mode = HRTIMER_MODE_ABS_SOFT; 1429 else 1430 mode = HRTIMER_MODE_REL_SOFT; 1431 1432 if (flags & BPF_F_TIMER_CPU_PIN) 1433 mode |= HRTIMER_MODE_PINNED; 1434 1435 hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); 1436 out: 1437 __bpf_spin_unlock_irqrestore(&timer->lock); 1438 return ret; 1439 } 1440 1441 static const struct bpf_func_proto bpf_timer_start_proto = { 1442 .func = bpf_timer_start, 1443 .gpl_only = true, 1444 .ret_type = RET_INTEGER, 1445 .arg1_type = ARG_PTR_TO_TIMER, 1446 .arg2_type = ARG_ANYTHING, 1447 .arg3_type = ARG_ANYTHING, 1448 }; 1449 1450 static void drop_prog_refcnt(struct bpf_async_cb *async) 1451 { 1452 struct bpf_prog *prog = async->prog; 1453 1454 if (prog) { 1455 bpf_prog_put(prog); 1456 async->prog = NULL; 1457 rcu_assign_pointer(async->callback_fn, NULL); 1458 } 1459 } 1460 1461 BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer) 1462 { 1463 struct bpf_hrtimer *t, *cur_t; 1464 bool inc = false; 1465 int ret = 0; 1466 1467 if (in_nmi()) 1468 return -EOPNOTSUPP; 1469 rcu_read_lock(); 1470 __bpf_spin_lock_irqsave(&timer->lock); 1471 t = timer->timer; 1472 if (!t) { 1473 ret = -EINVAL; 1474 goto out; 1475 } 1476 1477 cur_t = this_cpu_read(hrtimer_running); 1478 if (cur_t == t) { 1479 /* If bpf callback_fn is trying to bpf_timer_cancel() 1480 * its own timer the hrtimer_cancel() will deadlock 1481 * since it waits for callback_fn to finish. 1482 */ 1483 ret = -EDEADLK; 1484 goto out; 1485 } 1486 1487 /* Only account in-flight cancellations when invoked from a timer 1488 * callback, since we want to avoid waiting only if other _callbacks_ 1489 * are waiting on us, to avoid introducing lockups. Non-callback paths 1490 * are ok, since nobody would synchronously wait for their completion. 1491 */ 1492 if (!cur_t) 1493 goto drop; 1494 atomic_inc(&t->cancelling); 1495 /* Need full barrier after relaxed atomic_inc */ 1496 smp_mb__after_atomic(); 1497 inc = true; 1498 if (atomic_read(&cur_t->cancelling)) { 1499 /* We're cancelling timer t, while some other timer callback is 1500 * attempting to cancel us. 
In such a case, it might be possible 1501 * that timer t belongs to the other callback, or some other 1502 * callback waiting upon it (creating transitive dependencies 1503 * upon us), and we will enter a deadlock if we continue 1504 * cancelling and waiting for it synchronously, since it might 1505 * do the same. Bail! 1506 */ 1507 ret = -EDEADLK; 1508 goto out; 1509 } 1510 drop: 1511 drop_prog_refcnt(&t->cb); 1512 out: 1513 __bpf_spin_unlock_irqrestore(&timer->lock); 1514 /* Cancel the timer and wait for associated callback to finish 1515 * if it was running. 1516 */ 1517 ret = ret ?: hrtimer_cancel(&t->timer); 1518 if (inc) 1519 atomic_dec(&t->cancelling); 1520 rcu_read_unlock(); 1521 return ret; 1522 } 1523 1524 static const struct bpf_func_proto bpf_timer_cancel_proto = { 1525 .func = bpf_timer_cancel, 1526 .gpl_only = true, 1527 .ret_type = RET_INTEGER, 1528 .arg1_type = ARG_PTR_TO_TIMER, 1529 }; 1530 1531 static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async) 1532 { 1533 struct bpf_async_cb *cb; 1534 1535 /* Performance optimization: read async->cb without lock first. */ 1536 if (!READ_ONCE(async->cb)) 1537 return NULL; 1538 1539 __bpf_spin_lock_irqsave(&async->lock); 1540 /* re-read it under lock */ 1541 cb = async->cb; 1542 if (!cb) 1543 goto out; 1544 drop_prog_refcnt(cb); 1545 /* The subsequent bpf_timer_start/cancel() helpers won't be able to use 1546 * this timer, since it won't be initialized. 1547 */ 1548 WRITE_ONCE(async->cb, NULL); 1549 out: 1550 __bpf_spin_unlock_irqrestore(&async->lock); 1551 return cb; 1552 } 1553 1554 /* This function is called by map_delete/update_elem for individual element and 1555 * by ops->map_release_uref when the user space reference to a map reaches zero. 1556 */ 1557 void bpf_timer_cancel_and_free(void *val) 1558 { 1559 struct bpf_hrtimer *t; 1560 1561 t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val); 1562 1563 if (!t) 1564 return; 1565 /* We check that bpf_map_delete/update_elem() was called from timer 1566 * callback_fn. In such case we don't call hrtimer_cancel() (since it 1567 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will 1568 * just return -1). Though callback_fn is still running on this cpu it's 1569 * safe to do kfree(t) because bpf_timer_cb() read everything it needed 1570 * from 't'. The bpf subprog callback_fn won't be able to access 't', 1571 * since async->cb = NULL was already done. The timer will be 1572 * effectively cancelled because bpf_timer_cb() will return 1573 * HRTIMER_NORESTART. 1574 * 1575 * However, it is possible the timer callback_fn calling us armed the 1576 * timer _before_ calling us, such that failing to cancel it here will 1577 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer. 1578 * Therefore, we _need_ to cancel any outstanding timers before we do 1579 * kfree_rcu, even though no more timers can be armed. 1580 * 1581 * Moreover, we need to schedule work even if timer does not belong to 1582 * the calling callback_fn, as on two different CPUs, we can end up in a 1583 * situation where both sides run in parallel, try to cancel one 1584 * another, and we end up waiting on both sides in hrtimer_cancel 1585 * without making forward progress, since timer1 depends on time2 1586 * callback to finish, and vice versa. 
1587 * 1588 * CPU 1 (timer1_cb) CPU 2 (timer2_cb) 1589 * bpf_timer_cancel_and_free(timer2) bpf_timer_cancel_and_free(timer1) 1590 * 1591 * To avoid these issues, punt to workqueue context when we are in a 1592 * timer callback. 1593 */ 1594 if (this_cpu_read(hrtimer_running)) { 1595 queue_work(system_unbound_wq, &t->cb.delete_work); 1596 return; 1597 } 1598 1599 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 1600 /* If the timer is running on other CPU, also use a kworker to 1601 * wait for the completion of the timer instead of trying to 1602 * acquire a sleepable lock in hrtimer_cancel() to wait for its 1603 * completion. 1604 */ 1605 if (hrtimer_try_to_cancel(&t->timer) >= 0) 1606 kfree_rcu(t, cb.rcu); 1607 else 1608 queue_work(system_unbound_wq, &t->cb.delete_work); 1609 } else { 1610 bpf_timer_delete_work(&t->cb.delete_work); 1611 } 1612 } 1613 1614 /* This function is called by map_delete/update_elem for individual element and 1615 * by ops->map_release_uref when the user space reference to a map reaches zero. 1616 */ 1617 void bpf_wq_cancel_and_free(void *val) 1618 { 1619 struct bpf_work *work; 1620 1621 BTF_TYPE_EMIT(struct bpf_wq); 1622 1623 work = (struct bpf_work *)__bpf_async_cancel_and_free(val); 1624 if (!work) 1625 return; 1626 /* Trigger cancel of the sleepable work, but *do not* wait for 1627 * it to finish if it was running as we might not be in a 1628 * sleepable context. 1629 * kfree will be called once the work has finished. 1630 */ 1631 schedule_work(&work->delete_work); 1632 } 1633 1634 BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr) 1635 { 1636 unsigned long *kptr = dst; 1637 1638 /* This helper may be inlined by verifier. */ 1639 return xchg(kptr, (unsigned long)ptr); 1640 } 1641 1642 /* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg() 1643 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to 1644 * denote type that verifier will determine. 1645 */ 1646 static const struct bpf_func_proto bpf_kptr_xchg_proto = { 1647 .func = bpf_kptr_xchg, 1648 .gpl_only = false, 1649 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 1650 .ret_btf_id = BPF_PTR_POISON, 1651 .arg1_type = ARG_KPTR_XCHG_DEST, 1652 .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE, 1653 .arg2_btf_id = BPF_PTR_POISON, 1654 }; 1655 1656 /* Since the upper 8 bits of dynptr->size is reserved, the 1657 * maximum supported size is 2^24 - 1. 1658 */ 1659 #define DYNPTR_MAX_SIZE ((1UL << 24) - 1) 1660 #define DYNPTR_TYPE_SHIFT 28 1661 #define DYNPTR_SIZE_MASK 0xFFFFFF 1662 #define DYNPTR_RDONLY_BIT BIT(31) 1663 1664 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr) 1665 { 1666 return ptr->size & DYNPTR_RDONLY_BIT; 1667 } 1668 1669 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) 1670 { 1671 ptr->size |= DYNPTR_RDONLY_BIT; 1672 } 1673 1674 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type) 1675 { 1676 ptr->size |= type << DYNPTR_TYPE_SHIFT; 1677 } 1678 1679 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr) 1680 { 1681 return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT; 1682 } 1683 1684 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr) 1685 { 1686 return ptr->size & DYNPTR_SIZE_MASK; 1687 } 1688 1689 static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size) 1690 { 1691 u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK; 1692 1693 ptr->size = new_size | metadata; 1694 } 1695 1696 int bpf_dynptr_check_size(u32 size) 1697 { 1698 return size > DYNPTR_MAX_SIZE ? 
-E2BIG : 0; 1699 } 1700 1701 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, 1702 enum bpf_dynptr_type type, u32 offset, u32 size) 1703 { 1704 ptr->data = data; 1705 ptr->offset = offset; 1706 ptr->size = size; 1707 bpf_dynptr_set_type(ptr, type); 1708 } 1709 1710 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) 1711 { 1712 memset(ptr, 0, sizeof(*ptr)); 1713 } 1714 1715 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) 1716 { 1717 int err; 1718 1719 BTF_TYPE_EMIT(struct bpf_dynptr); 1720 1721 err = bpf_dynptr_check_size(size); 1722 if (err) 1723 goto error; 1724 1725 /* flags is currently unsupported */ 1726 if (flags) { 1727 err = -EINVAL; 1728 goto error; 1729 } 1730 1731 bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size); 1732 1733 return 0; 1734 1735 error: 1736 bpf_dynptr_set_null(ptr); 1737 return err; 1738 } 1739 1740 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = { 1741 .func = bpf_dynptr_from_mem, 1742 .gpl_only = false, 1743 .ret_type = RET_INTEGER, 1744 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1745 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1746 .arg3_type = ARG_ANYTHING, 1747 .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE, 1748 }; 1749 1750 static int __bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr_kern *src, 1751 u32 offset, u64 flags) 1752 { 1753 enum bpf_dynptr_type type; 1754 int err; 1755 1756 if (!src->data || flags) 1757 return -EINVAL; 1758 1759 err = bpf_dynptr_check_off_len(src, offset, len); 1760 if (err) 1761 return err; 1762 1763 type = bpf_dynptr_get_type(src); 1764 1765 switch (type) { 1766 case BPF_DYNPTR_TYPE_LOCAL: 1767 case BPF_DYNPTR_TYPE_RINGBUF: 1768 /* Source and destination may possibly overlap, hence use memmove to 1769 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr 1770 * pointing to overlapping PTR_TO_MAP_VALUE regions. 1771 */ 1772 memmove(dst, src->data + src->offset + offset, len); 1773 return 0; 1774 case BPF_DYNPTR_TYPE_SKB: 1775 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len); 1776 case BPF_DYNPTR_TYPE_XDP: 1777 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len); 1778 default: 1779 WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type); 1780 return -EFAULT; 1781 } 1782 } 1783 1784 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src, 1785 u32, offset, u64, flags) 1786 { 1787 return __bpf_dynptr_read(dst, len, src, offset, flags); 1788 } 1789 1790 static const struct bpf_func_proto bpf_dynptr_read_proto = { 1791 .func = bpf_dynptr_read, 1792 .gpl_only = false, 1793 .ret_type = RET_INTEGER, 1794 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1795 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1796 .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1797 .arg4_type = ARG_ANYTHING, 1798 .arg5_type = ARG_ANYTHING, 1799 }; 1800 1801 int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src, 1802 u32 len, u64 flags) 1803 { 1804 enum bpf_dynptr_type type; 1805 int err; 1806 1807 if (!dst->data || __bpf_dynptr_is_rdonly(dst)) 1808 return -EINVAL; 1809 1810 err = bpf_dynptr_check_off_len(dst, offset, len); 1811 if (err) 1812 return err; 1813 1814 type = bpf_dynptr_get_type(dst); 1815 1816 switch (type) { 1817 case BPF_DYNPTR_TYPE_LOCAL: 1818 case BPF_DYNPTR_TYPE_RINGBUF: 1819 if (flags) 1820 return -EINVAL; 1821 /* Source and destination may possibly overlap, hence use memmove to 1822 * copy the data. E.g. 
bpf_dynptr_from_mem may create two dynptr 1823 * pointing to overlapping PTR_TO_MAP_VALUE regions. 1824 */ 1825 memmove(dst->data + dst->offset + offset, src, len); 1826 return 0; 1827 case BPF_DYNPTR_TYPE_SKB: 1828 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len, 1829 flags); 1830 case BPF_DYNPTR_TYPE_XDP: 1831 if (flags) 1832 return -EINVAL; 1833 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len); 1834 default: 1835 WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type); 1836 return -EFAULT; 1837 } 1838 } 1839 1840 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src, 1841 u32, len, u64, flags) 1842 { 1843 return __bpf_dynptr_write(dst, offset, src, len, flags); 1844 } 1845 1846 static const struct bpf_func_proto bpf_dynptr_write_proto = { 1847 .func = bpf_dynptr_write, 1848 .gpl_only = false, 1849 .ret_type = RET_INTEGER, 1850 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1851 .arg2_type = ARG_ANYTHING, 1852 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1853 .arg4_type = ARG_CONST_SIZE_OR_ZERO, 1854 .arg5_type = ARG_ANYTHING, 1855 }; 1856 1857 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len) 1858 { 1859 enum bpf_dynptr_type type; 1860 int err; 1861 1862 if (!ptr->data) 1863 return 0; 1864 1865 err = bpf_dynptr_check_off_len(ptr, offset, len); 1866 if (err) 1867 return 0; 1868 1869 if (__bpf_dynptr_is_rdonly(ptr)) 1870 return 0; 1871 1872 type = bpf_dynptr_get_type(ptr); 1873 1874 switch (type) { 1875 case BPF_DYNPTR_TYPE_LOCAL: 1876 case BPF_DYNPTR_TYPE_RINGBUF: 1877 return (unsigned long)(ptr->data + ptr->offset + offset); 1878 case BPF_DYNPTR_TYPE_SKB: 1879 case BPF_DYNPTR_TYPE_XDP: 1880 /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */ 1881 return 0; 1882 default: 1883 WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type); 1884 return 0; 1885 } 1886 } 1887 1888 static const struct bpf_func_proto bpf_dynptr_data_proto = { 1889 .func = bpf_dynptr_data, 1890 .gpl_only = false, 1891 .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL, 1892 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1893 .arg2_type = ARG_ANYTHING, 1894 .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO, 1895 }; 1896 1897 const struct bpf_func_proto bpf_get_current_task_proto __weak; 1898 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak; 1899 const struct bpf_func_proto bpf_probe_read_user_proto __weak; 1900 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak; 1901 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak; 1902 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak; 1903 const struct bpf_func_proto bpf_task_pt_regs_proto __weak; 1904 const struct bpf_func_proto bpf_perf_event_read_proto __weak; 1905 const struct bpf_func_proto bpf_send_signal_proto __weak; 1906 const struct bpf_func_proto bpf_send_signal_thread_proto __weak; 1907 const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak; 1908 const struct bpf_func_proto bpf_get_task_stack_proto __weak; 1909 const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak; 1910 1911 const struct bpf_func_proto * 1912 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1913 { 1914 switch (func_id) { 1915 case BPF_FUNC_map_lookup_elem: 1916 return &bpf_map_lookup_elem_proto; 1917 case BPF_FUNC_map_update_elem: 1918 return &bpf_map_update_elem_proto; 1919 case BPF_FUNC_map_delete_elem: 1920 return &bpf_map_delete_elem_proto; 1921 
case BPF_FUNC_map_push_elem: 1922 return &bpf_map_push_elem_proto; 1923 case BPF_FUNC_map_pop_elem: 1924 return &bpf_map_pop_elem_proto; 1925 case BPF_FUNC_map_peek_elem: 1926 return &bpf_map_peek_elem_proto; 1927 case BPF_FUNC_map_lookup_percpu_elem: 1928 return &bpf_map_lookup_percpu_elem_proto; 1929 case BPF_FUNC_get_prandom_u32: 1930 return &bpf_get_prandom_u32_proto; 1931 case BPF_FUNC_get_smp_processor_id: 1932 return &bpf_get_raw_smp_processor_id_proto; 1933 case BPF_FUNC_get_numa_node_id: 1934 return &bpf_get_numa_node_id_proto; 1935 case BPF_FUNC_tail_call: 1936 return &bpf_tail_call_proto; 1937 case BPF_FUNC_ktime_get_ns: 1938 return &bpf_ktime_get_ns_proto; 1939 case BPF_FUNC_ktime_get_boot_ns: 1940 return &bpf_ktime_get_boot_ns_proto; 1941 case BPF_FUNC_ktime_get_tai_ns: 1942 return &bpf_ktime_get_tai_ns_proto; 1943 case BPF_FUNC_ringbuf_output: 1944 return &bpf_ringbuf_output_proto; 1945 case BPF_FUNC_ringbuf_reserve: 1946 return &bpf_ringbuf_reserve_proto; 1947 case BPF_FUNC_ringbuf_submit: 1948 return &bpf_ringbuf_submit_proto; 1949 case BPF_FUNC_ringbuf_discard: 1950 return &bpf_ringbuf_discard_proto; 1951 case BPF_FUNC_ringbuf_query: 1952 return &bpf_ringbuf_query_proto; 1953 case BPF_FUNC_strncmp: 1954 return &bpf_strncmp_proto; 1955 case BPF_FUNC_strtol: 1956 return &bpf_strtol_proto; 1957 case BPF_FUNC_strtoul: 1958 return &bpf_strtoul_proto; 1959 case BPF_FUNC_get_current_pid_tgid: 1960 return &bpf_get_current_pid_tgid_proto; 1961 case BPF_FUNC_get_ns_current_pid_tgid: 1962 return &bpf_get_ns_current_pid_tgid_proto; 1963 case BPF_FUNC_get_current_uid_gid: 1964 return &bpf_get_current_uid_gid_proto; 1965 default: 1966 break; 1967 } 1968 1969 if (!bpf_token_capable(prog->aux->token, CAP_BPF)) 1970 return NULL; 1971 1972 switch (func_id) { 1973 case BPF_FUNC_spin_lock: 1974 return &bpf_spin_lock_proto; 1975 case BPF_FUNC_spin_unlock: 1976 return &bpf_spin_unlock_proto; 1977 case BPF_FUNC_jiffies64: 1978 return &bpf_jiffies64_proto; 1979 case BPF_FUNC_per_cpu_ptr: 1980 return &bpf_per_cpu_ptr_proto; 1981 case BPF_FUNC_this_cpu_ptr: 1982 return &bpf_this_cpu_ptr_proto; 1983 case BPF_FUNC_timer_init: 1984 return &bpf_timer_init_proto; 1985 case BPF_FUNC_timer_set_callback: 1986 return &bpf_timer_set_callback_proto; 1987 case BPF_FUNC_timer_start: 1988 return &bpf_timer_start_proto; 1989 case BPF_FUNC_timer_cancel: 1990 return &bpf_timer_cancel_proto; 1991 case BPF_FUNC_kptr_xchg: 1992 return &bpf_kptr_xchg_proto; 1993 case BPF_FUNC_for_each_map_elem: 1994 return &bpf_for_each_map_elem_proto; 1995 case BPF_FUNC_loop: 1996 return &bpf_loop_proto; 1997 case BPF_FUNC_user_ringbuf_drain: 1998 return &bpf_user_ringbuf_drain_proto; 1999 case BPF_FUNC_ringbuf_reserve_dynptr: 2000 return &bpf_ringbuf_reserve_dynptr_proto; 2001 case BPF_FUNC_ringbuf_submit_dynptr: 2002 return &bpf_ringbuf_submit_dynptr_proto; 2003 case BPF_FUNC_ringbuf_discard_dynptr: 2004 return &bpf_ringbuf_discard_dynptr_proto; 2005 case BPF_FUNC_dynptr_from_mem: 2006 return &bpf_dynptr_from_mem_proto; 2007 case BPF_FUNC_dynptr_read: 2008 return &bpf_dynptr_read_proto; 2009 case BPF_FUNC_dynptr_write: 2010 return &bpf_dynptr_write_proto; 2011 case BPF_FUNC_dynptr_data: 2012 return &bpf_dynptr_data_proto; 2013 #ifdef CONFIG_CGROUPS 2014 case BPF_FUNC_cgrp_storage_get: 2015 return &bpf_cgrp_storage_get_proto; 2016 case BPF_FUNC_cgrp_storage_delete: 2017 return &bpf_cgrp_storage_delete_proto; 2018 case BPF_FUNC_get_current_cgroup_id: 2019 return &bpf_get_current_cgroup_id_proto; 2020 case 
BPF_FUNC_get_current_ancestor_cgroup_id: 2021 return &bpf_get_current_ancestor_cgroup_id_proto; 2022 case BPF_FUNC_current_task_under_cgroup: 2023 return &bpf_current_task_under_cgroup_proto; 2024 #endif 2025 #ifdef CONFIG_CGROUP_NET_CLASSID 2026 case BPF_FUNC_get_cgroup_classid: 2027 return &bpf_get_cgroup_classid_curr_proto; 2028 #endif 2029 case BPF_FUNC_task_storage_get: 2030 if (bpf_prog_check_recur(prog)) 2031 return &bpf_task_storage_get_recur_proto; 2032 return &bpf_task_storage_get_proto; 2033 case BPF_FUNC_task_storage_delete: 2034 if (bpf_prog_check_recur(prog)) 2035 return &bpf_task_storage_delete_recur_proto; 2036 return &bpf_task_storage_delete_proto; 2037 default: 2038 break; 2039 } 2040 2041 if (!bpf_token_capable(prog->aux->token, CAP_PERFMON)) 2042 return NULL; 2043 2044 switch (func_id) { 2045 case BPF_FUNC_trace_printk: 2046 return bpf_get_trace_printk_proto(); 2047 case BPF_FUNC_get_current_task: 2048 return &bpf_get_current_task_proto; 2049 case BPF_FUNC_get_current_task_btf: 2050 return &bpf_get_current_task_btf_proto; 2051 case BPF_FUNC_get_current_comm: 2052 return &bpf_get_current_comm_proto; 2053 case BPF_FUNC_probe_read_user: 2054 return &bpf_probe_read_user_proto; 2055 case BPF_FUNC_probe_read_kernel: 2056 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 2057 NULL : &bpf_probe_read_kernel_proto; 2058 case BPF_FUNC_probe_read_user_str: 2059 return &bpf_probe_read_user_str_proto; 2060 case BPF_FUNC_probe_read_kernel_str: 2061 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 2062 NULL : &bpf_probe_read_kernel_str_proto; 2063 case BPF_FUNC_copy_from_user: 2064 return &bpf_copy_from_user_proto; 2065 case BPF_FUNC_copy_from_user_task: 2066 return &bpf_copy_from_user_task_proto; 2067 case BPF_FUNC_snprintf_btf: 2068 return &bpf_snprintf_btf_proto; 2069 case BPF_FUNC_snprintf: 2070 return &bpf_snprintf_proto; 2071 case BPF_FUNC_task_pt_regs: 2072 return &bpf_task_pt_regs_proto; 2073 case BPF_FUNC_trace_vprintk: 2074 return bpf_get_trace_vprintk_proto(); 2075 case BPF_FUNC_perf_event_read_value: 2076 return bpf_get_perf_event_read_value_proto(); 2077 case BPF_FUNC_perf_event_read: 2078 return &bpf_perf_event_read_proto; 2079 case BPF_FUNC_send_signal: 2080 return &bpf_send_signal_proto; 2081 case BPF_FUNC_send_signal_thread: 2082 return &bpf_send_signal_thread_proto; 2083 case BPF_FUNC_get_task_stack: 2084 return prog->sleepable ? &bpf_get_task_stack_sleepable_proto 2085 : &bpf_get_task_stack_proto; 2086 case BPF_FUNC_get_branch_snapshot: 2087 return &bpf_get_branch_snapshot_proto; 2088 case BPF_FUNC_find_vma: 2089 return &bpf_find_vma_proto; 2090 default: 2091 return NULL; 2092 } 2093 } 2094 EXPORT_SYMBOL_GPL(bpf_base_func_proto); 2095 2096 void bpf_list_head_free(const struct btf_field *field, void *list_head, 2097 struct bpf_spin_lock *spin_lock) 2098 { 2099 struct list_head *head = list_head, *orig_head = list_head; 2100 2101 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head)); 2102 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head)); 2103 2104 /* Do the actual list draining outside the lock to not hold the lock for 2105 * too long, and also prevent deadlocks if tracing programs end up 2106 * executing on entry/exit of functions called inside the critical 2107 * section, and end up doing map ops that call bpf_list_head_free for 2108 * the same map value again. 
2109 */ 2110 __bpf_spin_lock_irqsave(spin_lock); 2111 if (!head->next || list_empty(head)) 2112 goto unlock; 2113 head = head->next; 2114 unlock: 2115 INIT_LIST_HEAD(orig_head); 2116 __bpf_spin_unlock_irqrestore(spin_lock); 2117 2118 while (head != orig_head) { 2119 void *obj = head; 2120 2121 obj -= field->graph_root.node_offset; 2122 head = head->next; 2123 /* The contained type can also have resources, including a 2124 * bpf_list_head which needs to be freed. 2125 */ 2126 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); 2127 } 2128 } 2129 2130 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are 2131 * 'rb_node *', so field name of rb_node within containing struct is not 2132 * needed. 2133 * 2134 * Since bpf_rb_tree's node type has a corresponding struct btf_field with 2135 * graph_root.node_offset, it's not necessary to know field name 2136 * or type of node struct 2137 */ 2138 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \ 2139 for (pos = rb_first_postorder(root); \ 2140 pos && ({ n = rb_next_postorder(pos); 1; }); \ 2141 pos = n) 2142 2143 void bpf_rb_root_free(const struct btf_field *field, void *rb_root, 2144 struct bpf_spin_lock *spin_lock) 2145 { 2146 struct rb_root_cached orig_root, *root = rb_root; 2147 struct rb_node *pos, *n; 2148 void *obj; 2149 2150 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root)); 2151 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root)); 2152 2153 __bpf_spin_lock_irqsave(spin_lock); 2154 orig_root = *root; 2155 *root = RB_ROOT_CACHED; 2156 __bpf_spin_unlock_irqrestore(spin_lock); 2157 2158 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) { 2159 obj = pos; 2160 obj -= field->graph_root.node_offset; 2161 2162 2163 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); 2164 } 2165 } 2166 2167 __bpf_kfunc_start_defs(); 2168 2169 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) 2170 { 2171 struct btf_struct_meta *meta = meta__ign; 2172 u64 size = local_type_id__k; 2173 void *p; 2174 2175 p = bpf_mem_alloc(&bpf_global_ma, size); 2176 if (!p) 2177 return NULL; 2178 if (meta) 2179 bpf_obj_init(meta->record, p); 2180 return p; 2181 } 2182 2183 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) 2184 { 2185 u64 size = local_type_id__k; 2186 2187 /* The verifier has ensured that meta__ign must be NULL */ 2188 return bpf_mem_alloc(&bpf_global_percpu_ma, size); 2189 } 2190 2191 /* Must be called under migrate_disable(), as required by bpf_mem_free */ 2192 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu) 2193 { 2194 struct bpf_mem_alloc *ma; 2195 2196 if (rec && rec->refcount_off >= 0 && 2197 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { 2198 /* Object is refcounted and refcount_dec didn't result in 0 2199 * refcount. Return without freeing the object 2200 */ 2201 return; 2202 } 2203 2204 if (rec) 2205 bpf_obj_free_fields(rec, p); 2206 2207 if (percpu) 2208 ma = &bpf_global_percpu_ma; 2209 else 2210 ma = &bpf_global_ma; 2211 bpf_mem_free_rcu(ma, p); 2212 } 2213 2214 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) 2215 { 2216 struct btf_struct_meta *meta = meta__ign; 2217 void *p = p__alloc; 2218 2219 __bpf_obj_drop_impl(p, meta ? 
meta->record : NULL, false); 2220 } 2221 2222 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) 2223 { 2224 /* The verifier has ensured that meta__ign must be NULL */ 2225 bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc); 2226 } 2227 2228 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) 2229 { 2230 struct btf_struct_meta *meta = meta__ign; 2231 struct bpf_refcount *ref; 2232 2233 /* Could just cast directly to refcount_t *, but need some code using 2234 * bpf_refcount type so that it is emitted in vmlinux BTF 2235 */ 2236 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); 2237 if (!refcount_inc_not_zero((refcount_t *)ref)) 2238 return NULL; 2239 2240 /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null 2241 * in verifier.c 2242 */ 2243 return (void *)p__refcounted_kptr; 2244 } 2245 2246 static int __bpf_list_add(struct bpf_list_node_kern *node, 2247 struct bpf_list_head *head, 2248 bool tail, struct btf_record *rec, u64 off) 2249 { 2250 struct list_head *n = &node->list_head, *h = (void *)head; 2251 2252 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't 2253 * called on its fields, so init here 2254 */ 2255 if (unlikely(!h->next)) 2256 INIT_LIST_HEAD(h); 2257 2258 /* node->owner != NULL implies !list_empty(n), no need to separately 2259 * check the latter 2260 */ 2261 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { 2262 /* Only called from BPF prog, no need to migrate_disable */ 2263 __bpf_obj_drop_impl((void *)n - off, rec, false); 2264 return -EINVAL; 2265 } 2266 2267 tail ? list_add_tail(n, h) : list_add(n, h); 2268 WRITE_ONCE(node->owner, head); 2269 2270 return 0; 2271 } 2272 2273 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head, 2274 struct bpf_list_node *node, 2275 void *meta__ign, u64 off) 2276 { 2277 struct bpf_list_node_kern *n = (void *)node; 2278 struct btf_struct_meta *meta = meta__ign; 2279 2280 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); 2281 } 2282 2283 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head, 2284 struct bpf_list_node *node, 2285 void *meta__ign, u64 off) 2286 { 2287 struct bpf_list_node_kern *n = (void *)node; 2288 struct btf_struct_meta *meta = meta__ign; 2289 2290 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); 2291 } 2292 2293 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) 2294 { 2295 struct list_head *n, *h = (void *)head; 2296 struct bpf_list_node_kern *node; 2297 2298 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't 2299 * called on its fields, so init here 2300 */ 2301 if (unlikely(!h->next)) 2302 INIT_LIST_HEAD(h); 2303 if (list_empty(h)) 2304 return NULL; 2305 2306 n = tail ? 
h->prev : h->next; 2307 node = container_of(n, struct bpf_list_node_kern, list_head); 2308 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head)) 2309 return NULL; 2310 2311 list_del_init(n); 2312 WRITE_ONCE(node->owner, NULL); 2313 return (struct bpf_list_node *)n; 2314 } 2315 2316 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) 2317 { 2318 return __bpf_list_del(head, false); 2319 } 2320 2321 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) 2322 { 2323 return __bpf_list_del(head, true); 2324 } 2325 2326 __bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head) 2327 { 2328 struct list_head *h = (struct list_head *)head; 2329 2330 if (list_empty(h) || unlikely(!h->next)) 2331 return NULL; 2332 2333 return (struct bpf_list_node *)h->next; 2334 } 2335 2336 __bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head) 2337 { 2338 struct list_head *h = (struct list_head *)head; 2339 2340 if (list_empty(h) || unlikely(!h->next)) 2341 return NULL; 2342 2343 return (struct bpf_list_node *)h->prev; 2344 } 2345 2346 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, 2347 struct bpf_rb_node *node) 2348 { 2349 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; 2350 struct rb_root_cached *r = (struct rb_root_cached *)root; 2351 struct rb_node *n = &node_internal->rb_node; 2352 2353 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or 2354 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n) 2355 */ 2356 if (READ_ONCE(node_internal->owner) != root) 2357 return NULL; 2358 2359 rb_erase_cached(n, r); 2360 RB_CLEAR_NODE(n); 2361 WRITE_ONCE(node_internal->owner, NULL); 2362 return (struct bpf_rb_node *)n; 2363 } 2364 2365 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF 2366 * program 2367 */ 2368 static int __bpf_rbtree_add(struct bpf_rb_root *root, 2369 struct bpf_rb_node_kern *node, 2370 void *less, struct btf_record *rec, u64 off) 2371 { 2372 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; 2373 struct rb_node *parent = NULL, *n = &node->rb_node; 2374 bpf_callback_t cb = (bpf_callback_t)less; 2375 bool leftmost = true; 2376 2377 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately 2378 * check the latter 2379 */ 2380 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { 2381 /* Only called from BPF prog, no need to migrate_disable */ 2382 __bpf_obj_drop_impl((void *)n - off, rec, false); 2383 return -EINVAL; 2384 } 2385 2386 while (*link) { 2387 parent = *link; 2388 if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) { 2389 link = &parent->rb_left; 2390 } else { 2391 link = &parent->rb_right; 2392 leftmost = false; 2393 } 2394 } 2395 2396 rb_link_node(n, parent, link); 2397 rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost); 2398 WRITE_ONCE(node->owner, root); 2399 return 0; 2400 } 2401 2402 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, 2403 bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), 2404 void *meta__ign, u64 off) 2405 { 2406 struct btf_struct_meta *meta = meta__ign; 2407 struct bpf_rb_node_kern *n = (void *)node; 2408 2409 return __bpf_rbtree_add(root, n, (void *)less, meta ? 
meta->record : NULL, off); 2410 } 2411 2412 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) 2413 { 2414 struct rb_root_cached *r = (struct rb_root_cached *)root; 2415 2416 return (struct bpf_rb_node *)rb_first_cached(r); 2417 } 2418 2419 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root) 2420 { 2421 struct rb_root_cached *r = (struct rb_root_cached *)root; 2422 2423 return (struct bpf_rb_node *)r->rb_root.rb_node; 2424 } 2425 2426 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node) 2427 { 2428 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; 2429 2430 if (READ_ONCE(node_internal->owner) != root) 2431 return NULL; 2432 2433 return (struct bpf_rb_node *)node_internal->rb_node.rb_left; 2434 } 2435 2436 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node) 2437 { 2438 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; 2439 2440 if (READ_ONCE(node_internal->owner) != root) 2441 return NULL; 2442 2443 return (struct bpf_rb_node *)node_internal->rb_node.rb_right; 2444 } 2445 2446 /** 2447 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this 2448 * kfunc which is not stored in a map as a kptr, must be released by calling 2449 * bpf_task_release(). 2450 * @p: The task on which a reference is being acquired. 2451 */ 2452 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p) 2453 { 2454 if (refcount_inc_not_zero(&p->rcu_users)) 2455 return p; 2456 return NULL; 2457 } 2458 2459 /** 2460 * bpf_task_release - Release the reference acquired on a task. 2461 * @p: The task on which a reference is being released. 2462 */ 2463 __bpf_kfunc void bpf_task_release(struct task_struct *p) 2464 { 2465 put_task_struct_rcu_user(p); 2466 } 2467 2468 __bpf_kfunc void bpf_task_release_dtor(void *p) 2469 { 2470 put_task_struct_rcu_user(p); 2471 } 2472 CFI_NOSEAL(bpf_task_release_dtor); 2473 2474 #ifdef CONFIG_CGROUPS 2475 /** 2476 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by 2477 * this kfunc which is not stored in a map as a kptr, must be released by 2478 * calling bpf_cgroup_release(). 2479 * @cgrp: The cgroup on which a reference is being acquired. 2480 */ 2481 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) 2482 { 2483 return cgroup_tryget(cgrp) ? cgrp : NULL; 2484 } 2485 2486 /** 2487 * bpf_cgroup_release - Release the reference acquired on a cgroup. 2488 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to 2489 * not be freed until the current grace period has ended, even if its refcount 2490 * drops to 0. 2491 * @cgrp: The cgroup on which a reference is being released. 2492 */ 2493 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp) 2494 { 2495 cgroup_put(cgrp); 2496 } 2497 2498 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp) 2499 { 2500 cgroup_put(cgrp); 2501 } 2502 CFI_NOSEAL(bpf_cgroup_release_dtor); 2503 2504 /** 2505 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor 2506 * array. A cgroup returned by this kfunc which is not subsequently stored in a 2507 * map, must be released by calling bpf_cgroup_release(). 2508 * @cgrp: The cgroup for which we're performing a lookup. 2509 * @level: The level of ancestor to look up. 
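 *
 * An illustrative usage sketch from a BPF program (not part of the kernel
 * source; the surrounding program, the trusted "cgrp" pointer and its error
 * handling are hypothetical):
 *
 *	struct cgroup *parent;
 *
 *	parent = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
 *	if (!parent)
 *		return 0;
 *	// inspect parent //
 *	bpf_cgroup_release(parent);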
 */
__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{
	struct cgroup *ancestor;

	if (level > cgrp->level || level < 0)
		return NULL;

	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
	ancestor = cgrp->ancestors[level];
	if (!cgroup_tryget(ancestor))
		return NULL;
	return ancestor;
}

/**
 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
 * kfunc which is not subsequently stored in a map, must be released by calling
 * bpf_cgroup_release().
 * @cgid: cgroup id.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
{
	struct cgroup *cgrp;

	cgrp = cgroup_get_from_id(cgid);
	if (IS_ERR(cgrp))
		return NULL;
	return cgrp;
}

/**
 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
 * task's membership of cgroup ancestry.
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
__bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
				       struct cgroup *ancestor)
{
	long ret;

	rcu_read_lock();
	ret = task_under_cgroup_hierarchy(task, ancestor);
	rcu_read_unlock();
	return ret;
}

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

/**
 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
 * hierarchy ID.
 * @task: The target task
 * @hierarchy_id: The ID of a cgroup1 hierarchy
 *
 * On success, the cgroup is returned. On failure, NULL is returned.
 */
__bpf_kfunc struct cgroup *
bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
{
	struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);

	if (IS_ERR(cgrp))
		return NULL;
	return cgrp;
}
#endif /* CONFIG_CGROUPS */

/**
 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
 * in the root pid namespace idr. If a task is returned, it must either be
 * stored in a map, or released with bpf_task_release().
 * @pid: The pid of the task being looked up.
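 *
 * An illustrative usage sketch from a BPF program (not part of the kernel
 * source; the pid value and what is done with the task are hypothetical):
 *
 *	struct task_struct *task;
 *
 *	task = bpf_task_from_pid(1);
 *	if (!task)
 *		return 0;
 *	// inspect the init task //
 *	bpf_task_release(task);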
2610 */ 2611 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) 2612 { 2613 struct task_struct *p; 2614 2615 rcu_read_lock(); 2616 p = find_task_by_pid_ns(pid, &init_pid_ns); 2617 if (p) 2618 p = bpf_task_acquire(p); 2619 rcu_read_unlock(); 2620 2621 return p; 2622 } 2623 2624 /** 2625 * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up 2626 * in the pid namespace of the current task. If a task is returned, it must 2627 * either be stored in a map, or released with bpf_task_release(). 2628 * @vpid: The vpid of the task being looked up. 2629 */ 2630 __bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid) 2631 { 2632 struct task_struct *p; 2633 2634 rcu_read_lock(); 2635 p = find_task_by_vpid(vpid); 2636 if (p) 2637 p = bpf_task_acquire(p); 2638 rcu_read_unlock(); 2639 2640 return p; 2641 } 2642 2643 /** 2644 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. 2645 * @p: The dynptr whose data slice to retrieve 2646 * @offset: Offset into the dynptr 2647 * @buffer__opt: User-provided buffer to copy contents into. May be NULL 2648 * @buffer__szk: Size (in bytes) of the buffer if present. This is the 2649 * length of the requested slice. This must be a constant. 2650 * 2651 * For non-skb and non-xdp type dynptrs, there is no difference between 2652 * bpf_dynptr_slice and bpf_dynptr_data. 2653 * 2654 * If buffer__opt is NULL, the call will fail if buffer_opt was needed. 2655 * 2656 * If the intention is to write to the data slice, please use 2657 * bpf_dynptr_slice_rdwr. 2658 * 2659 * The user must check that the returned pointer is not null before using it. 2660 * 2661 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice 2662 * does not change the underlying packet data pointers, so a call to 2663 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in 2664 * the bpf program. 2665 * 2666 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only 2667 * data slice (can be either direct pointer to the data or a pointer to the user 2668 * provided buffer, with its contents containing the data, if unable to obtain 2669 * direct pointer) 2670 */ 2671 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset, 2672 void *buffer__opt, u32 buffer__szk) 2673 { 2674 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2675 enum bpf_dynptr_type type; 2676 u32 len = buffer__szk; 2677 int err; 2678 2679 if (!ptr->data) 2680 return NULL; 2681 2682 err = bpf_dynptr_check_off_len(ptr, offset, len); 2683 if (err) 2684 return NULL; 2685 2686 type = bpf_dynptr_get_type(ptr); 2687 2688 switch (type) { 2689 case BPF_DYNPTR_TYPE_LOCAL: 2690 case BPF_DYNPTR_TYPE_RINGBUF: 2691 return ptr->data + ptr->offset + offset; 2692 case BPF_DYNPTR_TYPE_SKB: 2693 if (buffer__opt) 2694 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt); 2695 else 2696 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len); 2697 case BPF_DYNPTR_TYPE_XDP: 2698 { 2699 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); 2700 if (!IS_ERR_OR_NULL(xdp_ptr)) 2701 return xdp_ptr; 2702 2703 if (!buffer__opt) 2704 return NULL; 2705 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false); 2706 return buffer__opt; 2707 } 2708 default: 2709 WARN_ONCE(true, "unknown dynptr type %d\n", type); 2710 return NULL; 2711 } 2712 } 2713 2714 /** 2715 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data. 
2716 * @p: The dynptr whose data slice to retrieve 2717 * @offset: Offset into the dynptr 2718 * @buffer__opt: User-provided buffer to copy contents into. May be NULL 2719 * @buffer__szk: Size (in bytes) of the buffer if present. This is the 2720 * length of the requested slice. This must be a constant. 2721 * 2722 * For non-skb and non-xdp type dynptrs, there is no difference between 2723 * bpf_dynptr_slice and bpf_dynptr_data. 2724 * 2725 * If buffer__opt is NULL, the call will fail if buffer_opt was needed. 2726 * 2727 * The returned pointer is writable and may point to either directly the dynptr 2728 * data at the requested offset or to the buffer if unable to obtain a direct 2729 * data pointer to (example: the requested slice is to the paged area of an skb 2730 * packet). In the case where the returned pointer is to the buffer, the user 2731 * is responsible for persisting writes through calling bpf_dynptr_write(). This 2732 * usually looks something like this pattern: 2733 * 2734 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer)); 2735 * if (!eth) 2736 * return TC_ACT_SHOT; 2737 * 2738 * // mutate eth header // 2739 * 2740 * if (eth == buffer) 2741 * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); 2742 * 2743 * Please note that, as in the example above, the user must check that the 2744 * returned pointer is not null before using it. 2745 * 2746 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr 2747 * does not change the underlying packet data pointers, so a call to 2748 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in 2749 * the bpf program. 2750 * 2751 * Return: NULL if the call failed (eg invalid dynptr), pointer to a 2752 * data slice (can be either direct pointer to the data or a pointer to the user 2753 * provided buffer, with its contents containing the data, if unable to obtain 2754 * direct pointer) 2755 */ 2756 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, 2757 void *buffer__opt, u32 buffer__szk) 2758 { 2759 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2760 2761 if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) 2762 return NULL; 2763 2764 /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice. 2765 * 2766 * For skb-type dynptrs, it is safe to write into the returned pointer 2767 * if the bpf program allows skb data writes. There are two possibilities 2768 * that may occur when calling bpf_dynptr_slice_rdwr: 2769 * 2770 * 1) The requested slice is in the head of the skb. In this case, the 2771 * returned pointer is directly to skb data, and if the skb is cloned, the 2772 * verifier will have uncloned it (see bpf_unclone_prologue()) already. 2773 * The pointer can be directly written into. 2774 * 2775 * 2) Some portion of the requested slice is in the paged buffer area. 2776 * In this case, the requested data will be copied out into the buffer 2777 * and the returned pointer will be a pointer to the buffer. The skb 2778 * will not be pulled. To persist the write, the user will need to call 2779 * bpf_dynptr_write(), which will pull the skb and commit the write. 2780 * 2781 * Similarly for xdp programs, if the requested slice is not across xdp 2782 * fragments, then a direct pointer will be returned, otherwise the data 2783 * will be copied out into the buffer and the user will need to call 2784 * bpf_dynptr_write() to commit changes. 
2785 */ 2786 return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk); 2787 } 2788 2789 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end) 2790 { 2791 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2792 u32 size; 2793 2794 if (!ptr->data || start > end) 2795 return -EINVAL; 2796 2797 size = __bpf_dynptr_size(ptr); 2798 2799 if (start > size || end > size) 2800 return -ERANGE; 2801 2802 ptr->offset += start; 2803 bpf_dynptr_set_size(ptr, end - start); 2804 2805 return 0; 2806 } 2807 2808 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p) 2809 { 2810 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2811 2812 return !ptr->data; 2813 } 2814 2815 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p) 2816 { 2817 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2818 2819 if (!ptr->data) 2820 return false; 2821 2822 return __bpf_dynptr_is_rdonly(ptr); 2823 } 2824 2825 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p) 2826 { 2827 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2828 2829 if (!ptr->data) 2830 return -EINVAL; 2831 2832 return __bpf_dynptr_size(ptr); 2833 } 2834 2835 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p, 2836 struct bpf_dynptr *clone__uninit) 2837 { 2838 struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit; 2839 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2840 2841 if (!ptr->data) { 2842 bpf_dynptr_set_null(clone); 2843 return -EINVAL; 2844 } 2845 2846 *clone = *ptr; 2847 2848 return 0; 2849 } 2850 2851 /** 2852 * bpf_dynptr_copy() - Copy data from one dynptr to another. 2853 * @dst_ptr: Destination dynptr - where data should be copied to 2854 * @dst_off: Offset into the destination dynptr 2855 * @src_ptr: Source dynptr - where data should be copied from 2856 * @src_off: Offset into the source dynptr 2857 * @size: Length of the data to copy from source to destination 2858 * 2859 * Copies data from source dynptr to destination dynptr. 2860 * Returns 0 on success; negative error, otherwise. 2861 */ 2862 __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off, 2863 struct bpf_dynptr *src_ptr, u32 src_off, u32 size) 2864 { 2865 struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr; 2866 struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr; 2867 void *src_slice, *dst_slice; 2868 char buf[256]; 2869 u32 off; 2870 2871 src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size); 2872 dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size); 2873 2874 if (src_slice && dst_slice) { 2875 memmove(dst_slice, src_slice, size); 2876 return 0; 2877 } 2878 2879 if (src_slice) 2880 return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0); 2881 2882 if (dst_slice) 2883 return __bpf_dynptr_read(dst_slice, size, src, src_off, 0); 2884 2885 if (bpf_dynptr_check_off_len(dst, dst_off, size) || 2886 bpf_dynptr_check_off_len(src, src_off, size)) 2887 return -E2BIG; 2888 2889 off = 0; 2890 while (off < size) { 2891 u32 chunk_sz = min_t(u32, sizeof(buf), size - off); 2892 int err; 2893 2894 err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0); 2895 if (err) 2896 return err; 2897 err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0); 2898 if (err) 2899 return err; 2900 2901 off += chunk_sz; 2902 } 2903 return 0; 2904 } 2905 2906 /** 2907 * bpf_dynptr_memset() - Fill dynptr memory with a constant byte. 
2908 * @p: Destination dynptr - where data will be filled 2909 * @offset: Offset into the dynptr to start filling from 2910 * @size: Number of bytes to fill 2911 * @val: Constant byte to fill the memory with 2912 * 2913 * Fills the @size bytes of the memory area pointed to by @p 2914 * at @offset with the constant byte @val. 2915 * Returns 0 on success; negative error, otherwise. 2916 */ 2917 __bpf_kfunc int bpf_dynptr_memset(struct bpf_dynptr *p, u32 offset, u32 size, u8 val) 2918 { 2919 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2920 u32 chunk_sz, write_off; 2921 char buf[256]; 2922 void* slice; 2923 int err; 2924 2925 slice = bpf_dynptr_slice_rdwr(p, offset, NULL, size); 2926 if (likely(slice)) { 2927 memset(slice, val, size); 2928 return 0; 2929 } 2930 2931 if (__bpf_dynptr_is_rdonly(ptr)) 2932 return -EINVAL; 2933 2934 err = bpf_dynptr_check_off_len(ptr, offset, size); 2935 if (err) 2936 return err; 2937 2938 /* Non-linear data under the dynptr, write from a local buffer */ 2939 chunk_sz = min_t(u32, sizeof(buf), size); 2940 memset(buf, val, chunk_sz); 2941 2942 for (write_off = 0; write_off < size; write_off += chunk_sz) { 2943 chunk_sz = min_t(u32, sizeof(buf), size - write_off); 2944 err = __bpf_dynptr_write(ptr, offset + write_off, buf, chunk_sz, 0); 2945 if (err) 2946 return err; 2947 } 2948 2949 return 0; 2950 } 2951 2952 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj) 2953 { 2954 return obj; 2955 } 2956 2957 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k) 2958 { 2959 return (void *)obj__ign; 2960 } 2961 2962 __bpf_kfunc void bpf_rcu_read_lock(void) 2963 { 2964 rcu_read_lock(); 2965 } 2966 2967 __bpf_kfunc void bpf_rcu_read_unlock(void) 2968 { 2969 rcu_read_unlock(); 2970 } 2971 2972 struct bpf_throw_ctx { 2973 struct bpf_prog_aux *aux; 2974 u64 sp; 2975 u64 bp; 2976 int cnt; 2977 }; 2978 2979 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp) 2980 { 2981 struct bpf_throw_ctx *ctx = cookie; 2982 struct bpf_prog *prog; 2983 2984 /* 2985 * The RCU read lock is held to safely traverse the latch tree, but we 2986 * don't need its protection when accessing the prog, since it has an 2987 * active stack frame on the current stack trace, and won't disappear. 2988 */ 2989 rcu_read_lock(); 2990 prog = bpf_prog_ksym_find(ip); 2991 rcu_read_unlock(); 2992 if (!prog) 2993 return !ctx->cnt; 2994 ctx->cnt++; 2995 if (bpf_is_subprog(prog)) 2996 return true; 2997 ctx->aux = prog->aux; 2998 ctx->sp = sp; 2999 ctx->bp = bp; 3000 return false; 3001 } 3002 3003 __bpf_kfunc void bpf_throw(u64 cookie) 3004 { 3005 struct bpf_throw_ctx ctx = {}; 3006 3007 arch_bpf_stack_walk(bpf_stack_walker, &ctx); 3008 WARN_ON_ONCE(!ctx.aux); 3009 if (ctx.aux) 3010 WARN_ON_ONCE(!ctx.aux->exception_boundary); 3011 WARN_ON_ONCE(!ctx.bp); 3012 WARN_ON_ONCE(!ctx.cnt); 3013 /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning 3014 * deeper stack depths than ctx.sp as we do not return from bpf_throw, 3015 * which skips compiler generated instrumentation to do the same. 
3016 */ 3017 kasan_unpoison_task_stack_below((void *)(long)ctx.sp); 3018 ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0); 3019 WARN(1, "A call to BPF exception callback should never return\n"); 3020 } 3021 3022 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) 3023 { 3024 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 3025 struct bpf_map *map = p__map; 3026 3027 BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq)); 3028 BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq)); 3029 3030 if (flags) 3031 return -EINVAL; 3032 3033 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ); 3034 } 3035 3036 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) 3037 { 3038 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 3039 struct bpf_work *w; 3040 3041 if (in_nmi()) 3042 return -EOPNOTSUPP; 3043 if (flags) 3044 return -EINVAL; 3045 w = READ_ONCE(async->work); 3046 if (!w || !READ_ONCE(w->cb.prog)) 3047 return -EINVAL; 3048 3049 schedule_work(&w->work); 3050 return 0; 3051 } 3052 3053 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, 3054 int (callback_fn)(void *map, int *key, void *value), 3055 unsigned int flags, 3056 void *aux__prog) 3057 { 3058 struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog; 3059 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 3060 3061 if (flags) 3062 return -EINVAL; 3063 3064 return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); 3065 } 3066 3067 __bpf_kfunc void bpf_preempt_disable(void) 3068 { 3069 preempt_disable(); 3070 } 3071 3072 __bpf_kfunc void bpf_preempt_enable(void) 3073 { 3074 preempt_enable(); 3075 } 3076 3077 struct bpf_iter_bits { 3078 __u64 __opaque[2]; 3079 } __aligned(8); 3080 3081 #define BITS_ITER_NR_WORDS_MAX 511 3082 3083 struct bpf_iter_bits_kern { 3084 union { 3085 __u64 *bits; 3086 __u64 bits_copy; 3087 }; 3088 int nr_bits; 3089 int bit; 3090 } __aligned(8); 3091 3092 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing 3093 * a u64 pointer and an unsigned long pointer to find_next_bit() will 3094 * return the same result, as both point to the same 8-byte area. 3095 * 3096 * For 32-bit little-endian hosts, using a u64 pointer or unsigned long 3097 * pointer also makes no difference. This is because the first iterated 3098 * unsigned long is composed of bits 0-31 of the u64 and the second unsigned 3099 * long is composed of bits 32-63 of the u64. 3100 * 3101 * However, for 32-bit big-endian hosts, this is not the case. The first 3102 * iterated unsigned long will be bits 32-63 of the u64, so swap these two 3103 * ulong values within the u64. 3104 */ 3105 static void swap_ulong_in_u64(u64 *bits, unsigned int nr) 3106 { 3107 #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN) 3108 unsigned int i; 3109 3110 for (i = 0; i < nr; i++) 3111 bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32); 3112 #endif 3113 } 3114 3115 /** 3116 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area 3117 * @it: The new bpf_iter_bits to be created 3118 * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over 3119 * @nr_words: The size of the specified memory area, measured in 8-byte units. 3120 * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be 3121 * further reduced by the BPF memory allocator implementation. 
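 *
 * An illustrative usage sketch from a BPF program (not part of the kernel
 * source; "mask" is a hypothetical u64 array the program can read and
 * "nr_set" a hypothetical counter):
 *
 *	struct bpf_iter_bits it;
 *	int *bit;
 *
 *	bpf_iter_bits_new(&it, mask, ARRAY_SIZE(mask));
 *	while ((bit = bpf_iter_bits_next(&it)))
 *		nr_set++;	// *bit is the index of the next set bit //
 *	bpf_iter_bits_destroy(&it);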
3122 * 3123 * This function initializes a new bpf_iter_bits structure for iterating over 3124 * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It 3125 * copies the data of the memory area to the newly created bpf_iter_bits @it for 3126 * subsequent iteration operations. 3127 * 3128 * On success, 0 is returned. On failure, ERR is returned. 3129 */ 3130 __bpf_kfunc int 3131 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) 3132 { 3133 struct bpf_iter_bits_kern *kit = (void *)it; 3134 u32 nr_bytes = nr_words * sizeof(u64); 3135 u32 nr_bits = BYTES_TO_BITS(nr_bytes); 3136 int err; 3137 3138 BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits)); 3139 BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) != 3140 __alignof__(struct bpf_iter_bits)); 3141 3142 kit->nr_bits = 0; 3143 kit->bits_copy = 0; 3144 kit->bit = -1; 3145 3146 if (!unsafe_ptr__ign || !nr_words) 3147 return -EINVAL; 3148 if (nr_words > BITS_ITER_NR_WORDS_MAX) 3149 return -E2BIG; 3150 3151 /* Optimization for u64 mask */ 3152 if (nr_bits == 64) { 3153 err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign); 3154 if (err) 3155 return -EFAULT; 3156 3157 swap_ulong_in_u64(&kit->bits_copy, nr_words); 3158 3159 kit->nr_bits = nr_bits; 3160 return 0; 3161 } 3162 3163 if (bpf_mem_alloc_check_size(false, nr_bytes)) 3164 return -E2BIG; 3165 3166 /* Fallback to memalloc */ 3167 kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes); 3168 if (!kit->bits) 3169 return -ENOMEM; 3170 3171 err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign); 3172 if (err) { 3173 bpf_mem_free(&bpf_global_ma, kit->bits); 3174 return err; 3175 } 3176 3177 swap_ulong_in_u64(kit->bits, nr_words); 3178 3179 kit->nr_bits = nr_bits; 3180 return 0; 3181 } 3182 3183 /** 3184 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits 3185 * @it: The bpf_iter_bits to be checked 3186 * 3187 * This function returns a pointer to a number representing the value of the 3188 * next bit in the bits. 3189 * 3190 * If there are no further bits available, it returns NULL. 3191 */ 3192 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it) 3193 { 3194 struct bpf_iter_bits_kern *kit = (void *)it; 3195 int bit = kit->bit, nr_bits = kit->nr_bits; 3196 const void *bits; 3197 3198 if (!nr_bits || bit >= nr_bits) 3199 return NULL; 3200 3201 bits = nr_bits == 64 ? &kit->bits_copy : kit->bits; 3202 bit = find_next_bit(bits, nr_bits, bit + 1); 3203 if (bit >= nr_bits) { 3204 kit->bit = bit; 3205 return NULL; 3206 } 3207 3208 kit->bit = bit; 3209 return &kit->bit; 3210 } 3211 3212 /** 3213 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits 3214 * @it: The bpf_iter_bits to be destroyed 3215 * 3216 * Destroy the resource associated with the bpf_iter_bits. 3217 */ 3218 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it) 3219 { 3220 struct bpf_iter_bits_kern *kit = (void *)it; 3221 3222 if (kit->nr_bits <= 64) 3223 return; 3224 bpf_mem_free(&bpf_global_ma, kit->bits); 3225 } 3226 3227 /** 3228 * bpf_copy_from_user_str() - Copy a string from an unsafe user address 3229 * @dst: Destination address, in kernel space. This buffer must be 3230 * at least @dst__sz bytes long. 3231 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL. 3232 * @unsafe_ptr__ign: Source address, in user space. 3233 * @flags: The only supported flag is BPF_F_PAD_ZEROS 3234 * 3235 * Copies a NUL-terminated string from userspace to BPF space. 
If user string is
 * too long, this will still ensure zero termination in the dst buffer unless
 * the buffer size is 0.
 *
 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
 * memset all of @dst on failure.
 */
__bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
{
	int ret;

	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
		return -EINVAL;

	if (unlikely(!dst__sz))
		return 0;

	ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
	if (ret < 0) {
		if (flags & BPF_F_PAD_ZEROS)
			memset((char *)dst, 0, dst__sz);

		return ret;
	}

	if (flags & BPF_F_PAD_ZEROS)
		memset((char *)dst + ret, 0, dst__sz - ret);
	else
		((char *)dst)[ret] = '\0';

	return ret + 1;
}

/**
 * bpf_copy_from_user_task_str() - Copy a string from a task's address space
 * @dst: Destination address, in kernel space. This buffer must be
 *       at least @dst__sz bytes long.
 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL.
 * @unsafe_ptr__ign: Source address in the task's address space.
 * @tsk: The task whose address space will be used
 * @flags: The only supported flag is BPF_F_PAD_ZEROS
 *
 * Copies a NUL-terminated string from a task's address space to the @dst
 * buffer. If the user string is too long, this will still ensure zero
 * termination in the @dst buffer unless the buffer size is 0.
 *
 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success
 * and memset all of @dst on failure.
 *
 * Return: The number of copied bytes on success including the NUL terminator.
 * A negative error code on failure.
 */
__bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz,
					    const void __user *unsafe_ptr__ign,
					    struct task_struct *tsk, u64 flags)
{
	int ret;

	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
		return -EINVAL;

	if (unlikely(dst__sz == 0))
		return 0;

	ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0);
	if (ret < 0) {
		if (flags & BPF_F_PAD_ZEROS)
			memset(dst, 0, dst__sz);
		return ret;
	}

	if (flags & BPF_F_PAD_ZEROS)
		memset(dst + ret, 0, dst__sz - ret);

	return ret + 1;
}

/* Keep unsigned long in prototype so that kfunc is usable when emitted to
 * vmlinux.h in BPF programs directly, but note that while in BPF prog, the
 * unsigned long always points to 8-byte region on stack, the kernel may only
 * read and write the 4-bytes on 32-bit.
 */
__bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
{
	local_irq_save(*flags__irq_flag);
}

__bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
{
	local_irq_restore(*flags__irq_flag);
}

__bpf_kfunc void __bpf_trap(void)
{
}

/*
 * Kfuncs for string operations.
 *
 * Since strings are not necessarily %NUL-terminated, we cannot directly call
 * in-kernel implementations. Instead, we open-code the implementations using
 * __get_kernel_nofault instead of plain dereference to make them safe.
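 *
 * An illustrative usage sketch from a BPF program (not part of the kernel
 * source; "name" is a hypothetical kernel string the program holds a pointer
 * to, and bpf_printk() is the libbpf bpf_helpers.h macro):
 *
 *	if (bpf_strstr(name, "kworker") >= 0)
 *		bpf_printk("kworker thread: %s", name);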
3337 */ 3338 3339 /** 3340 * bpf_strcmp - Compare two strings 3341 * @s1__ign: One string 3342 * @s2__ign: Another string 3343 * 3344 * Return: 3345 * * %0 - Strings are equal 3346 * * %-1 - @s1__ign is smaller 3347 * * %1 - @s2__ign is smaller 3348 * * %-EFAULT - Cannot read one of the strings 3349 * * %-E2BIG - One of strings is too large 3350 * * %-ERANGE - One of strings is outside of kernel address space 3351 */ 3352 __bpf_kfunc int bpf_strcmp(const char *s1__ign, const char *s2__ign) 3353 { 3354 char c1, c2; 3355 int i; 3356 3357 if (!copy_from_kernel_nofault_allowed(s1__ign, 1) || 3358 !copy_from_kernel_nofault_allowed(s2__ign, 1)) { 3359 return -ERANGE; 3360 } 3361 3362 guard(pagefault)(); 3363 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3364 __get_kernel_nofault(&c1, s1__ign, char, err_out); 3365 __get_kernel_nofault(&c2, s2__ign, char, err_out); 3366 if (c1 != c2) 3367 return c1 < c2 ? -1 : 1; 3368 if (c1 == '\0') 3369 return 0; 3370 s1__ign++; 3371 s2__ign++; 3372 } 3373 return -E2BIG; 3374 err_out: 3375 return -EFAULT; 3376 } 3377 3378 /** 3379 * bpf_strnchr - Find a character in a length limited string 3380 * @s__ign: The string to be searched 3381 * @count: The number of characters to be searched 3382 * @c: The character to search for 3383 * 3384 * Note that the %NUL-terminator is considered part of the string, and can 3385 * be searched for. 3386 * 3387 * Return: 3388 * * >=0 - Index of the first occurrence of @c within @s__ign 3389 * * %-ENOENT - @c not found in the first @count characters of @s__ign 3390 * * %-EFAULT - Cannot read @s__ign 3391 * * %-E2BIG - @s__ign is too large 3392 * * %-ERANGE - @s__ign is outside of kernel address space 3393 */ 3394 __bpf_kfunc int bpf_strnchr(const char *s__ign, size_t count, char c) 3395 { 3396 char sc; 3397 int i; 3398 3399 if (!copy_from_kernel_nofault_allowed(s__ign, 1)) 3400 return -ERANGE; 3401 3402 guard(pagefault)(); 3403 for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) { 3404 __get_kernel_nofault(&sc, s__ign, char, err_out); 3405 if (sc == c) 3406 return i; 3407 if (sc == '\0') 3408 return -ENOENT; 3409 s__ign++; 3410 } 3411 return i == XATTR_SIZE_MAX ? -E2BIG : -ENOENT; 3412 err_out: 3413 return -EFAULT; 3414 } 3415 3416 /** 3417 * bpf_strchr - Find the first occurrence of a character in a string 3418 * @s__ign: The string to be searched 3419 * @c: The character to search for 3420 * 3421 * Note that the %NUL-terminator is considered part of the string, and can 3422 * be searched for. 
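 *
 * For example (illustrative sketch; "path" is a hypothetical string pointer
 * held by the BPF program, and bpf_printk() is the libbpf bpf_helpers.h
 * macro):
 *
 *	int slash = bpf_strchr(path, '/');
 *
 *	if (slash >= 0)
 *		bpf_printk("first '/' at offset %d", slash);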
3423 * 3424 * Return: 3425 * * >=0 - The index of the first occurrence of @c within @s__ign 3426 * * %-ENOENT - @c not found in @s__ign 3427 * * %-EFAULT - Cannot read @s__ign 3428 * * %-E2BIG - @s__ign is too large 3429 * * %-ERANGE - @s__ign is outside of kernel address space 3430 */ 3431 __bpf_kfunc int bpf_strchr(const char *s__ign, char c) 3432 { 3433 return bpf_strnchr(s__ign, XATTR_SIZE_MAX, c); 3434 } 3435 3436 /** 3437 * bpf_strchrnul - Find and return a character in a string, or end of string 3438 * @s__ign: The string to be searched 3439 * @c: The character to search for 3440 * 3441 * Return: 3442 * * >=0 - Index of the first occurrence of @c within @s__ign or index of 3443 * the null byte at the end of @s__ign when @c is not found 3444 * * %-EFAULT - Cannot read @s__ign 3445 * * %-E2BIG - @s__ign is too large 3446 * * %-ERANGE - @s__ign is outside of kernel address space 3447 */ 3448 __bpf_kfunc int bpf_strchrnul(const char *s__ign, char c) 3449 { 3450 char sc; 3451 int i; 3452 3453 if (!copy_from_kernel_nofault_allowed(s__ign, 1)) 3454 return -ERANGE; 3455 3456 guard(pagefault)(); 3457 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3458 __get_kernel_nofault(&sc, s__ign, char, err_out); 3459 if (sc == '\0' || sc == c) 3460 return i; 3461 s__ign++; 3462 } 3463 return -E2BIG; 3464 err_out: 3465 return -EFAULT; 3466 } 3467 3468 /** 3469 * bpf_strrchr - Find the last occurrence of a character in a string 3470 * @s__ign: The string to be searched 3471 * @c: The character to search for 3472 * 3473 * Return: 3474 * * >=0 - Index of the last occurrence of @c within @s__ign 3475 * * %-ENOENT - @c not found in @s__ign 3476 * * %-EFAULT - Cannot read @s__ign 3477 * * %-E2BIG - @s__ign is too large 3478 * * %-ERANGE - @s__ign is outside of kernel address space 3479 */ 3480 __bpf_kfunc int bpf_strrchr(const char *s__ign, int c) 3481 { 3482 char sc; 3483 int i, last = -ENOENT; 3484 3485 if (!copy_from_kernel_nofault_allowed(s__ign, 1)) 3486 return -ERANGE; 3487 3488 guard(pagefault)(); 3489 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3490 __get_kernel_nofault(&sc, s__ign, char, err_out); 3491 if (sc == c) 3492 last = i; 3493 if (sc == '\0') 3494 return last; 3495 s__ign++; 3496 } 3497 return -E2BIG; 3498 err_out: 3499 return -EFAULT; 3500 } 3501 3502 /** 3503 * bpf_strnlen - Calculate the length of a length-limited string 3504 * @s__ign: The string 3505 * @count: The maximum number of characters to count 3506 * 3507 * Return: 3508 * * >=0 - The length of @s__ign 3509 * * %-EFAULT - Cannot read @s__ign 3510 * * %-E2BIG - @s__ign is too large 3511 * * %-ERANGE - @s__ign is outside of kernel address space 3512 */ 3513 __bpf_kfunc int bpf_strnlen(const char *s__ign, size_t count) 3514 { 3515 char c; 3516 int i; 3517 3518 if (!copy_from_kernel_nofault_allowed(s__ign, 1)) 3519 return -ERANGE; 3520 3521 guard(pagefault)(); 3522 for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) { 3523 __get_kernel_nofault(&c, s__ign, char, err_out); 3524 if (c == '\0') 3525 return i; 3526 s__ign++; 3527 } 3528 return i == XATTR_SIZE_MAX ? 
-E2BIG : i; 3529 err_out: 3530 return -EFAULT; 3531 } 3532 3533 /** 3534 * bpf_strlen - Calculate the length of a string 3535 * @s__ign: The string 3536 * 3537 * Return: 3538 * * >=0 - The length of @s__ign 3539 * * %-EFAULT - Cannot read @s__ign 3540 * * %-E2BIG - @s__ign is too large 3541 * * %-ERANGE - @s__ign is outside of kernel address space 3542 */ 3543 __bpf_kfunc int bpf_strlen(const char *s__ign) 3544 { 3545 return bpf_strnlen(s__ign, XATTR_SIZE_MAX); 3546 } 3547 3548 /** 3549 * bpf_strspn - Calculate the length of the initial substring of @s__ign which 3550 * only contains letters in @accept__ign 3551 * @s__ign: The string to be searched 3552 * @accept__ign: The string to search for 3553 * 3554 * Return: 3555 * * >=0 - The length of the initial substring of @s__ign which only 3556 * contains letters from @accept__ign 3557 * * %-EFAULT - Cannot read one of the strings 3558 * * %-E2BIG - One of the strings is too large 3559 * * %-ERANGE - One of the strings is outside of kernel address space 3560 */ 3561 __bpf_kfunc int bpf_strspn(const char *s__ign, const char *accept__ign) 3562 { 3563 char cs, ca; 3564 int i, j; 3565 3566 if (!copy_from_kernel_nofault_allowed(s__ign, 1) || 3567 !copy_from_kernel_nofault_allowed(accept__ign, 1)) { 3568 return -ERANGE; 3569 } 3570 3571 guard(pagefault)(); 3572 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3573 __get_kernel_nofault(&cs, s__ign, char, err_out); 3574 if (cs == '\0') 3575 return i; 3576 for (j = 0; j < XATTR_SIZE_MAX; j++) { 3577 __get_kernel_nofault(&ca, accept__ign + j, char, err_out); 3578 if (cs == ca || ca == '\0') 3579 break; 3580 } 3581 if (j == XATTR_SIZE_MAX) 3582 return -E2BIG; 3583 if (ca == '\0') 3584 return i; 3585 s__ign++; 3586 } 3587 return -E2BIG; 3588 err_out: 3589 return -EFAULT; 3590 } 3591 3592 /** 3593 * bpf_strcspn - Calculate the length of the initial substring of @s__ign which 3594 * does not contain letters in @reject__ign 3595 * @s__ign: The string to be searched 3596 * @reject__ign: The string to search for 3597 * 3598 * Return: 3599 * * >=0 - The length of the initial substring of @s__ign which does not 3600 * contain letters from @reject__ign 3601 * * %-EFAULT - Cannot read one of the strings 3602 * * %-E2BIG - One of the strings is too large 3603 * * %-ERANGE - One of the strings is outside of kernel address space 3604 */ 3605 __bpf_kfunc int bpf_strcspn(const char *s__ign, const char *reject__ign) 3606 { 3607 char cs, cr; 3608 int i, j; 3609 3610 if (!copy_from_kernel_nofault_allowed(s__ign, 1) || 3611 !copy_from_kernel_nofault_allowed(reject__ign, 1)) { 3612 return -ERANGE; 3613 } 3614 3615 guard(pagefault)(); 3616 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3617 __get_kernel_nofault(&cs, s__ign, char, err_out); 3618 if (cs == '\0') 3619 return i; 3620 for (j = 0; j < XATTR_SIZE_MAX; j++) { 3621 __get_kernel_nofault(&cr, reject__ign + j, char, err_out); 3622 if (cs == cr || cr == '\0') 3623 break; 3624 } 3625 if (j == XATTR_SIZE_MAX) 3626 return -E2BIG; 3627 if (cr != '\0') 3628 return i; 3629 s__ign++; 3630 } 3631 return -E2BIG; 3632 err_out: 3633 return -EFAULT; 3634 } 3635 3636 /** 3637 * bpf_strnstr - Find the first substring in a length-limited string 3638 * @s1__ign: The string to be searched 3639 * @s2__ign: The string to search for 3640 * @len: the maximum number of characters to search 3641 * 3642 * Return: 3643 * * >=0 - Index of the first character of the first occurrence of @s2__ign 3644 * within the first @len characters of @s1__ign 3645 * * %-ENOENT - @s2__ign not found in the first @len characters 
of @s1__ign 3646 * * %-EFAULT - Cannot read one of the strings 3647 * * %-E2BIG - One of the strings is too large 3648 * * %-ERANGE - One of the strings is outside of kernel address space 3649 */ 3650 __bpf_kfunc int bpf_strnstr(const char *s1__ign, const char *s2__ign, size_t len) 3651 { 3652 char c1, c2; 3653 int i, j; 3654 3655 if (!copy_from_kernel_nofault_allowed(s1__ign, 1) || 3656 !copy_from_kernel_nofault_allowed(s2__ign, 1)) { 3657 return -ERANGE; 3658 } 3659 3660 guard(pagefault)(); 3661 for (i = 0; i < XATTR_SIZE_MAX; i++) { 3662 for (j = 0; i + j < len && j < XATTR_SIZE_MAX; j++) { 3663 __get_kernel_nofault(&c2, s2__ign + j, char, err_out); 3664 if (c2 == '\0') 3665 return i; 3666 __get_kernel_nofault(&c1, s1__ign + j, char, err_out); 3667 if (c1 == '\0') 3668 return -ENOENT; 3669 if (c1 != c2) 3670 break; 3671 } 3672 if (j == XATTR_SIZE_MAX) 3673 return -E2BIG; 3674 if (i + j == len) 3675 return -ENOENT; 3676 s1__ign++; 3677 } 3678 return -E2BIG; 3679 err_out: 3680 return -EFAULT; 3681 } 3682 3683 /** 3684 * bpf_strstr - Find the first substring in a string 3685 * @s1__ign: The string to be searched 3686 * @s2__ign: The string to search for 3687 * 3688 * Return: 3689 * * >=0 - Index of the first character of the first occurrence of @s2__ign 3690 * within @s1__ign 3691 * * %-ENOENT - @s2__ign is not a substring of @s1__ign 3692 * * %-EFAULT - Cannot read one of the strings 3693 * * %-E2BIG - One of the strings is too large 3694 * * %-ERANGE - One of the strings is outside of kernel address space 3695 */ 3696 __bpf_kfunc int bpf_strstr(const char *s1__ign, const char *s2__ign) 3697 { 3698 return bpf_strnstr(s1__ign, s2__ign, XATTR_SIZE_MAX); 3699 } 3700 3701 __bpf_kfunc_end_defs(); 3702 3703 BTF_KFUNCS_START(generic_btf_ids) 3704 #ifdef CONFIG_CRASH_DUMP 3705 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) 3706 #endif 3707 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) 3708 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) 3709 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) 3710 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE) 3711 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU) 3712 BTF_ID_FLAGS(func, bpf_list_push_front_impl) 3713 BTF_ID_FLAGS(func, bpf_list_push_back_impl) 3714 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) 3715 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) 3716 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL) 3717 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL) 3718 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3719 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) 3720 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL) 3721 BTF_ID_FLAGS(func, bpf_rbtree_add_impl) 3722 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) 3723 BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL) 3724 BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL) 3725 BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL) 3726 3727 #ifdef CONFIG_CGROUPS 3728 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3729 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) 3730 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3731 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) 3732 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU) 3733 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 3734 #endif 3735 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) 3736 
BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL) 3737 BTF_ID_FLAGS(func, bpf_throw) 3738 #ifdef CONFIG_BPF_EVENTS 3739 BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS) 3740 #endif 3741 BTF_KFUNCS_END(generic_btf_ids) 3742 3743 static const struct btf_kfunc_id_set generic_kfunc_set = { 3744 .owner = THIS_MODULE, 3745 .set = &generic_btf_ids, 3746 }; 3747 3748 3749 BTF_ID_LIST(generic_dtor_ids) 3750 BTF_ID(struct, task_struct) 3751 BTF_ID(func, bpf_task_release_dtor) 3752 #ifdef CONFIG_CGROUPS 3753 BTF_ID(struct, cgroup) 3754 BTF_ID(func, bpf_cgroup_release_dtor) 3755 #endif 3756 3757 BTF_KFUNCS_START(common_btf_ids) 3758 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL) 3759 BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL) 3760 BTF_ID_FLAGS(func, bpf_rcu_read_lock) 3761 BTF_ID_FLAGS(func, bpf_rcu_read_unlock) 3762 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) 3763 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL) 3764 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW) 3765 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL) 3766 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) 3767 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU) 3768 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL) 3769 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) 3770 #ifdef CONFIG_CGROUPS 3771 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) 3772 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) 3773 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) 3774 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) 3775 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) 3776 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) 3777 #endif 3778 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) 3779 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) 3780 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) 3781 BTF_ID_FLAGS(func, bpf_dynptr_adjust) 3782 BTF_ID_FLAGS(func, bpf_dynptr_is_null) 3783 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) 3784 BTF_ID_FLAGS(func, bpf_dynptr_size) 3785 BTF_ID_FLAGS(func, bpf_dynptr_clone) 3786 BTF_ID_FLAGS(func, bpf_dynptr_copy) 3787 BTF_ID_FLAGS(func, bpf_dynptr_memset) 3788 #ifdef CONFIG_NET 3789 BTF_ID_FLAGS(func, bpf_modify_return_test_tp) 3790 #endif 3791 BTF_ID_FLAGS(func, bpf_wq_init) 3792 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl) 3793 BTF_ID_FLAGS(func, bpf_wq_start) 3794 BTF_ID_FLAGS(func, bpf_preempt_disable) 3795 BTF_ID_FLAGS(func, bpf_preempt_enable) 3796 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW) 3797 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL) 3798 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY) 3799 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE) 3800 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE) 3801 BTF_ID_FLAGS(func, bpf_get_kmem_cache) 3802 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE) 3803 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) 3804 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) 3805 BTF_ID_FLAGS(func, bpf_local_irq_save) 3806 BTF_ID_FLAGS(func, bpf_local_irq_restore) 3807 BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr) 3808 BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr) 3809 BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr) 
3810 BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr) 3811 BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE) 3812 BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE) 3813 BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) 3814 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) 3815 #ifdef CONFIG_DMA_SHARED_BUFFER 3816 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE) 3817 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) 3818 BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) 3819 #endif 3820 BTF_ID_FLAGS(func, __bpf_trap) 3821 BTF_ID_FLAGS(func, bpf_strcmp); 3822 BTF_ID_FLAGS(func, bpf_strchr); 3823 BTF_ID_FLAGS(func, bpf_strchrnul); 3824 BTF_ID_FLAGS(func, bpf_strnchr); 3825 BTF_ID_FLAGS(func, bpf_strrchr); 3826 BTF_ID_FLAGS(func, bpf_strlen); 3827 BTF_ID_FLAGS(func, bpf_strnlen); 3828 BTF_ID_FLAGS(func, bpf_strspn); 3829 BTF_ID_FLAGS(func, bpf_strcspn); 3830 BTF_ID_FLAGS(func, bpf_strstr); 3831 BTF_ID_FLAGS(func, bpf_strnstr); 3832 #if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS) 3833 BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU) 3834 #endif 3835 BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_TRUSTED_ARGS) 3836 BTF_KFUNCS_END(common_btf_ids) 3837 3838 static const struct btf_kfunc_id_set common_kfunc_set = { 3839 .owner = THIS_MODULE, 3840 .set = &common_btf_ids, 3841 }; 3842 3843 static int __init kfunc_init(void) 3844 { 3845 int ret; 3846 const struct btf_id_dtor_kfunc generic_dtors[] = { 3847 { 3848 .btf_id = generic_dtor_ids[0], 3849 .kfunc_btf_id = generic_dtor_ids[1] 3850 }, 3851 #ifdef CONFIG_CGROUPS 3852 { 3853 .btf_id = generic_dtor_ids[2], 3854 .kfunc_btf_id = generic_dtor_ids[3] 3855 }, 3856 #endif 3857 }; 3858 3859 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set); 3860 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set); 3861 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set); 3862 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set); 3863 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set); 3864 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set); 3865 ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors, 3866 ARRAY_SIZE(generic_dtors), 3867 THIS_MODULE); 3868 return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set); 3869 } 3870 3871 late_initcall(kfunc_init); 3872 3873 /* Get a pointer to dynptr data up to len bytes for read only access. If 3874 * the dynptr doesn't have continuous data up to len bytes, return NULL. 3875 */ 3876 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len) 3877 { 3878 const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr; 3879 3880 return bpf_dynptr_slice(p, 0, NULL, len); 3881 } 3882 3883 /* Get a pointer to dynptr data up to len bytes for read write access. If 3884 * the dynptr doesn't have continuous data up to len bytes, or the dynptr 3885 * is read only, return NULL. 3886 */ 3887 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len) 3888 { 3889 if (__bpf_dynptr_is_rdonly(ptr)) 3890 return NULL; 3891 return (void *)__bpf_dynptr_data(ptr, len); 3892 } 3893