1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #include <linux/bpf.h> 5 #include <linux/btf.h> 6 #include <linux/bpf-cgroup.h> 7 #include <linux/cgroup.h> 8 #include <linux/rcupdate.h> 9 #include <linux/random.h> 10 #include <linux/smp.h> 11 #include <linux/topology.h> 12 #include <linux/ktime.h> 13 #include <linux/sched.h> 14 #include <linux/uidgid.h> 15 #include <linux/filter.h> 16 #include <linux/ctype.h> 17 #include <linux/jiffies.h> 18 #include <linux/pid_namespace.h> 19 #include <linux/poison.h> 20 #include <linux/proc_ns.h> 21 #include <linux/security.h> 22 #include <linux/btf_ids.h> 23 #include <linux/bpf_mem_alloc.h> 24 25 #include "../../lib/kstrtox.h" 26 27 /* If kernel subsystem is allowing eBPF programs to call this function, 28 * inside its own verifier_ops->get_func_proto() callback it should return 29 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments 30 * 31 * Different map implementations will rely on rcu in map methods 32 * lookup/update/delete, therefore eBPF programs must run under rcu lock 33 * if program is allowed to access maps, so check rcu_read_lock_held in 34 * all three functions. 35 */ 36 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) 37 { 38 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 39 return (unsigned long) map->ops->map_lookup_elem(map, key); 40 } 41 42 const struct bpf_func_proto bpf_map_lookup_elem_proto = { 43 .func = bpf_map_lookup_elem, 44 .gpl_only = false, 45 .pkt_access = true, 46 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 47 .arg1_type = ARG_CONST_MAP_PTR, 48 .arg2_type = ARG_PTR_TO_MAP_KEY, 49 }; 50 51 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key, 52 void *, value, u64, flags) 53 { 54 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 55 return map->ops->map_update_elem(map, key, value, flags); 56 } 57 58 const struct bpf_func_proto bpf_map_update_elem_proto = { 59 .func = bpf_map_update_elem, 60 .gpl_only = false, 61 .pkt_access = true, 62 .ret_type = RET_INTEGER, 63 .arg1_type = ARG_CONST_MAP_PTR, 64 .arg2_type = ARG_PTR_TO_MAP_KEY, 65 .arg3_type = ARG_PTR_TO_MAP_VALUE, 66 .arg4_type = ARG_ANYTHING, 67 }; 68 69 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key) 70 { 71 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 72 return map->ops->map_delete_elem(map, key); 73 } 74 75 const struct bpf_func_proto bpf_map_delete_elem_proto = { 76 .func = bpf_map_delete_elem, 77 .gpl_only = false, 78 .pkt_access = true, 79 .ret_type = RET_INTEGER, 80 .arg1_type = ARG_CONST_MAP_PTR, 81 .arg2_type = ARG_PTR_TO_MAP_KEY, 82 }; 83 84 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags) 85 { 86 return map->ops->map_push_elem(map, value, flags); 87 } 88 89 const struct bpf_func_proto bpf_map_push_elem_proto = { 90 .func = bpf_map_push_elem, 91 .gpl_only = false, 92 .pkt_access = true, 93 .ret_type = RET_INTEGER, 94 .arg1_type = ARG_CONST_MAP_PTR, 95 .arg2_type = ARG_PTR_TO_MAP_VALUE, 96 .arg3_type = ARG_ANYTHING, 97 }; 98 99 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value) 100 { 101 return map->ops->map_pop_elem(map, value); 102 } 103 104 const struct bpf_func_proto bpf_map_pop_elem_proto = { 105 .func = bpf_map_pop_elem, 106 .gpl_only = false, 107 .ret_type = RET_INTEGER, 108 .arg1_type = ARG_CONST_MAP_PTR, 109 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT, 110 }; 111 112 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) 113 { 114 return map->ops->map_peek_elem(map, value); 115 } 116 117 const struct bpf_func_proto bpf_map_peek_elem_proto = { 118 .func = bpf_map_peek_elem, 119 .gpl_only = false, 120 .ret_type = RET_INTEGER, 121 .arg1_type = ARG_CONST_MAP_PTR, 122 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT, 123 }; 124 125 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) 126 { 127 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 128 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); 129 } 130 131 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = { 132 .func = bpf_map_lookup_percpu_elem, 133 .gpl_only = false, 134 .pkt_access = true, 135 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 136 .arg1_type = ARG_CONST_MAP_PTR, 137 .arg2_type = ARG_PTR_TO_MAP_KEY, 138 .arg3_type = ARG_ANYTHING, 139 }; 140 141 const struct bpf_func_proto bpf_get_prandom_u32_proto = { 142 .func = bpf_user_rnd_u32, 143 .gpl_only = false, 144 .ret_type = RET_INTEGER, 145 }; 146 147 BPF_CALL_0(bpf_get_smp_processor_id) 148 { 149 return smp_processor_id(); 150 } 151 152 const struct bpf_func_proto bpf_get_smp_processor_id_proto = { 153 .func = bpf_get_smp_processor_id, 154 .gpl_only = false, 155 .ret_type = RET_INTEGER, 156 }; 157 158 BPF_CALL_0(bpf_get_numa_node_id) 159 { 160 return numa_node_id(); 161 } 162 163 const struct bpf_func_proto bpf_get_numa_node_id_proto = { 164 .func = bpf_get_numa_node_id, 165 .gpl_only = false, 166 .ret_type = RET_INTEGER, 167 }; 168 169 BPF_CALL_0(bpf_ktime_get_ns) 170 { 171 /* NMI safe access to clock monotonic */ 172 return ktime_get_mono_fast_ns(); 173 } 174 175 const struct bpf_func_proto bpf_ktime_get_ns_proto = { 176 .func = bpf_ktime_get_ns, 177 .gpl_only = false, 178 .ret_type = RET_INTEGER, 179 }; 180 181 BPF_CALL_0(bpf_ktime_get_boot_ns) 182 { 183 /* NMI safe access to clock boottime */ 184 return ktime_get_boot_fast_ns(); 185 } 186 187 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = { 188 .func = bpf_ktime_get_boot_ns, 189 .gpl_only = false, 190 .ret_type = RET_INTEGER, 191 }; 192 193 BPF_CALL_0(bpf_ktime_get_coarse_ns) 194 { 195 return ktime_get_coarse_ns(); 196 } 197 198 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = { 199 .func = bpf_ktime_get_coarse_ns, 200 .gpl_only = false, 201 .ret_type = RET_INTEGER, 202 }; 203 204 BPF_CALL_0(bpf_ktime_get_tai_ns) 205 { 206 /* NMI safe access to clock tai */ 207 return ktime_get_tai_fast_ns(); 208 } 209 210 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = { 211 .func = bpf_ktime_get_tai_ns, 212 .gpl_only = false, 213 .ret_type = RET_INTEGER, 214 }; 215 216 BPF_CALL_0(bpf_get_current_pid_tgid) 217 { 218 struct task_struct *task = current; 219 220 if (unlikely(!task)) 221 return -EINVAL; 222 223 return (u64) task->tgid << 32 | task->pid; 224 } 225 226 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = { 227 .func = bpf_get_current_pid_tgid, 228 .gpl_only = false, 229 .ret_type = RET_INTEGER, 230 }; 231 232 BPF_CALL_0(bpf_get_current_uid_gid) 233 { 234 struct task_struct *task = current; 235 kuid_t uid; 236 kgid_t gid; 237 238 if (unlikely(!task)) 239 return -EINVAL; 240 241 current_uid_gid(&uid, &gid); 242 return (u64) from_kgid(&init_user_ns, gid) << 32 | 243 from_kuid(&init_user_ns, uid); 244 } 245 246 const struct bpf_func_proto bpf_get_current_uid_gid_proto = { 247 .func = bpf_get_current_uid_gid, 248 .gpl_only = false, 249 .ret_type = RET_INTEGER, 250 }; 251 252 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size) 253 { 254 struct task_struct *task = current; 255 256 if (unlikely(!task)) 257 goto err_clear; 258 259 /* Verifier guarantees that size > 0 */ 260 strscpy(buf, task->comm, size); 261 return 0; 262 err_clear: 263 memset(buf, 0, size); 264 return -EINVAL; 265 } 266 267 const struct bpf_func_proto bpf_get_current_comm_proto = { 268 .func = bpf_get_current_comm, 269 .gpl_only = false, 270 .ret_type = RET_INTEGER, 271 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 272 .arg2_type = ARG_CONST_SIZE, 273 }; 274 275 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK) 276 277 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) 278 { 279 arch_spinlock_t *l = (void *)lock; 280 union { 281 __u32 val; 282 arch_spinlock_t lock; 283 } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED }; 284 285 compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0"); 286 BUILD_BUG_ON(sizeof(*l) != sizeof(__u32)); 287 BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32)); 288 arch_spin_lock(l); 289 } 290 291 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) 292 { 293 arch_spinlock_t *l = (void *)lock; 294 295 arch_spin_unlock(l); 296 } 297 298 #else 299 300 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) 301 { 302 atomic_t *l = (void *)lock; 303 304 BUILD_BUG_ON(sizeof(*l) != sizeof(*lock)); 305 do { 306 atomic_cond_read_relaxed(l, !VAL); 307 } while (atomic_xchg(l, 1)); 308 } 309 310 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) 311 { 312 atomic_t *l = (void *)lock; 313 314 atomic_set_release(l, 0); 315 } 316 317 #endif 318 319 static DEFINE_PER_CPU(unsigned long, irqsave_flags); 320 321 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock) 322 { 323 unsigned long flags; 324 325 local_irq_save(flags); 326 __bpf_spin_lock(lock); 327 __this_cpu_write(irqsave_flags, flags); 328 } 329 330 notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) 331 { 332 __bpf_spin_lock_irqsave(lock); 333 return 0; 334 } 335 336 const struct bpf_func_proto bpf_spin_lock_proto = { 337 .func = bpf_spin_lock, 338 .gpl_only = false, 339 .ret_type = RET_VOID, 340 .arg1_type = ARG_PTR_TO_SPIN_LOCK, 341 .arg1_btf_id = BPF_PTR_POISON, 342 }; 343 344 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock) 345 { 346 unsigned long flags; 347 348 flags = __this_cpu_read(irqsave_flags); 349 __bpf_spin_unlock(lock); 350 local_irq_restore(flags); 351 } 352 353 notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) 354 { 355 __bpf_spin_unlock_irqrestore(lock); 356 return 0; 357 } 358 359 const struct bpf_func_proto bpf_spin_unlock_proto = { 360 .func = bpf_spin_unlock, 361 .gpl_only = false, 362 .ret_type = RET_VOID, 363 .arg1_type = ARG_PTR_TO_SPIN_LOCK, 364 .arg1_btf_id = BPF_PTR_POISON, 365 }; 366 367 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, 368 bool lock_src) 369 { 370 struct bpf_spin_lock *lock; 371 372 if (lock_src) 373 lock = src + map->record->spin_lock_off; 374 else 375 lock = dst + map->record->spin_lock_off; 376 preempt_disable(); 377 __bpf_spin_lock_irqsave(lock); 378 copy_map_value(map, dst, src); 379 __bpf_spin_unlock_irqrestore(lock); 380 preempt_enable(); 381 } 382 383 BPF_CALL_0(bpf_jiffies64) 384 { 385 return get_jiffies_64(); 386 } 387 388 const struct bpf_func_proto bpf_jiffies64_proto = { 389 .func = bpf_jiffies64, 390 .gpl_only = false, 391 .ret_type = RET_INTEGER, 392 }; 393 394 #ifdef CONFIG_CGROUPS 395 BPF_CALL_0(bpf_get_current_cgroup_id) 396 { 397 struct cgroup *cgrp; 398 u64 cgrp_id; 399 400 rcu_read_lock(); 401 cgrp = task_dfl_cgroup(current); 402 cgrp_id = cgroup_id(cgrp); 403 rcu_read_unlock(); 404 405 return cgrp_id; 406 } 407 408 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { 409 .func = bpf_get_current_cgroup_id, 410 .gpl_only = false, 411 .ret_type = RET_INTEGER, 412 }; 413 414 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level) 415 { 416 struct cgroup *cgrp; 417 struct cgroup *ancestor; 418 u64 cgrp_id; 419 420 rcu_read_lock(); 421 cgrp = task_dfl_cgroup(current); 422 ancestor = cgroup_ancestor(cgrp, ancestor_level); 423 cgrp_id = ancestor ? cgroup_id(ancestor) : 0; 424 rcu_read_unlock(); 425 426 return cgrp_id; 427 } 428 429 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = { 430 .func = bpf_get_current_ancestor_cgroup_id, 431 .gpl_only = false, 432 .ret_type = RET_INTEGER, 433 .arg1_type = ARG_ANYTHING, 434 }; 435 #endif /* CONFIG_CGROUPS */ 436 437 #define BPF_STRTOX_BASE_MASK 0x1F 438 439 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags, 440 unsigned long long *res, bool *is_negative) 441 { 442 unsigned int base = flags & BPF_STRTOX_BASE_MASK; 443 const char *cur_buf = buf; 444 size_t cur_len = buf_len; 445 unsigned int consumed; 446 size_t val_len; 447 char str[64]; 448 449 if (!buf || !buf_len || !res || !is_negative) 450 return -EINVAL; 451 452 if (base != 0 && base != 8 && base != 10 && base != 16) 453 return -EINVAL; 454 455 if (flags & ~BPF_STRTOX_BASE_MASK) 456 return -EINVAL; 457 458 while (cur_buf < buf + buf_len && isspace(*cur_buf)) 459 ++cur_buf; 460 461 *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-'); 462 if (*is_negative) 463 ++cur_buf; 464 465 consumed = cur_buf - buf; 466 cur_len -= consumed; 467 if (!cur_len) 468 return -EINVAL; 469 470 cur_len = min(cur_len, sizeof(str) - 1); 471 memcpy(str, cur_buf, cur_len); 472 str[cur_len] = '\0'; 473 cur_buf = str; 474 475 cur_buf = _parse_integer_fixup_radix(cur_buf, &base); 476 val_len = _parse_integer(cur_buf, base, res); 477 478 if (val_len & KSTRTOX_OVERFLOW) 479 return -ERANGE; 480 481 if (val_len == 0) 482 return -EINVAL; 483 484 cur_buf += val_len; 485 consumed += cur_buf - str; 486 487 return consumed; 488 } 489 490 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, 491 long long *res) 492 { 493 unsigned long long _res; 494 bool is_negative; 495 int err; 496 497 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); 498 if (err < 0) 499 return err; 500 if (is_negative) { 501 if ((long long)-_res > 0) 502 return -ERANGE; 503 *res = -_res; 504 } else { 505 if ((long long)_res < 0) 506 return -ERANGE; 507 *res = _res; 508 } 509 return err; 510 } 511 512 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, 513 long *, res) 514 { 515 long long _res; 516 int err; 517 518 err = __bpf_strtoll(buf, buf_len, flags, &_res); 519 if (err < 0) 520 return err; 521 if (_res != (long)_res) 522 return -ERANGE; 523 *res = _res; 524 return err; 525 } 526 527 const struct bpf_func_proto bpf_strtol_proto = { 528 .func = bpf_strtol, 529 .gpl_only = false, 530 .ret_type = RET_INTEGER, 531 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 532 .arg2_type = ARG_CONST_SIZE, 533 .arg3_type = ARG_ANYTHING, 534 .arg4_type = ARG_PTR_TO_LONG, 535 }; 536 537 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, 538 unsigned long *, res) 539 { 540 unsigned long long _res; 541 bool is_negative; 542 int err; 543 544 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); 545 if (err < 0) 546 return err; 547 if (is_negative) 548 return -EINVAL; 549 if (_res != (unsigned long)_res) 550 return -ERANGE; 551 *res = _res; 552 return err; 553 } 554 555 const struct bpf_func_proto bpf_strtoul_proto = { 556 .func = bpf_strtoul, 557 .gpl_only = false, 558 .ret_type = RET_INTEGER, 559 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 560 .arg2_type = ARG_CONST_SIZE, 561 .arg3_type = ARG_ANYTHING, 562 .arg4_type = ARG_PTR_TO_LONG, 563 }; 564 565 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) 566 { 567 return strncmp(s1, s2, s1_sz); 568 } 569 570 static const struct bpf_func_proto bpf_strncmp_proto = { 571 .func = bpf_strncmp, 572 .gpl_only = false, 573 .ret_type = RET_INTEGER, 574 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 575 .arg2_type = ARG_CONST_SIZE, 576 .arg3_type = ARG_PTR_TO_CONST_STR, 577 }; 578 579 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino, 580 struct bpf_pidns_info *, nsdata, u32, size) 581 { 582 struct task_struct *task = current; 583 struct pid_namespace *pidns; 584 int err = -EINVAL; 585 586 if (unlikely(size != sizeof(struct bpf_pidns_info))) 587 goto clear; 588 589 if (unlikely((u64)(dev_t)dev != dev)) 590 goto clear; 591 592 if (unlikely(!task)) 593 goto clear; 594 595 pidns = task_active_pid_ns(task); 596 if (unlikely(!pidns)) { 597 err = -ENOENT; 598 goto clear; 599 } 600 601 if (!ns_match(&pidns->ns, (dev_t)dev, ino)) 602 goto clear; 603 604 nsdata->pid = task_pid_nr_ns(task, pidns); 605 nsdata->tgid = task_tgid_nr_ns(task, pidns); 606 return 0; 607 clear: 608 memset((void *)nsdata, 0, (size_t) size); 609 return err; 610 } 611 612 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = { 613 .func = bpf_get_ns_current_pid_tgid, 614 .gpl_only = false, 615 .ret_type = RET_INTEGER, 616 .arg1_type = ARG_ANYTHING, 617 .arg2_type = ARG_ANYTHING, 618 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 619 .arg4_type = ARG_CONST_SIZE, 620 }; 621 622 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { 623 .func = bpf_get_raw_cpu_id, 624 .gpl_only = false, 625 .ret_type = RET_INTEGER, 626 }; 627 628 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map, 629 u64, flags, void *, data, u64, size) 630 { 631 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 632 return -EINVAL; 633 634 return bpf_event_output(map, flags, data, size, NULL, 0, NULL); 635 } 636 637 const struct bpf_func_proto bpf_event_output_data_proto = { 638 .func = bpf_event_output_data, 639 .gpl_only = true, 640 .ret_type = RET_INTEGER, 641 .arg1_type = ARG_PTR_TO_CTX, 642 .arg2_type = ARG_CONST_MAP_PTR, 643 .arg3_type = ARG_ANYTHING, 644 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 645 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 646 }; 647 648 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size, 649 const void __user *, user_ptr) 650 { 651 int ret = copy_from_user(dst, user_ptr, size); 652 653 if (unlikely(ret)) { 654 memset(dst, 0, size); 655 ret = -EFAULT; 656 } 657 658 return ret; 659 } 660 661 const struct bpf_func_proto bpf_copy_from_user_proto = { 662 .func = bpf_copy_from_user, 663 .gpl_only = false, 664 .might_sleep = true, 665 .ret_type = RET_INTEGER, 666 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 667 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 668 .arg3_type = ARG_ANYTHING, 669 }; 670 671 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size, 672 const void __user *, user_ptr, struct task_struct *, tsk, u64, flags) 673 { 674 int ret; 675 676 /* flags is not used yet */ 677 if (unlikely(flags)) 678 return -EINVAL; 679 680 if (unlikely(!size)) 681 return 0; 682 683 ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0); 684 if (ret == size) 685 return 0; 686 687 memset(dst, 0, size); 688 /* Return -EFAULT for partial read */ 689 return ret < 0 ? ret : -EFAULT; 690 } 691 692 const struct bpf_func_proto bpf_copy_from_user_task_proto = { 693 .func = bpf_copy_from_user_task, 694 .gpl_only = true, 695 .might_sleep = true, 696 .ret_type = RET_INTEGER, 697 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 698 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 699 .arg3_type = ARG_ANYTHING, 700 .arg4_type = ARG_PTR_TO_BTF_ID, 701 .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], 702 .arg5_type = ARG_ANYTHING 703 }; 704 705 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) 706 { 707 if (cpu >= nr_cpu_ids) 708 return (unsigned long)NULL; 709 710 return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu); 711 } 712 713 const struct bpf_func_proto bpf_per_cpu_ptr_proto = { 714 .func = bpf_per_cpu_ptr, 715 .gpl_only = false, 716 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY, 717 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, 718 .arg2_type = ARG_ANYTHING, 719 }; 720 721 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr) 722 { 723 return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr); 724 } 725 726 const struct bpf_func_proto bpf_this_cpu_ptr_proto = { 727 .func = bpf_this_cpu_ptr, 728 .gpl_only = false, 729 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY, 730 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, 731 }; 732 733 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, 734 size_t bufsz) 735 { 736 void __user *user_ptr = (__force void __user *)unsafe_ptr; 737 738 buf[0] = 0; 739 740 switch (fmt_ptype) { 741 case 's': 742 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 743 if ((unsigned long)unsafe_ptr < TASK_SIZE) 744 return strncpy_from_user_nofault(buf, user_ptr, bufsz); 745 fallthrough; 746 #endif 747 case 'k': 748 return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz); 749 case 'u': 750 return strncpy_from_user_nofault(buf, user_ptr, bufsz); 751 } 752 753 return -EINVAL; 754 } 755 756 /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary 757 * arguments representation. 758 */ 759 #define MAX_BPRINTF_BIN_ARGS 512 760 761 /* Support executing three nested bprintf helper calls on a given CPU */ 762 #define MAX_BPRINTF_NEST_LEVEL 3 763 struct bpf_bprintf_buffers { 764 char bin_args[MAX_BPRINTF_BIN_ARGS]; 765 char buf[MAX_BPRINTF_BUF]; 766 }; 767 768 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs); 769 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level); 770 771 static int try_get_buffers(struct bpf_bprintf_buffers **bufs) 772 { 773 int nest_level; 774 775 preempt_disable(); 776 nest_level = this_cpu_inc_return(bpf_bprintf_nest_level); 777 if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) { 778 this_cpu_dec(bpf_bprintf_nest_level); 779 preempt_enable(); 780 return -EBUSY; 781 } 782 *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); 783 784 return 0; 785 } 786 787 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data) 788 { 789 if (!data->bin_args && !data->buf) 790 return; 791 if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0)) 792 return; 793 this_cpu_dec(bpf_bprintf_nest_level); 794 preempt_enable(); 795 } 796 797 /* 798 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers 799 * 800 * Returns a negative value if fmt is an invalid format string or 0 otherwise. 801 * 802 * This can be used in two ways: 803 * - Format string verification only: when data->get_bin_args is false 804 * - Arguments preparation: in addition to the above verification, it writes in 805 * data->bin_args a binary representation of arguments usable by bstr_printf 806 * where pointers from BPF have been sanitized. 807 * 808 * In argument preparation mode, if 0 is returned, safe temporary buffers are 809 * allocated and bpf_bprintf_cleanup should be called to free them after use. 810 */ 811 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, 812 u32 num_args, struct bpf_bprintf_data *data) 813 { 814 bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; 815 char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end; 816 struct bpf_bprintf_buffers *buffers = NULL; 817 size_t sizeof_cur_arg, sizeof_cur_ip; 818 int err, i, num_spec = 0; 819 u64 cur_arg; 820 char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX"; 821 822 fmt_end = strnchr(fmt, fmt_size, 0); 823 if (!fmt_end) 824 return -EINVAL; 825 fmt_size = fmt_end - fmt; 826 827 if (get_buffers && try_get_buffers(&buffers)) 828 return -EBUSY; 829 830 if (data->get_bin_args) { 831 if (num_args) 832 tmp_buf = buffers->bin_args; 833 tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS; 834 data->bin_args = (u32 *)tmp_buf; 835 } 836 837 if (data->get_buf) 838 data->buf = buffers->buf; 839 840 for (i = 0; i < fmt_size; i++) { 841 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { 842 err = -EINVAL; 843 goto out; 844 } 845 846 if (fmt[i] != '%') 847 continue; 848 849 if (fmt[i + 1] == '%') { 850 i++; 851 continue; 852 } 853 854 if (num_spec >= num_args) { 855 err = -EINVAL; 856 goto out; 857 } 858 859 /* The string is zero-terminated so if fmt[i] != 0, we can 860 * always access fmt[i + 1], in the worst case it will be a 0 861 */ 862 i++; 863 864 /* skip optional "[0 +-][num]" width formatting field */ 865 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || 866 fmt[i] == ' ') 867 i++; 868 if (fmt[i] >= '1' && fmt[i] <= '9') { 869 i++; 870 while (fmt[i] >= '0' && fmt[i] <= '9') 871 i++; 872 } 873 874 if (fmt[i] == 'p') { 875 sizeof_cur_arg = sizeof(long); 876 877 if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') && 878 fmt[i + 2] == 's') { 879 fmt_ptype = fmt[i + 1]; 880 i += 2; 881 goto fmt_str; 882 } 883 884 if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) || 885 ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' || 886 fmt[i + 1] == 'x' || fmt[i + 1] == 's' || 887 fmt[i + 1] == 'S') { 888 /* just kernel pointers */ 889 if (tmp_buf) 890 cur_arg = raw_args[num_spec]; 891 i++; 892 goto nocopy_fmt; 893 } 894 895 if (fmt[i + 1] == 'B') { 896 if (tmp_buf) { 897 err = snprintf(tmp_buf, 898 (tmp_buf_end - tmp_buf), 899 "%pB", 900 (void *)(long)raw_args[num_spec]); 901 tmp_buf += (err + 1); 902 } 903 904 i++; 905 num_spec++; 906 continue; 907 } 908 909 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ 910 if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') || 911 (fmt[i + 2] != '4' && fmt[i + 2] != '6')) { 912 err = -EINVAL; 913 goto out; 914 } 915 916 i += 2; 917 if (!tmp_buf) 918 goto nocopy_fmt; 919 920 sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16; 921 if (tmp_buf_end - tmp_buf < sizeof_cur_ip) { 922 err = -ENOSPC; 923 goto out; 924 } 925 926 unsafe_ptr = (char *)(long)raw_args[num_spec]; 927 err = copy_from_kernel_nofault(cur_ip, unsafe_ptr, 928 sizeof_cur_ip); 929 if (err < 0) 930 memset(cur_ip, 0, sizeof_cur_ip); 931 932 /* hack: bstr_printf expects IP addresses to be 933 * pre-formatted as strings, ironically, the easiest way 934 * to do that is to call snprintf. 935 */ 936 ip_spec[2] = fmt[i - 1]; 937 ip_spec[3] = fmt[i]; 938 err = snprintf(tmp_buf, tmp_buf_end - tmp_buf, 939 ip_spec, &cur_ip); 940 941 tmp_buf += err + 1; 942 num_spec++; 943 944 continue; 945 } else if (fmt[i] == 's') { 946 fmt_ptype = fmt[i]; 947 fmt_str: 948 if (fmt[i + 1] != 0 && 949 !isspace(fmt[i + 1]) && 950 !ispunct(fmt[i + 1])) { 951 err = -EINVAL; 952 goto out; 953 } 954 955 if (!tmp_buf) 956 goto nocopy_fmt; 957 958 if (tmp_buf_end == tmp_buf) { 959 err = -ENOSPC; 960 goto out; 961 } 962 963 unsafe_ptr = (char *)(long)raw_args[num_spec]; 964 err = bpf_trace_copy_string(tmp_buf, unsafe_ptr, 965 fmt_ptype, 966 tmp_buf_end - tmp_buf); 967 if (err < 0) { 968 tmp_buf[0] = '\0'; 969 err = 1; 970 } 971 972 tmp_buf += err; 973 num_spec++; 974 975 continue; 976 } else if (fmt[i] == 'c') { 977 if (!tmp_buf) 978 goto nocopy_fmt; 979 980 if (tmp_buf_end == tmp_buf) { 981 err = -ENOSPC; 982 goto out; 983 } 984 985 *tmp_buf = raw_args[num_spec]; 986 tmp_buf++; 987 num_spec++; 988 989 continue; 990 } 991 992 sizeof_cur_arg = sizeof(int); 993 994 if (fmt[i] == 'l') { 995 sizeof_cur_arg = sizeof(long); 996 i++; 997 } 998 if (fmt[i] == 'l') { 999 sizeof_cur_arg = sizeof(long long); 1000 i++; 1001 } 1002 1003 if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' && 1004 fmt[i] != 'x' && fmt[i] != 'X') { 1005 err = -EINVAL; 1006 goto out; 1007 } 1008 1009 if (tmp_buf) 1010 cur_arg = raw_args[num_spec]; 1011 nocopy_fmt: 1012 if (tmp_buf) { 1013 tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32)); 1014 if (tmp_buf_end - tmp_buf < sizeof_cur_arg) { 1015 err = -ENOSPC; 1016 goto out; 1017 } 1018 1019 if (sizeof_cur_arg == 8) { 1020 *(u32 *)tmp_buf = *(u32 *)&cur_arg; 1021 *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1); 1022 } else { 1023 *(u32 *)tmp_buf = (u32)(long)cur_arg; 1024 } 1025 tmp_buf += sizeof_cur_arg; 1026 } 1027 num_spec++; 1028 } 1029 1030 err = 0; 1031 out: 1032 if (err) 1033 bpf_bprintf_cleanup(data); 1034 return err; 1035 } 1036 1037 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, 1038 const void *, args, u32, data_len) 1039 { 1040 struct bpf_bprintf_data data = { 1041 .get_bin_args = true, 1042 }; 1043 int err, num_args; 1044 1045 if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 || 1046 (data_len && !args)) 1047 return -EINVAL; 1048 num_args = data_len / 8; 1049 1050 /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we 1051 * can safely give an unbounded size. 1052 */ 1053 err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data); 1054 if (err < 0) 1055 return err; 1056 1057 err = bstr_printf(str, str_size, fmt, data.bin_args); 1058 1059 bpf_bprintf_cleanup(&data); 1060 1061 return err + 1; 1062 } 1063 1064 const struct bpf_func_proto bpf_snprintf_proto = { 1065 .func = bpf_snprintf, 1066 .gpl_only = true, 1067 .ret_type = RET_INTEGER, 1068 .arg1_type = ARG_PTR_TO_MEM_OR_NULL, 1069 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1070 .arg3_type = ARG_PTR_TO_CONST_STR, 1071 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 1072 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1073 }; 1074 1075 /* BPF map elements can contain 'struct bpf_timer'. 1076 * Such map owns all of its BPF timers. 1077 * 'struct bpf_timer' is allocated as part of map element allocation 1078 * and it's zero initialized. 1079 * That space is used to keep 'struct bpf_timer_kern'. 1080 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and 1081 * remembers 'struct bpf_map *' pointer it's part of. 1082 * bpf_timer_set_callback() increments prog refcnt and assign bpf callback_fn. 1083 * bpf_timer_start() arms the timer. 1084 * If user space reference to a map goes to zero at this point 1085 * ops->map_release_uref callback is responsible for cancelling the timers, 1086 * freeing their memory, and decrementing prog's refcnts. 1087 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt. 1088 * Inner maps can contain bpf timers as well. ops->map_release_uref is 1089 * freeing the timers when inner map is replaced or deleted by user space. 1090 */ 1091 struct bpf_hrtimer { 1092 struct hrtimer timer; 1093 struct bpf_map *map; 1094 struct bpf_prog *prog; 1095 void __rcu *callback_fn; 1096 void *value; 1097 }; 1098 1099 /* the actual struct hidden inside uapi struct bpf_timer */ 1100 struct bpf_timer_kern { 1101 struct bpf_hrtimer *timer; 1102 /* bpf_spin_lock is used here instead of spinlock_t to make 1103 * sure that it always fits into space reserved by struct bpf_timer 1104 * regardless of LOCKDEP and spinlock debug flags. 1105 */ 1106 struct bpf_spin_lock lock; 1107 } __attribute__((aligned(8))); 1108 1109 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running); 1110 1111 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) 1112 { 1113 struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer); 1114 struct bpf_map *map = t->map; 1115 void *value = t->value; 1116 bpf_callback_t callback_fn; 1117 void *key; 1118 u32 idx; 1119 1120 BTF_TYPE_EMIT(struct bpf_timer); 1121 callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held()); 1122 if (!callback_fn) 1123 goto out; 1124 1125 /* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and 1126 * cannot be preempted by another bpf_timer_cb() on the same cpu. 1127 * Remember the timer this callback is servicing to prevent 1128 * deadlock if callback_fn() calls bpf_timer_cancel() or 1129 * bpf_map_delete_elem() on the same timer. 1130 */ 1131 this_cpu_write(hrtimer_running, t); 1132 if (map->map_type == BPF_MAP_TYPE_ARRAY) { 1133 struct bpf_array *array = container_of(map, struct bpf_array, map); 1134 1135 /* compute the key */ 1136 idx = ((char *)value - array->value) / array->elem_size; 1137 key = &idx; 1138 } else { /* hash or lru */ 1139 key = value - round_up(map->key_size, 8); 1140 } 1141 1142 callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0); 1143 /* The verifier checked that return value is zero. */ 1144 1145 this_cpu_write(hrtimer_running, NULL); 1146 out: 1147 return HRTIMER_NORESTART; 1148 } 1149 1150 BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map, 1151 u64, flags) 1152 { 1153 clockid_t clockid = flags & (MAX_CLOCKS - 1); 1154 struct bpf_hrtimer *t; 1155 int ret = 0; 1156 1157 BUILD_BUG_ON(MAX_CLOCKS != 16); 1158 BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer)); 1159 BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer)); 1160 1161 if (in_nmi()) 1162 return -EOPNOTSUPP; 1163 1164 if (flags >= MAX_CLOCKS || 1165 /* similar to timerfd except _ALARM variants are not supported */ 1166 (clockid != CLOCK_MONOTONIC && 1167 clockid != CLOCK_REALTIME && 1168 clockid != CLOCK_BOOTTIME)) 1169 return -EINVAL; 1170 __bpf_spin_lock_irqsave(&timer->lock); 1171 t = timer->timer; 1172 if (t) { 1173 ret = -EBUSY; 1174 goto out; 1175 } 1176 if (!atomic64_read(&map->usercnt)) { 1177 /* maps with timers must be either held by user space 1178 * or pinned in bpffs. 1179 */ 1180 ret = -EPERM; 1181 goto out; 1182 } 1183 /* allocate hrtimer via map_kmalloc to use memcg accounting */ 1184 t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node); 1185 if (!t) { 1186 ret = -ENOMEM; 1187 goto out; 1188 } 1189 t->value = (void *)timer - map->record->timer_off; 1190 t->map = map; 1191 t->prog = NULL; 1192 rcu_assign_pointer(t->callback_fn, NULL); 1193 hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); 1194 t->timer.function = bpf_timer_cb; 1195 timer->timer = t; 1196 out: 1197 __bpf_spin_unlock_irqrestore(&timer->lock); 1198 return ret; 1199 } 1200 1201 static const struct bpf_func_proto bpf_timer_init_proto = { 1202 .func = bpf_timer_init, 1203 .gpl_only = true, 1204 .ret_type = RET_INTEGER, 1205 .arg1_type = ARG_PTR_TO_TIMER, 1206 .arg2_type = ARG_CONST_MAP_PTR, 1207 .arg3_type = ARG_ANYTHING, 1208 }; 1209 1210 BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn, 1211 struct bpf_prog_aux *, aux) 1212 { 1213 struct bpf_prog *prev, *prog = aux->prog; 1214 struct bpf_hrtimer *t; 1215 int ret = 0; 1216 1217 if (in_nmi()) 1218 return -EOPNOTSUPP; 1219 __bpf_spin_lock_irqsave(&timer->lock); 1220 t = timer->timer; 1221 if (!t) { 1222 ret = -EINVAL; 1223 goto out; 1224 } 1225 if (!atomic64_read(&t->map->usercnt)) { 1226 /* maps with timers must be either held by user space 1227 * or pinned in bpffs. Otherwise timer might still be 1228 * running even when bpf prog is detached and user space 1229 * is gone, since map_release_uref won't ever be called. 1230 */ 1231 ret = -EPERM; 1232 goto out; 1233 } 1234 prev = t->prog; 1235 if (prev != prog) { 1236 /* Bump prog refcnt once. Every bpf_timer_set_callback() 1237 * can pick different callback_fn-s within the same prog. 1238 */ 1239 prog = bpf_prog_inc_not_zero(prog); 1240 if (IS_ERR(prog)) { 1241 ret = PTR_ERR(prog); 1242 goto out; 1243 } 1244 if (prev) 1245 /* Drop prev prog refcnt when swapping with new prog */ 1246 bpf_prog_put(prev); 1247 t->prog = prog; 1248 } 1249 rcu_assign_pointer(t->callback_fn, callback_fn); 1250 out: 1251 __bpf_spin_unlock_irqrestore(&timer->lock); 1252 return ret; 1253 } 1254 1255 static const struct bpf_func_proto bpf_timer_set_callback_proto = { 1256 .func = bpf_timer_set_callback, 1257 .gpl_only = true, 1258 .ret_type = RET_INTEGER, 1259 .arg1_type = ARG_PTR_TO_TIMER, 1260 .arg2_type = ARG_PTR_TO_FUNC, 1261 }; 1262 1263 BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags) 1264 { 1265 struct bpf_hrtimer *t; 1266 int ret = 0; 1267 enum hrtimer_mode mode; 1268 1269 if (in_nmi()) 1270 return -EOPNOTSUPP; 1271 if (flags > BPF_F_TIMER_ABS) 1272 return -EINVAL; 1273 __bpf_spin_lock_irqsave(&timer->lock); 1274 t = timer->timer; 1275 if (!t || !t->prog) { 1276 ret = -EINVAL; 1277 goto out; 1278 } 1279 1280 if (flags & BPF_F_TIMER_ABS) 1281 mode = HRTIMER_MODE_ABS_SOFT; 1282 else 1283 mode = HRTIMER_MODE_REL_SOFT; 1284 1285 hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); 1286 out: 1287 __bpf_spin_unlock_irqrestore(&timer->lock); 1288 return ret; 1289 } 1290 1291 static const struct bpf_func_proto bpf_timer_start_proto = { 1292 .func = bpf_timer_start, 1293 .gpl_only = true, 1294 .ret_type = RET_INTEGER, 1295 .arg1_type = ARG_PTR_TO_TIMER, 1296 .arg2_type = ARG_ANYTHING, 1297 .arg3_type = ARG_ANYTHING, 1298 }; 1299 1300 static void drop_prog_refcnt(struct bpf_hrtimer *t) 1301 { 1302 struct bpf_prog *prog = t->prog; 1303 1304 if (prog) { 1305 bpf_prog_put(prog); 1306 t->prog = NULL; 1307 rcu_assign_pointer(t->callback_fn, NULL); 1308 } 1309 } 1310 1311 BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer) 1312 { 1313 struct bpf_hrtimer *t; 1314 int ret = 0; 1315 1316 if (in_nmi()) 1317 return -EOPNOTSUPP; 1318 __bpf_spin_lock_irqsave(&timer->lock); 1319 t = timer->timer; 1320 if (!t) { 1321 ret = -EINVAL; 1322 goto out; 1323 } 1324 if (this_cpu_read(hrtimer_running) == t) { 1325 /* If bpf callback_fn is trying to bpf_timer_cancel() 1326 * its own timer the hrtimer_cancel() will deadlock 1327 * since it waits for callback_fn to finish 1328 */ 1329 ret = -EDEADLK; 1330 goto out; 1331 } 1332 drop_prog_refcnt(t); 1333 out: 1334 __bpf_spin_unlock_irqrestore(&timer->lock); 1335 /* Cancel the timer and wait for associated callback to finish 1336 * if it was running. 1337 */ 1338 ret = ret ?: hrtimer_cancel(&t->timer); 1339 return ret; 1340 } 1341 1342 static const struct bpf_func_proto bpf_timer_cancel_proto = { 1343 .func = bpf_timer_cancel, 1344 .gpl_only = true, 1345 .ret_type = RET_INTEGER, 1346 .arg1_type = ARG_PTR_TO_TIMER, 1347 }; 1348 1349 /* This function is called by map_delete/update_elem for individual element and 1350 * by ops->map_release_uref when the user space reference to a map reaches zero. 1351 */ 1352 void bpf_timer_cancel_and_free(void *val) 1353 { 1354 struct bpf_timer_kern *timer = val; 1355 struct bpf_hrtimer *t; 1356 1357 /* Performance optimization: read timer->timer without lock first. */ 1358 if (!READ_ONCE(timer->timer)) 1359 return; 1360 1361 __bpf_spin_lock_irqsave(&timer->lock); 1362 /* re-read it under lock */ 1363 t = timer->timer; 1364 if (!t) 1365 goto out; 1366 drop_prog_refcnt(t); 1367 /* The subsequent bpf_timer_start/cancel() helpers won't be able to use 1368 * this timer, since it won't be initialized. 1369 */ 1370 timer->timer = NULL; 1371 out: 1372 __bpf_spin_unlock_irqrestore(&timer->lock); 1373 if (!t) 1374 return; 1375 /* Cancel the timer and wait for callback to complete if it was running. 1376 * If hrtimer_cancel() can be safely called it's safe to call kfree(t) 1377 * right after for both preallocated and non-preallocated maps. 1378 * The timer->timer = NULL was already done and no code path can 1379 * see address 't' anymore. 1380 * 1381 * Check that bpf_map_delete/update_elem() wasn't called from timer 1382 * callback_fn. In such case don't call hrtimer_cancel() (since it will 1383 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just 1384 * return -1). Though callback_fn is still running on this cpu it's 1385 * safe to do kfree(t) because bpf_timer_cb() read everything it needed 1386 * from 't'. The bpf subprog callback_fn won't be able to access 't', 1387 * since timer->timer = NULL was already done. The timer will be 1388 * effectively cancelled because bpf_timer_cb() will return 1389 * HRTIMER_NORESTART. 1390 */ 1391 if (this_cpu_read(hrtimer_running) != t) 1392 hrtimer_cancel(&t->timer); 1393 kfree(t); 1394 } 1395 1396 BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr) 1397 { 1398 unsigned long *kptr = map_value; 1399 1400 return xchg(kptr, (unsigned long)ptr); 1401 } 1402 1403 /* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg() 1404 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to 1405 * denote type that verifier will determine. 1406 */ 1407 static const struct bpf_func_proto bpf_kptr_xchg_proto = { 1408 .func = bpf_kptr_xchg, 1409 .gpl_only = false, 1410 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 1411 .ret_btf_id = BPF_PTR_POISON, 1412 .arg1_type = ARG_PTR_TO_KPTR, 1413 .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE, 1414 .arg2_btf_id = BPF_PTR_POISON, 1415 }; 1416 1417 /* Since the upper 8 bits of dynptr->size is reserved, the 1418 * maximum supported size is 2^24 - 1. 1419 */ 1420 #define DYNPTR_MAX_SIZE ((1UL << 24) - 1) 1421 #define DYNPTR_TYPE_SHIFT 28 1422 #define DYNPTR_SIZE_MASK 0xFFFFFF 1423 #define DYNPTR_RDONLY_BIT BIT(31) 1424 1425 static bool bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr) 1426 { 1427 return ptr->size & DYNPTR_RDONLY_BIT; 1428 } 1429 1430 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) 1431 { 1432 ptr->size |= DYNPTR_RDONLY_BIT; 1433 } 1434 1435 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type) 1436 { 1437 ptr->size |= type << DYNPTR_TYPE_SHIFT; 1438 } 1439 1440 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr) 1441 { 1442 return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT; 1443 } 1444 1445 u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr) 1446 { 1447 return ptr->size & DYNPTR_SIZE_MASK; 1448 } 1449 1450 int bpf_dynptr_check_size(u32 size) 1451 { 1452 return size > DYNPTR_MAX_SIZE ? -E2BIG : 0; 1453 } 1454 1455 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, 1456 enum bpf_dynptr_type type, u32 offset, u32 size) 1457 { 1458 ptr->data = data; 1459 ptr->offset = offset; 1460 ptr->size = size; 1461 bpf_dynptr_set_type(ptr, type); 1462 } 1463 1464 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) 1465 { 1466 memset(ptr, 0, sizeof(*ptr)); 1467 } 1468 1469 static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) 1470 { 1471 u32 size = bpf_dynptr_get_size(ptr); 1472 1473 if (len > size || offset > size - len) 1474 return -E2BIG; 1475 1476 return 0; 1477 } 1478 1479 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) 1480 { 1481 int err; 1482 1483 BTF_TYPE_EMIT(struct bpf_dynptr); 1484 1485 err = bpf_dynptr_check_size(size); 1486 if (err) 1487 goto error; 1488 1489 /* flags is currently unsupported */ 1490 if (flags) { 1491 err = -EINVAL; 1492 goto error; 1493 } 1494 1495 bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size); 1496 1497 return 0; 1498 1499 error: 1500 bpf_dynptr_set_null(ptr); 1501 return err; 1502 } 1503 1504 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = { 1505 .func = bpf_dynptr_from_mem, 1506 .gpl_only = false, 1507 .ret_type = RET_INTEGER, 1508 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1509 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1510 .arg3_type = ARG_ANYTHING, 1511 .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT, 1512 }; 1513 1514 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src, 1515 u32, offset, u64, flags) 1516 { 1517 enum bpf_dynptr_type type; 1518 int err; 1519 1520 if (!src->data || flags) 1521 return -EINVAL; 1522 1523 err = bpf_dynptr_check_off_len(src, offset, len); 1524 if (err) 1525 return err; 1526 1527 type = bpf_dynptr_get_type(src); 1528 1529 switch (type) { 1530 case BPF_DYNPTR_TYPE_LOCAL: 1531 case BPF_DYNPTR_TYPE_RINGBUF: 1532 /* Source and destination may possibly overlap, hence use memmove to 1533 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr 1534 * pointing to overlapping PTR_TO_MAP_VALUE regions. 1535 */ 1536 memmove(dst, src->data + src->offset + offset, len); 1537 return 0; 1538 case BPF_DYNPTR_TYPE_SKB: 1539 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len); 1540 case BPF_DYNPTR_TYPE_XDP: 1541 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len); 1542 default: 1543 WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type); 1544 return -EFAULT; 1545 } 1546 } 1547 1548 static const struct bpf_func_proto bpf_dynptr_read_proto = { 1549 .func = bpf_dynptr_read, 1550 .gpl_only = false, 1551 .ret_type = RET_INTEGER, 1552 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1553 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1554 .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1555 .arg4_type = ARG_ANYTHING, 1556 .arg5_type = ARG_ANYTHING, 1557 }; 1558 1559 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src, 1560 u32, len, u64, flags) 1561 { 1562 enum bpf_dynptr_type type; 1563 int err; 1564 1565 if (!dst->data || bpf_dynptr_is_rdonly(dst)) 1566 return -EINVAL; 1567 1568 err = bpf_dynptr_check_off_len(dst, offset, len); 1569 if (err) 1570 return err; 1571 1572 type = bpf_dynptr_get_type(dst); 1573 1574 switch (type) { 1575 case BPF_DYNPTR_TYPE_LOCAL: 1576 case BPF_DYNPTR_TYPE_RINGBUF: 1577 if (flags) 1578 return -EINVAL; 1579 /* Source and destination may possibly overlap, hence use memmove to 1580 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr 1581 * pointing to overlapping PTR_TO_MAP_VALUE regions. 1582 */ 1583 memmove(dst->data + dst->offset + offset, src, len); 1584 return 0; 1585 case BPF_DYNPTR_TYPE_SKB: 1586 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len, 1587 flags); 1588 case BPF_DYNPTR_TYPE_XDP: 1589 if (flags) 1590 return -EINVAL; 1591 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len); 1592 default: 1593 WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type); 1594 return -EFAULT; 1595 } 1596 } 1597 1598 static const struct bpf_func_proto bpf_dynptr_write_proto = { 1599 .func = bpf_dynptr_write, 1600 .gpl_only = false, 1601 .ret_type = RET_INTEGER, 1602 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1603 .arg2_type = ARG_ANYTHING, 1604 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1605 .arg4_type = ARG_CONST_SIZE_OR_ZERO, 1606 .arg5_type = ARG_ANYTHING, 1607 }; 1608 1609 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len) 1610 { 1611 enum bpf_dynptr_type type; 1612 int err; 1613 1614 if (!ptr->data) 1615 return 0; 1616 1617 err = bpf_dynptr_check_off_len(ptr, offset, len); 1618 if (err) 1619 return 0; 1620 1621 if (bpf_dynptr_is_rdonly(ptr)) 1622 return 0; 1623 1624 type = bpf_dynptr_get_type(ptr); 1625 1626 switch (type) { 1627 case BPF_DYNPTR_TYPE_LOCAL: 1628 case BPF_DYNPTR_TYPE_RINGBUF: 1629 return (unsigned long)(ptr->data + ptr->offset + offset); 1630 case BPF_DYNPTR_TYPE_SKB: 1631 case BPF_DYNPTR_TYPE_XDP: 1632 /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */ 1633 return 0; 1634 default: 1635 WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type); 1636 return 0; 1637 } 1638 } 1639 1640 static const struct bpf_func_proto bpf_dynptr_data_proto = { 1641 .func = bpf_dynptr_data, 1642 .gpl_only = false, 1643 .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL, 1644 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, 1645 .arg2_type = ARG_ANYTHING, 1646 .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO, 1647 }; 1648 1649 const struct bpf_func_proto bpf_get_current_task_proto __weak; 1650 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak; 1651 const struct bpf_func_proto bpf_probe_read_user_proto __weak; 1652 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak; 1653 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak; 1654 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak; 1655 const struct bpf_func_proto bpf_task_pt_regs_proto __weak; 1656 1657 const struct bpf_func_proto * 1658 bpf_base_func_proto(enum bpf_func_id func_id) 1659 { 1660 switch (func_id) { 1661 case BPF_FUNC_map_lookup_elem: 1662 return &bpf_map_lookup_elem_proto; 1663 case BPF_FUNC_map_update_elem: 1664 return &bpf_map_update_elem_proto; 1665 case BPF_FUNC_map_delete_elem: 1666 return &bpf_map_delete_elem_proto; 1667 case BPF_FUNC_map_push_elem: 1668 return &bpf_map_push_elem_proto; 1669 case BPF_FUNC_map_pop_elem: 1670 return &bpf_map_pop_elem_proto; 1671 case BPF_FUNC_map_peek_elem: 1672 return &bpf_map_peek_elem_proto; 1673 case BPF_FUNC_map_lookup_percpu_elem: 1674 return &bpf_map_lookup_percpu_elem_proto; 1675 case BPF_FUNC_get_prandom_u32: 1676 return &bpf_get_prandom_u32_proto; 1677 case BPF_FUNC_get_smp_processor_id: 1678 return &bpf_get_raw_smp_processor_id_proto; 1679 case BPF_FUNC_get_numa_node_id: 1680 return &bpf_get_numa_node_id_proto; 1681 case BPF_FUNC_tail_call: 1682 return &bpf_tail_call_proto; 1683 case BPF_FUNC_ktime_get_ns: 1684 return &bpf_ktime_get_ns_proto; 1685 case BPF_FUNC_ktime_get_boot_ns: 1686 return &bpf_ktime_get_boot_ns_proto; 1687 case BPF_FUNC_ktime_get_tai_ns: 1688 return &bpf_ktime_get_tai_ns_proto; 1689 case BPF_FUNC_ringbuf_output: 1690 return &bpf_ringbuf_output_proto; 1691 case BPF_FUNC_ringbuf_reserve: 1692 return &bpf_ringbuf_reserve_proto; 1693 case BPF_FUNC_ringbuf_submit: 1694 return &bpf_ringbuf_submit_proto; 1695 case BPF_FUNC_ringbuf_discard: 1696 return &bpf_ringbuf_discard_proto; 1697 case BPF_FUNC_ringbuf_query: 1698 return &bpf_ringbuf_query_proto; 1699 case BPF_FUNC_strncmp: 1700 return &bpf_strncmp_proto; 1701 case BPF_FUNC_strtol: 1702 return &bpf_strtol_proto; 1703 case BPF_FUNC_strtoul: 1704 return &bpf_strtoul_proto; 1705 default: 1706 break; 1707 } 1708 1709 if (!bpf_capable()) 1710 return NULL; 1711 1712 switch (func_id) { 1713 case BPF_FUNC_spin_lock: 1714 return &bpf_spin_lock_proto; 1715 case BPF_FUNC_spin_unlock: 1716 return &bpf_spin_unlock_proto; 1717 case BPF_FUNC_jiffies64: 1718 return &bpf_jiffies64_proto; 1719 case BPF_FUNC_per_cpu_ptr: 1720 return &bpf_per_cpu_ptr_proto; 1721 case BPF_FUNC_this_cpu_ptr: 1722 return &bpf_this_cpu_ptr_proto; 1723 case BPF_FUNC_timer_init: 1724 return &bpf_timer_init_proto; 1725 case BPF_FUNC_timer_set_callback: 1726 return &bpf_timer_set_callback_proto; 1727 case BPF_FUNC_timer_start: 1728 return &bpf_timer_start_proto; 1729 case BPF_FUNC_timer_cancel: 1730 return &bpf_timer_cancel_proto; 1731 case BPF_FUNC_kptr_xchg: 1732 return &bpf_kptr_xchg_proto; 1733 case BPF_FUNC_for_each_map_elem: 1734 return &bpf_for_each_map_elem_proto; 1735 case BPF_FUNC_loop: 1736 return &bpf_loop_proto; 1737 case BPF_FUNC_user_ringbuf_drain: 1738 return &bpf_user_ringbuf_drain_proto; 1739 case BPF_FUNC_ringbuf_reserve_dynptr: 1740 return &bpf_ringbuf_reserve_dynptr_proto; 1741 case BPF_FUNC_ringbuf_submit_dynptr: 1742 return &bpf_ringbuf_submit_dynptr_proto; 1743 case BPF_FUNC_ringbuf_discard_dynptr: 1744 return &bpf_ringbuf_discard_dynptr_proto; 1745 case BPF_FUNC_dynptr_from_mem: 1746 return &bpf_dynptr_from_mem_proto; 1747 case BPF_FUNC_dynptr_read: 1748 return &bpf_dynptr_read_proto; 1749 case BPF_FUNC_dynptr_write: 1750 return &bpf_dynptr_write_proto; 1751 case BPF_FUNC_dynptr_data: 1752 return &bpf_dynptr_data_proto; 1753 #ifdef CONFIG_CGROUPS 1754 case BPF_FUNC_cgrp_storage_get: 1755 return &bpf_cgrp_storage_get_proto; 1756 case BPF_FUNC_cgrp_storage_delete: 1757 return &bpf_cgrp_storage_delete_proto; 1758 case BPF_FUNC_get_current_cgroup_id: 1759 return &bpf_get_current_cgroup_id_proto; 1760 case BPF_FUNC_get_current_ancestor_cgroup_id: 1761 return &bpf_get_current_ancestor_cgroup_id_proto; 1762 #endif 1763 default: 1764 break; 1765 } 1766 1767 if (!perfmon_capable()) 1768 return NULL; 1769 1770 switch (func_id) { 1771 case BPF_FUNC_trace_printk: 1772 return bpf_get_trace_printk_proto(); 1773 case BPF_FUNC_get_current_task: 1774 return &bpf_get_current_task_proto; 1775 case BPF_FUNC_get_current_task_btf: 1776 return &bpf_get_current_task_btf_proto; 1777 case BPF_FUNC_probe_read_user: 1778 return &bpf_probe_read_user_proto; 1779 case BPF_FUNC_probe_read_kernel: 1780 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1781 NULL : &bpf_probe_read_kernel_proto; 1782 case BPF_FUNC_probe_read_user_str: 1783 return &bpf_probe_read_user_str_proto; 1784 case BPF_FUNC_probe_read_kernel_str: 1785 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1786 NULL : &bpf_probe_read_kernel_str_proto; 1787 case BPF_FUNC_snprintf_btf: 1788 return &bpf_snprintf_btf_proto; 1789 case BPF_FUNC_snprintf: 1790 return &bpf_snprintf_proto; 1791 case BPF_FUNC_task_pt_regs: 1792 return &bpf_task_pt_regs_proto; 1793 case BPF_FUNC_trace_vprintk: 1794 return bpf_get_trace_vprintk_proto(); 1795 default: 1796 return NULL; 1797 } 1798 } 1799 1800 void bpf_list_head_free(const struct btf_field *field, void *list_head, 1801 struct bpf_spin_lock *spin_lock) 1802 { 1803 struct list_head *head = list_head, *orig_head = list_head; 1804 1805 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head)); 1806 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head)); 1807 1808 /* Do the actual list draining outside the lock to not hold the lock for 1809 * too long, and also prevent deadlocks if tracing programs end up 1810 * executing on entry/exit of functions called inside the critical 1811 * section, and end up doing map ops that call bpf_list_head_free for 1812 * the same map value again. 1813 */ 1814 __bpf_spin_lock_irqsave(spin_lock); 1815 if (!head->next || list_empty(head)) 1816 goto unlock; 1817 head = head->next; 1818 unlock: 1819 INIT_LIST_HEAD(orig_head); 1820 __bpf_spin_unlock_irqrestore(spin_lock); 1821 1822 while (head != orig_head) { 1823 void *obj = head; 1824 1825 obj -= field->graph_root.node_offset; 1826 head = head->next; 1827 /* The contained type can also have resources, including a 1828 * bpf_list_head which needs to be freed. 1829 */ 1830 bpf_obj_free_fields(field->graph_root.value_rec, obj); 1831 /* bpf_mem_free requires migrate_disable(), since we can be 1832 * called from map free path as well apart from BPF program (as 1833 * part of map ops doing bpf_obj_free_fields). 1834 */ 1835 migrate_disable(); 1836 bpf_mem_free(&bpf_global_ma, obj); 1837 migrate_enable(); 1838 } 1839 } 1840 1841 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are 1842 * 'rb_node *', so field name of rb_node within containing struct is not 1843 * needed. 1844 * 1845 * Since bpf_rb_tree's node type has a corresponding struct btf_field with 1846 * graph_root.node_offset, it's not necessary to know field name 1847 * or type of node struct 1848 */ 1849 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \ 1850 for (pos = rb_first_postorder(root); \ 1851 pos && ({ n = rb_next_postorder(pos); 1; }); \ 1852 pos = n) 1853 1854 void bpf_rb_root_free(const struct btf_field *field, void *rb_root, 1855 struct bpf_spin_lock *spin_lock) 1856 { 1857 struct rb_root_cached orig_root, *root = rb_root; 1858 struct rb_node *pos, *n; 1859 void *obj; 1860 1861 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root)); 1862 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root)); 1863 1864 __bpf_spin_lock_irqsave(spin_lock); 1865 orig_root = *root; 1866 *root = RB_ROOT_CACHED; 1867 __bpf_spin_unlock_irqrestore(spin_lock); 1868 1869 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) { 1870 obj = pos; 1871 obj -= field->graph_root.node_offset; 1872 1873 bpf_obj_free_fields(field->graph_root.value_rec, obj); 1874 1875 migrate_disable(); 1876 bpf_mem_free(&bpf_global_ma, obj); 1877 migrate_enable(); 1878 } 1879 } 1880 1881 __diag_push(); 1882 __diag_ignore_all("-Wmissing-prototypes", 1883 "Global functions as their definitions will be in vmlinux BTF"); 1884 1885 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) 1886 { 1887 struct btf_struct_meta *meta = meta__ign; 1888 u64 size = local_type_id__k; 1889 void *p; 1890 1891 p = bpf_mem_alloc(&bpf_global_ma, size); 1892 if (!p) 1893 return NULL; 1894 if (meta) 1895 bpf_obj_init(meta->field_offs, p); 1896 return p; 1897 } 1898 1899 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec) 1900 { 1901 if (rec) 1902 bpf_obj_free_fields(rec, p); 1903 bpf_mem_free(&bpf_global_ma, p); 1904 } 1905 1906 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) 1907 { 1908 struct btf_struct_meta *meta = meta__ign; 1909 void *p = p__alloc; 1910 1911 __bpf_obj_drop_impl(p, meta ? meta->record : NULL); 1912 } 1913 1914 static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head, bool tail) 1915 { 1916 struct list_head *n = (void *)node, *h = (void *)head; 1917 1918 if (unlikely(!h->next)) 1919 INIT_LIST_HEAD(h); 1920 if (unlikely(!n->next)) 1921 INIT_LIST_HEAD(n); 1922 tail ? list_add_tail(n, h) : list_add(n, h); 1923 } 1924 1925 __bpf_kfunc void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) 1926 { 1927 return __bpf_list_add(node, head, false); 1928 } 1929 1930 __bpf_kfunc void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) 1931 { 1932 return __bpf_list_add(node, head, true); 1933 } 1934 1935 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) 1936 { 1937 struct list_head *n, *h = (void *)head; 1938 1939 if (unlikely(!h->next)) 1940 INIT_LIST_HEAD(h); 1941 if (list_empty(h)) 1942 return NULL; 1943 n = tail ? h->prev : h->next; 1944 list_del_init(n); 1945 return (struct bpf_list_node *)n; 1946 } 1947 1948 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) 1949 { 1950 return __bpf_list_del(head, false); 1951 } 1952 1953 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) 1954 { 1955 return __bpf_list_del(head, true); 1956 } 1957 1958 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, 1959 struct bpf_rb_node *node) 1960 { 1961 struct rb_root_cached *r = (struct rb_root_cached *)root; 1962 struct rb_node *n = (struct rb_node *)node; 1963 1964 rb_erase_cached(n, r); 1965 RB_CLEAR_NODE(n); 1966 return (struct bpf_rb_node *)n; 1967 } 1968 1969 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF 1970 * program 1971 */ 1972 static void __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node, 1973 void *less) 1974 { 1975 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; 1976 bpf_callback_t cb = (bpf_callback_t)less; 1977 struct rb_node *parent = NULL; 1978 bool leftmost = true; 1979 1980 while (*link) { 1981 parent = *link; 1982 if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) { 1983 link = &parent->rb_left; 1984 } else { 1985 link = &parent->rb_right; 1986 leftmost = false; 1987 } 1988 } 1989 1990 rb_link_node((struct rb_node *)node, parent, link); 1991 rb_insert_color_cached((struct rb_node *)node, 1992 (struct rb_root_cached *)root, leftmost); 1993 } 1994 1995 __bpf_kfunc void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node, 1996 bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) 1997 { 1998 __bpf_rbtree_add(root, node, (void *)less); 1999 } 2000 2001 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) 2002 { 2003 struct rb_root_cached *r = (struct rb_root_cached *)root; 2004 2005 return (struct bpf_rb_node *)rb_first_cached(r); 2006 } 2007 2008 /** 2009 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this 2010 * kfunc which is not stored in a map as a kptr, must be released by calling 2011 * bpf_task_release(). 2012 * @p: The task on which a reference is being acquired. 2013 */ 2014 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p) 2015 { 2016 return get_task_struct(p); 2017 } 2018 2019 /** 2020 * bpf_task_acquire_not_zero - Acquire a reference to a rcu task object. A task 2021 * acquired by this kfunc which is not stored in a map as a kptr, must be 2022 * released by calling bpf_task_release(). 2023 * @p: The task on which a reference is being acquired. 2024 */ 2025 __bpf_kfunc struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p) 2026 { 2027 /* For the time being this function returns NULL, as it's not currently 2028 * possible to safely acquire a reference to a task with RCU protection 2029 * using get_task_struct() and put_task_struct(). This is due to the 2030 * slightly odd mechanics of p->rcu_users, and how task RCU protection 2031 * works. 2032 * 2033 * A struct task_struct is refcounted by two different refcount_t 2034 * fields: 2035 * 2036 * 1. p->usage: The "true" refcount field which tracks a task's 2037 * lifetime. The task is freed as soon as this 2038 * refcount drops to 0. 2039 * 2040 * 2. p->rcu_users: An "RCU users" refcount field which is statically 2041 * initialized to 2, and is co-located in a union with 2042 * a struct rcu_head field (p->rcu). p->rcu_users 2043 * essentially encapsulates a single p->usage 2044 * refcount, and when p->rcu_users goes to 0, an RCU 2045 * callback is scheduled on the struct rcu_head which 2046 * decrements the p->usage refcount. 2047 * 2048 * There are two important implications to this task refcounting logic 2049 * described above. The first is that 2050 * refcount_inc_not_zero(&p->rcu_users) cannot be used anywhere, as 2051 * after the refcount goes to 0, the RCU callback being scheduled will 2052 * cause the memory backing the refcount to again be nonzero due to the 2053 * fields sharing a union. The other is that we can't rely on RCU to 2054 * guarantee that a task is valid in a BPF program. This is because a 2055 * task could have already transitioned to being in the TASK_DEAD 2056 * state, had its rcu_users refcount go to 0, and its rcu callback 2057 * invoked in which it drops its single p->usage reference. At this 2058 * point the task will be freed as soon as the last p->usage reference 2059 * goes to 0, without waiting for another RCU gp to elapse. The only 2060 * way that a BPF program can guarantee that a task is valid is in this 2061 * scenario is to hold a p->usage refcount itself. 2062 * 2063 * Until we're able to resolve this issue, either by pulling 2064 * p->rcu_users and p->rcu out of the union, or by getting rid of 2065 * p->usage and just using p->rcu_users for refcounting, we'll just 2066 * return NULL here. 2067 */ 2068 return NULL; 2069 } 2070 2071 /** 2072 * bpf_task_kptr_get - Acquire a reference on a struct task_struct kptr. A task 2073 * kptr acquired by this kfunc which is not subsequently stored in a map, must 2074 * be released by calling bpf_task_release(). 2075 * @pp: A pointer to a task kptr on which a reference is being acquired. 2076 */ 2077 __bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp) 2078 { 2079 /* We must return NULL here until we have clarity on how to properly 2080 * leverage RCU for ensuring a task's lifetime. See the comment above 2081 * in bpf_task_acquire_not_zero() for more details. 2082 */ 2083 return NULL; 2084 } 2085 2086 /** 2087 * bpf_task_release - Release the reference acquired on a task. 2088 * @p: The task on which a reference is being released. 2089 */ 2090 __bpf_kfunc void bpf_task_release(struct task_struct *p) 2091 { 2092 if (!p) 2093 return; 2094 2095 put_task_struct(p); 2096 } 2097 2098 #ifdef CONFIG_CGROUPS 2099 /** 2100 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by 2101 * this kfunc which is not stored in a map as a kptr, must be released by 2102 * calling bpf_cgroup_release(). 2103 * @cgrp: The cgroup on which a reference is being acquired. 2104 */ 2105 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) 2106 { 2107 cgroup_get(cgrp); 2108 return cgrp; 2109 } 2110 2111 /** 2112 * bpf_cgroup_kptr_get - Acquire a reference on a struct cgroup kptr. A cgroup 2113 * kptr acquired by this kfunc which is not subsequently stored in a map, must 2114 * be released by calling bpf_cgroup_release(). 2115 * @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired. 2116 */ 2117 __bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp) 2118 { 2119 struct cgroup *cgrp; 2120 2121 rcu_read_lock(); 2122 /* Another context could remove the cgroup from the map and release it 2123 * at any time, including after we've done the lookup above. This is 2124 * safe because we're in an RCU read region, so the cgroup is 2125 * guaranteed to remain valid until at least the rcu_read_unlock() 2126 * below. 2127 */ 2128 cgrp = READ_ONCE(*cgrpp); 2129 2130 if (cgrp && !cgroup_tryget(cgrp)) 2131 /* If the cgroup had been removed from the map and freed as 2132 * described above, cgroup_tryget() will return false. The 2133 * cgroup will be freed at some point after the current RCU gp 2134 * has ended, so just return NULL to the user. 2135 */ 2136 cgrp = NULL; 2137 rcu_read_unlock(); 2138 2139 return cgrp; 2140 } 2141 2142 /** 2143 * bpf_cgroup_release - Release the reference acquired on a cgroup. 2144 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to 2145 * not be freed until the current grace period has ended, even if its refcount 2146 * drops to 0. 2147 * @cgrp: The cgroup on which a reference is being released. 2148 */ 2149 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp) 2150 { 2151 if (!cgrp) 2152 return; 2153 2154 cgroup_put(cgrp); 2155 } 2156 2157 /** 2158 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor 2159 * array. A cgroup returned by this kfunc which is not subsequently stored in a 2160 * map, must be released by calling bpf_cgroup_release(). 2161 * @cgrp: The cgroup for which we're performing a lookup. 2162 * @level: The level of ancestor to look up. 2163 */ 2164 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) 2165 { 2166 struct cgroup *ancestor; 2167 2168 if (level > cgrp->level || level < 0) 2169 return NULL; 2170 2171 /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */ 2172 ancestor = cgrp->ancestors[level]; 2173 if (!cgroup_tryget(ancestor)) 2174 return NULL; 2175 return ancestor; 2176 } 2177 2178 /** 2179 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this 2180 * kfunc which is not subsequently stored in a map, must be released by calling 2181 * bpf_cgroup_release(). 2182 * @cgid: cgroup id. 2183 */ 2184 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid) 2185 { 2186 struct cgroup *cgrp; 2187 2188 cgrp = cgroup_get_from_id(cgid); 2189 if (IS_ERR(cgrp)) 2190 return NULL; 2191 return cgrp; 2192 } 2193 #endif /* CONFIG_CGROUPS */ 2194 2195 /** 2196 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up 2197 * in the root pid namespace idr. If a task is returned, it must either be 2198 * stored in a map, or released with bpf_task_release(). 2199 * @pid: The pid of the task being looked up. 2200 */ 2201 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) 2202 { 2203 struct task_struct *p; 2204 2205 rcu_read_lock(); 2206 p = find_task_by_pid_ns(pid, &init_pid_ns); 2207 if (p) 2208 bpf_task_acquire(p); 2209 rcu_read_unlock(); 2210 2211 return p; 2212 } 2213 2214 /** 2215 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. 2216 * @ptr: The dynptr whose data slice to retrieve 2217 * @offset: Offset into the dynptr 2218 * @buffer: User-provided buffer to copy contents into 2219 * @buffer__szk: Size (in bytes) of the buffer. This is the length of the 2220 * requested slice. This must be a constant. 2221 * 2222 * For non-skb and non-xdp type dynptrs, there is no difference between 2223 * bpf_dynptr_slice and bpf_dynptr_data. 2224 * 2225 * If the intention is to write to the data slice, please use 2226 * bpf_dynptr_slice_rdwr. 2227 * 2228 * The user must check that the returned pointer is not null before using it. 2229 * 2230 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice 2231 * does not change the underlying packet data pointers, so a call to 2232 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in 2233 * the bpf program. 2234 * 2235 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only 2236 * data slice (can be either direct pointer to the data or a pointer to the user 2237 * provided buffer, with its contents containing the data, if unable to obtain 2238 * direct pointer) 2239 */ 2240 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset, 2241 void *buffer, u32 buffer__szk) 2242 { 2243 enum bpf_dynptr_type type; 2244 u32 len = buffer__szk; 2245 int err; 2246 2247 if (!ptr->data) 2248 return NULL; 2249 2250 err = bpf_dynptr_check_off_len(ptr, offset, len); 2251 if (err) 2252 return NULL; 2253 2254 type = bpf_dynptr_get_type(ptr); 2255 2256 switch (type) { 2257 case BPF_DYNPTR_TYPE_LOCAL: 2258 case BPF_DYNPTR_TYPE_RINGBUF: 2259 return ptr->data + ptr->offset + offset; 2260 case BPF_DYNPTR_TYPE_SKB: 2261 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer); 2262 case BPF_DYNPTR_TYPE_XDP: 2263 { 2264 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); 2265 if (xdp_ptr) 2266 return xdp_ptr; 2267 2268 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer, len, false); 2269 return buffer; 2270 } 2271 default: 2272 WARN_ONCE(true, "unknown dynptr type %d\n", type); 2273 return NULL; 2274 } 2275 } 2276 2277 /** 2278 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data. 2279 * @ptr: The dynptr whose data slice to retrieve 2280 * @offset: Offset into the dynptr 2281 * @buffer: User-provided buffer to copy contents into 2282 * @buffer__szk: Size (in bytes) of the buffer. This is the length of the 2283 * requested slice. This must be a constant. 2284 * 2285 * For non-skb and non-xdp type dynptrs, there is no difference between 2286 * bpf_dynptr_slice and bpf_dynptr_data. 2287 * 2288 * The returned pointer is writable and may point to either directly the dynptr 2289 * data at the requested offset or to the buffer if unable to obtain a direct 2290 * data pointer to (example: the requested slice is to the paged area of an skb 2291 * packet). In the case where the returned pointer is to the buffer, the user 2292 * is responsible for persisting writes through calling bpf_dynptr_write(). This 2293 * usually looks something like this pattern: 2294 * 2295 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer)); 2296 * if (!eth) 2297 * return TC_ACT_SHOT; 2298 * 2299 * // mutate eth header // 2300 * 2301 * if (eth == buffer) 2302 * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); 2303 * 2304 * Please note that, as in the example above, the user must check that the 2305 * returned pointer is not null before using it. 2306 * 2307 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr 2308 * does not change the underlying packet data pointers, so a call to 2309 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in 2310 * the bpf program. 2311 * 2312 * Return: NULL if the call failed (eg invalid dynptr), pointer to a 2313 * data slice (can be either direct pointer to the data or a pointer to the user 2314 * provided buffer, with its contents containing the data, if unable to obtain 2315 * direct pointer) 2316 */ 2317 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset, 2318 void *buffer, u32 buffer__szk) 2319 { 2320 if (!ptr->data || bpf_dynptr_is_rdonly(ptr)) 2321 return NULL; 2322 2323 /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice. 2324 * 2325 * For skb-type dynptrs, it is safe to write into the returned pointer 2326 * if the bpf program allows skb data writes. There are two possiblities 2327 * that may occur when calling bpf_dynptr_slice_rdwr: 2328 * 2329 * 1) The requested slice is in the head of the skb. In this case, the 2330 * returned pointer is directly to skb data, and if the skb is cloned, the 2331 * verifier will have uncloned it (see bpf_unclone_prologue()) already. 2332 * The pointer can be directly written into. 2333 * 2334 * 2) Some portion of the requested slice is in the paged buffer area. 2335 * In this case, the requested data will be copied out into the buffer 2336 * and the returned pointer will be a pointer to the buffer. The skb 2337 * will not be pulled. To persist the write, the user will need to call 2338 * bpf_dynptr_write(), which will pull the skb and commit the write. 2339 * 2340 * Similarly for xdp programs, if the requested slice is not across xdp 2341 * fragments, then a direct pointer will be returned, otherwise the data 2342 * will be copied out into the buffer and the user will need to call 2343 * bpf_dynptr_write() to commit changes. 2344 */ 2345 return bpf_dynptr_slice(ptr, offset, buffer, buffer__szk); 2346 } 2347 2348 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj) 2349 { 2350 return obj; 2351 } 2352 2353 __bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k) 2354 { 2355 return obj__ign; 2356 } 2357 2358 __bpf_kfunc void bpf_rcu_read_lock(void) 2359 { 2360 rcu_read_lock(); 2361 } 2362 2363 __bpf_kfunc void bpf_rcu_read_unlock(void) 2364 { 2365 rcu_read_unlock(); 2366 } 2367 2368 __diag_pop(); 2369 2370 BTF_SET8_START(generic_btf_ids) 2371 #ifdef CONFIG_KEXEC_CORE 2372 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) 2373 #endif 2374 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) 2375 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) 2376 BTF_ID_FLAGS(func, bpf_list_push_front) 2377 BTF_ID_FLAGS(func, bpf_list_push_back) 2378 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) 2379 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) 2380 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) 2381 BTF_ID_FLAGS(func, bpf_task_acquire_not_zero, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 2382 BTF_ID_FLAGS(func, bpf_task_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL) 2383 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) 2384 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE) 2385 BTF_ID_FLAGS(func, bpf_rbtree_add) 2386 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) 2387 2388 #ifdef CONFIG_CGROUPS 2389 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) 2390 BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL) 2391 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) 2392 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 2393 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) 2394 #endif 2395 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) 2396 BTF_SET8_END(generic_btf_ids) 2397 2398 static const struct btf_kfunc_id_set generic_kfunc_set = { 2399 .owner = THIS_MODULE, 2400 .set = &generic_btf_ids, 2401 }; 2402 2403 2404 BTF_ID_LIST(generic_dtor_ids) 2405 BTF_ID(struct, task_struct) 2406 BTF_ID(func, bpf_task_release) 2407 #ifdef CONFIG_CGROUPS 2408 BTF_ID(struct, cgroup) 2409 BTF_ID(func, bpf_cgroup_release) 2410 #endif 2411 2412 BTF_SET8_START(common_btf_ids) 2413 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx) 2414 BTF_ID_FLAGS(func, bpf_rdonly_cast) 2415 BTF_ID_FLAGS(func, bpf_rcu_read_lock) 2416 BTF_ID_FLAGS(func, bpf_rcu_read_unlock) 2417 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) 2418 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL) 2419 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW) 2420 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL) 2421 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) 2422 BTF_SET8_END(common_btf_ids) 2423 2424 static const struct btf_kfunc_id_set common_kfunc_set = { 2425 .owner = THIS_MODULE, 2426 .set = &common_btf_ids, 2427 }; 2428 2429 static int __init kfunc_init(void) 2430 { 2431 int ret; 2432 const struct btf_id_dtor_kfunc generic_dtors[] = { 2433 { 2434 .btf_id = generic_dtor_ids[0], 2435 .kfunc_btf_id = generic_dtor_ids[1] 2436 }, 2437 #ifdef CONFIG_CGROUPS 2438 { 2439 .btf_id = generic_dtor_ids[2], 2440 .kfunc_btf_id = generic_dtor_ids[3] 2441 }, 2442 #endif 2443 }; 2444 2445 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set); 2446 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set); 2447 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set); 2448 ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors, 2449 ARRAY_SIZE(generic_dtors), 2450 THIS_MODULE); 2451 return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set); 2452 } 2453 2454 late_initcall(kfunc_init); 2455