// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on rcu in the map methods
 * lookup/update/delete, therefore eBPF programs must run under an rcu
 * lock if the program is allowed to access maps, so rcu_read_lock_held
 * is checked in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
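/* Example (illustrative only): a minimal sketch of how a hypothetical
 * subsystem could expose the map helpers through its
 * verifier_ops->get_func_proto() callback, as the comment above
 * describes. The function name and program type are made up; real
 * callbacks such as the networking ones in net/core/filter.c follow
 * the same shape:
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id,
 *			   const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return NULL;
 *		}
 *	}
 */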
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
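/* Example (illustrative only): the two helpers above pack a pair of
 * 32-bit values into a single u64 return value. A BPF program would
 * unpack them like this (a sketch, assuming the usual libbpf helper
 * headers):
 *
 *	u64 pid_tgid = bpf_get_current_pid_tgid();
 *	u32 tgid = pid_tgid >> 32;	// userspace "PID" (thread group id)
 *	u32 pid  = (u32)pid_tgid;	// userspace "TID" (task pid)
 *
 *	u64 uid_gid = bpf_get_current_uid_gid();
 *	u32 gid = uid_gid >> 32;
 *	u32 uid = (u32)uid_gid;
 */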
BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}
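/* Example (illustrative only): a sketch of how a BPF program uses
 * bpf_spin_lock()/bpf_spin_unlock(). The struct and map names are made
 * up; the pattern of embedding struct bpf_spin_lock in a BTF-described
 * map value is the documented usage:
 *
 *	struct hypothetical_val {
 *		struct bpf_spin_lock lock;
 *		long counter;
 *	};
 *
 *	struct hypothetical_val *val = bpf_map_lookup_elem(&my_map, &key);
 *	if (val) {
 *		bpf_spin_lock(&val->lock);
 *		val->counter++;
 *		bpf_spin_unlock(&val->lock);
 *	}
 */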
BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);
	struct cgroup *ancestor;

	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;
	return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now, but provides the ability
	 * to extend the API. The verifier checks that its value is
	 * correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage = NULL;
	void *ptr;
	int i;

	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
		break;
	}

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif
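/* Example (illustrative only): a sketch of checking whether the current
 * task runs under a known cgroup subtree by comparing IDs at a fixed
 * ancestor level. The level and the expected ID are made-up values:
 *
 *	u64 id = bpf_get_current_ancestor_cgroup_id(2);
 *	if (id == expected_subtree_id)	// hypothetical constant
 *		...	// task is inside the subtree rooted at that ancestor
 */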
#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif
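/* Example (illustrative only): parsing a decimal value with bpf_strtol()
 * from a BPF program, e.g. when handling text written to a control file.
 * Buffer contents are made up; on success the helper returns the number
 * of characters consumed, including sign and leading whitespace:
 *
 *	const char str[] = "-42";
 *	long val;
 *	int n = bpf_strtol(str, sizeof(str) - 1, 0, &val);
 *	if (n < 0)
 *		...	// parse error
 *	// here n == 3 and val == -42; flags == 0 means auto-detect base
 */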
BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t)size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};
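/* Example (illustrative only): how a tracing program might read a
 * kernel percpu variable through the two helpers above. This is a
 * sketch of the __ksym pattern used by the BPF selftests; the percpu
 * symbol is just an example of one that carries BTF:
 *
 *	extern const struct rq runqueues __ksym;	// percpu kernel var
 *
 *	struct rq *this_rq = bpf_this_cpu_ptr(&runqueues);
 *	struct rq *rq0 = bpf_per_cpu_ptr(&runqueues, 0);
 *	if (rq0)
 *		...	// bpf_per_cpu_ptr() returns NULL for an invalid cpu
 */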
static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BUF_LEN	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_fmt_tmp_buf(char **tmp_buf)
{
	struct bpf_bprintf_buffers *bufs;
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
	*tmp_buf = bufs->tmp_bufs[nest_level - 1];

	return 0;
}

void bpf_bprintf_cleanup(void)
{
	if (this_cpu_read(bpf_bprintf_nest_level)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
	}
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when bin_args is NULL
 * - Arguments preparation: in addition to the above verification, it writes in
 *   bin_args a binary representation of arguments usable by bstr_printf where
 *   pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_args, u32 num_args)
{
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (bin_args) {
		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
			return -EBUSY;

		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
		*bin_args = (u32 *)tmp_buf;
	}

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup();
	return err;
}

#define MAX_SNPRINTF_VARARGS		12

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, bin_args);

	bpf_bprintf_cleanup();

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
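/* Example (illustrative only): calling bpf_snprintf() from a BPF
 * program. The u64 array carries the arguments, 8 bytes each, which is
 * why data_len must be a multiple of 8. Names and values are made up:
 *
 *	char out[64];
 *	static const char fmt[] = "pid=%d comm=%s";
 *	u64 args[] = { pid, (u64)(long)comm_ptr };
 *
 *	long n = bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *	// on success n is the number of bytes that would have been
 *	// written, including the terminating NUL
 */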
/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_timer_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments the prog refcnt and assigns the
 * bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * the ops->map_release_uref callback is responsible for cancelling the
 * timers, freeing their memory, and decrementing the progs' refcnts.
 * bpf_timer_cancel() cancels the timer and decrements the prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref is
 * freeing the timers when an inner map is replaced or deleted by user
 * space.
 */
struct bpf_hrtimer {
	struct hrtimer timer;
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
};

/* the actual struct hidden inside uapi struct bpf_timer */
struct bpf_timer_kern {
	struct bpf_hrtimer *timer;
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->map;
	void *value = t->value;
	void *callback_fn;
	void *key;
	u32 idx;

	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	BPF_CAST_CALL(callback_fn)((u64)(long)map, (u64)(long)key,
				   (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clockid_t clockid = flags & (MAX_CLOCKS - 1);
	struct bpf_hrtimer *t;
	int ret = 0;

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));

	if (in_nmi())
		return -EOPNOTSUPP;

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		ret = -EPERM;
		goto out;
	}
	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->value = (void *)timer - map->timer_off;
	t->map = map;
	t->prog = NULL;
	rcu_assign_pointer(t->callback_fn, NULL);
	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
	t->timer.function = bpf_timer_cb;
	timer->timer = t;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func		= bpf_timer_init,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&t->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = t->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		t->prog = prog;
	}
	rcu_assign_pointer(t->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func		= bpf_timer_set_callback,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags)
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->prog) {
		ret = -EINVAL;
		goto out;
	}
	hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func		= bpf_timer_start,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
static void drop_prog_refcnt(struct bpf_hrtimer *t)
{
	struct bpf_prog *prog = t->prog;

	if (prog) {
		bpf_prog_put(prog);
		t->prog = NULL;
		rcu_assign_pointer(t->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (this_cpu_read(hrtimer_running) == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer, hrtimer_cancel() will deadlock since
		 * it waits for callback_fn to finish.
		 */
		ret = -EDEADLK;
		goto out;
	}
	drop_prog_refcnt(t);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func		= bpf_timer_cancel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
};

/* This function is called by map_delete/update_elem for an individual
 * element and by ops->map_release_uref when the user space reference to
 * a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_timer_kern *timer = val;
	struct bpf_hrtimer *t;

	/* Performance optimization: read timer->timer without lock first. */
	if (!READ_ONCE(timer->timer))
		return;

	__bpf_spin_lock_irqsave(&timer->lock);
	/* re-read it under lock */
	t = timer->timer;
	if (!t)
		goto out;
	drop_prog_refcnt(t);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	timer->timer = NULL;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	if (!t)
		return;
	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
	 * right after for both preallocated and non-preallocated maps.
	 * The timer->timer = NULL was already done and no code path can
	 * see address 't' anymore.
	 *
	 * Check that bpf_map_delete/update_elem() wasn't called from timer
	 * callback_fn. In such a case don't call hrtimer_cancel() (since it
	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
	 * just return -1). Though callback_fn is still running on this cpu
	 * it's safe to do kfree(t) because bpf_timer_cb() read everything it
	 * needed from 't'. The bpf subprog callback_fn won't be able to access
	 * 't', since timer->timer = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 */
	if (this_cpu_read(hrtimer_running) != t)
		hrtimer_cancel(&t->timer);
	kfree(t);
}

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_timer_init:
		return &bpf_timer_init_proto;
	case BPF_FUNC_timer_set_callback:
		return &bpf_timer_set_callback_proto;
	case BPF_FUNC_timer_start:
		return &bpf_timer_start_proto;
	case BPF_FUNC_timer_cancel:
		return &bpf_timer_cancel_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	default:
		return NULL;
	}
}
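/* Example (illustrative only): bpf_base_func_proto() is meant to serve
 * as the fallback of a program type's own get_func_proto() callback, so
 * that the capability tiering above (unprivileged, then bpf_capable(),
 * then perfmon_capable()) applies uniformly. A hypothetical callback:
 *
 *	static const struct bpf_func_proto *
 *	example_prog_func_proto(enum bpf_func_id func_id,
 *				const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_get_current_pid_tgid:
 *			return &bpf_get_current_pid_tgid_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */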