Lines Matching +full:enum +full:-cnt +full:-name

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
18 #include <linux/error-injection.h>
56 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
64 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
65 btp = &btm->module->bpf_raw_events[i];
66 if (!strcmp(btp->tp->name, name)) {
67 if (try_module_get(btm->module))
78 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
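These fragments match kernel/trace/bpf_trace.c (or a file very much like it). Lines 56-67 resolve a raw tracepoint by name inside a loaded module and pin the owning module with try_module_get(); the second prototype at line 78 is presumably the !CONFIG_MODULES stub. A hedged sketch of the lookup pattern, with the list head and its locking assumed from the surrounding file:

static struct bpf_raw_event_map *lookup_module_btp(const char *name)
{
	struct bpf_trace_module *btm;
	unsigned int i;

	/* bpf_trace_modules and its mutex are assumed from the file. */
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			struct bpf_raw_event_map *btp = &btm->module->bpf_raw_events[i];

			if (strcmp(btp->tp->name, name))
				continue;
			/* Pin the module so the tracepoint cannot unload
			 * while in use; the caller must module_put() later.
			 */
			return try_module_get(btm->module) ? btp : NULL;
		}
	}
	return NULL;
}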
97 * trace_call_bpf - invoke BPF program
106 * 0 - return from kprobe (event is filtered out)
107 * 1 - store kprobe event into ring buffer
120 * and don't send kprobe event into ring-buffer,
124 bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
133 * whether call->prog_array is empty or not, which is
137 * non-NULL, we go into trace_call_bpf() and do the actual
146 ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
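Lines 97-146 document the contract of trace_call_bpf(): it runs every program in call->prog_array, and the combined result decides the event's fate, 0 meaning BPF filtered the kprobe event out, 1 meaning it should still be stored in the ring buffer. A hypothetical caller sketch (the recorder name is illustrative, not the exact kernel call chain):

static void kprobe_dispatch(struct trace_event_call *call, struct pt_regs *regs)
{
	if (!trace_call_bpf(call, regs))
		return;				/* 0: BPF filtered the event out */
	/* 1: record the kprobe event into the ring buffer as usual */
	store_kprobe_event(call, regs);		/* hypothetical recorder */
}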
209 * strncpy_from_user() does long-sized strides in the fast path. If the
333 * access_ok() should prevent writing to non-user memory, but in
343 current->flags & (PF_KTHREAD | PF_EXITING)))
344 return -EPERM;
346 return -EPERM;
427 return -EINVAL;
469 return -EINVAL;
480 return seq_has_overflowed(m) ? -EOVERFLOW : 0;
499 return seq_write(m, data, len) ? -EOVERFLOW : 0;
523 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
547 return -EINVAL;
550 if (unlikely(index >= array->map.max_entries))
551 return -E2BIG;
553 ee = READ_ONCE(array->ptrs[index]);
555 return -ENOENT;
557 return perf_event_read_local(ee->event, value, enabled, running);
567 * this api is ugly since we miss [-22..-2] range of valid
586 int err = -EINVAL;
590 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
591 &buf->running);
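Lines 547-591 show the common read path for a BPF_MAP_TYPE_PERF_EVENT_ARRAY map: bounds-check the index, tolerate concurrent map updates with READ_ONCE(), and read the counter on the local CPU. Condensed sketch (the enabled/running out-pointers of perf_event_read_local() may be NULL):

static int read_map_counter(struct bpf_array *array, u32 index, u64 *value)
{
	struct bpf_event_entry *ee;

	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;
	ee = READ_ONCE(array->ptrs[index]);	/* may race with map updates */
	if (!ee)
		return -ENOENT;
	return perf_event_read_local(ee->event, value, NULL, NULL);
}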
628 if (unlikely(index >= array->map.max_entries))
629 return -E2BIG;
631 ee = READ_ONCE(array->ptrs[index]);
633 return -ENOENT;
635 event = ee->event;
636 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
637 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
638 return -EINVAL;
640 if (unlikely(event->oncpu != cpu))
641 return -EOPNOTSUPP;
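Lines 636-641 are the preconditions for bpf_perf_event_output(): the target perf event must be a PERF_TYPE_SOFTWARE / PERF_COUNT_SW_BPF_OUTPUT event and must be bound to the CPU the program runs on. A user-space sketch of opening one such event per CPU (assumed setup, not code from this file; error handling trimmed):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_bpf_output_event(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.config		= PERF_COUNT_SW_BPF_OUTPUT,
		.size		= sizeof(attr),
		.sample_type	= PERF_SAMPLE_RAW,
		.wakeup_events	= 1,
	};

	/* pid = -1, group_fd = -1: a per-CPU event outside any group */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}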
675 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
676 err = -EBUSY;
680 sd = &sds->sds[nest_level - 1];
683 err = -EINVAL;
740 ret = -EBUSY;
743 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
744 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
800 enum pid_type type;
813 siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV;
815 group_send_sig_info(work->sig, siginfo, work->task, work->type);
816 put_task_struct(work->task);
819 static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value)
844 if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING)))
845 return -EPERM;
847 return -EPERM;
850 return -EPERM;
857 return -EINVAL;
860 if (irq_work_is_busy(&work->irq_work))
861 return -EBUSY;
867 work->task = get_task_struct(task);
868 work->has_siginfo = siginfo == &info;
869 if (work->has_siginfo)
870 copy_siginfo(&work->info, &info);
871 work->sig = sig;
872 work->type = type;
873 irq_work_queue(&work->irq_work);
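Lines 800-873 defer signal delivery through irq_work when bpf_send_signal_common() runs in a context where group_send_sig_info() is unsafe (e.g. NMI): the parameters are stashed in a per-CPU work item and do_bpf_send_signal() finishes delivery later. Hedged sketch of the deferral, with struct send_signal_irq_work assumed from the file:

static int defer_signal(struct send_signal_irq_work *work,
			struct task_struct *task, u32 sig, enum pid_type type)
{
	if (irq_work_is_busy(&work->irq_work))
		return -EBUSY;			/* previous signal still queued */

	work->task = get_task_struct(task);	/* ref dropped in the callback */
	work->sig = sig;
	work->type = type;
	irq_work_queue(&work->irq_work);	/* runs do_bpf_send_signal() */
	return 0;
}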
926 len = buf + sz - p;
951 if (prog->type == BPF_PROG_TYPE_TRACING &&
952 prog->expected_attach_type == BPF_TRACE_ITER)
955 if (prog->type == BPF_PROG_TYPE_LSM)
956 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
959 prog->aux->attach_btf_id);
985 return -EINVAL;
988 return -EINVAL;
993 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
995 if (ptr->type_id > 0)
996 *btf_id = ptr->type_id;
998 return -EINVAL;
1003 return -ENOENT;
1019 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1037 return ((u64 *)ctx)[-2];
1050 if (is_endbr((void *)(fentry_ip - ENDBR_INSN_SIZE)))
1051 fentry_ip -= ENDBR_INSN_SIZE;
1062 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1063 if (run_ctx->is_uprobe)
1064 return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1069 if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1072 return get_entry_ip((uintptr_t)kp->addr);
1084 return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1096 return bpf_kprobe_multi_cookie(current->bpf_ctx);
1108 return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1120 return bpf_uprobe_multi_cookie(current->bpf_ctx);
1134 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1135 return run_ctx->bpf_cookie;
1147 return ctx->event->bpf_cookie;
1161 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1162 return run_ctx->bpf_cookie;
1180 return -EINVAL;
1183 return -ENOENT;
1199 u64 nr_args = ((u64 *)ctx)[-1];
1202 return -EINVAL;
1219 u64 nr_args = ((u64 *)ctx)[-1];
1236 return ((u64 *)ctx)[-1];
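Lines 1037, 1199 and 1236 rely on the BPF trampoline's ctx layout for tracing programs: the traced function's arguments sit at ctx[0..nr_args-1] as u64 slots, nr_args is stored at ctx[-1], and (per line 1037) the traced function's address at ctx[-2]. Minimal sketch of an argument accessor built on that layout:

static u64 tracing_ctx_arg(void *ctx, u32 n, int *err)
{
	u64 nr_args = ((u64 *)ctx)[-1];	/* stored just below the args array */

	if (n >= nr_args) {
		*err = -EINVAL;
		return 0;
	}
	*err = 0;
	return ((u64 *)ctx)[n];
}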
1249 * bpf_lookup_user_key - lookup a key by its serial
1251 * @flags: lookup-specific flags
1262 * one of the available key-specific kfuncs.
1295 bkey->key = key_ref_to_ptr(key_ref);
1296 bkey->has_ref = true;
1302 * bpf_lookup_system_key - lookup a key by a system-defined ID
1320 * pre-determined ID on success, a NULL pointer otherwise
1333 bkey->key = (struct key *)(unsigned long)id;
1334 bkey->has_ref = false;
1340 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1348 if (bkey->has_ref)
1349 key_put(bkey->key);
1356 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1376 if (trusted_keyring->has_ref) {
1385 ret = key_validate(trusted_keyring->key);
1396 trusted_keyring->key,
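Lines 1249-1396 implement the keyring kfuncs: bpf_lookup_user_key() and bpf_lookup_system_key() acquire a struct bpf_key, bpf_key_put() releases it, and bpf_verify_pkcs7_signature() checks a signature against the trusted keyring. A BPF-side sketch of pairing lookup with put (sleepable program assumed; exact kfunc prototypes can vary by kernel version, and the dynptrs are assumed to be initialized elsewhere):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define KEY_SPEC_SESSION_KEYRING (-3)	/* from uapi/linux/keyctl.h */

extern struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) __ksym;
extern void bpf_key_put(struct bpf_key *bkey) __ksym;
extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
				      struct bpf_dynptr *sig_p,
				      struct bpf_key *trusted_keyring) __ksym;

static int verify_blob(struct bpf_dynptr *data, struct bpf_dynptr *sig)
{
	struct bpf_key *kr;
	int ret;

	kr = bpf_lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0);
	if (!kr)
		return -2;	/* -ENOENT */
	ret = bpf_verify_pkcs7_signature(data, sig, kr);
	bpf_key_put(kr);	/* always drop the acquired reference */
	return ret;
}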
1428 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1453 if (!bpf_token_capable(prog->aux->token, CAP_SYS_ADMIN))
1467 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
1468 prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1473 return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1478 return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
1479 prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
1484 return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
1488 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1496 return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto;
1519 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1614 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1630 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1656 int err = -EINVAL;
1660 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1661 &buf->running);
1683 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1687 return -EINVAL;
1689 if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1690 return -ENOENT;
1693 return -ENOENT;
1696 return br_stack->nr * br_entry_size;
1699 return -EINVAL;
1701 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1702 memcpy(buf, br_stack->entries, to_copy);
1718 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1756 if (nest_level > ARRAY_SIZE(tp_regs->regs)) {
1758 return ERR_PTR(-EBUSY);
1761 return &tp_regs->regs[nest_level - 1];
1853 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1870 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1906 return prog->expected_attach_type == BPF_TRACE_ITER ?
1910 return prog->expected_attach_type == BPF_TRACE_ITER ?
1914 return prog->expected_attach_type == BPF_TRACE_ITER ?
1926 if (prog->type == BPF_PROG_TYPE_TRACING &&
1927 prog->expected_attach_type == BPF_TRACE_RAW_TP)
1932 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
1939 enum bpf_access_type type,
1947 enum bpf_access_type type,
1958 return -ENOTSUPP;
1982 enum bpf_access_type type,
1989 info->reg_type = PTR_TO_TP_BUFFER;
2002 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2040 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2047 switch (si->off) {
2050 data), si->dst_reg, si->src_reg,
2052 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2058 data), si->dst_reg, si->src_reg,
2060 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2066 regs), si->dst_reg, si->src_reg,
2068 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2069 si->off);
2073 return insn - insn_buf;
2095 int ret = -EEXIST;
2099 * and only if they are on the opt-in list.
2101 if (prog->kprobe_override &&
2102 (!trace_kprobe_on_func_entry(event->tp_event) ||
2103 !trace_kprobe_error_injectable(event->tp_event)))
2104 return -EINVAL;
2108 if (event->prog)
2111 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2114 ret = -E2BIG;
2122 /* set the new array to event->tp_event and set event->prog */
2123 event->prog = prog;
2124 event->bpf_cookie = bpf_cookie;
2125 rcu_assign_pointer(event->tp_event->prog_array, new_array);
2142 if (!event->prog)
2145 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2149 ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2151 bpf_prog_array_delete_safe(old_array, event->prog);
2153 rcu_assign_pointer(event->tp_event->prog_array, new_array);
2158 prog = event->prog;
2159 event->prog = NULL;
2168 * programs and uses tasks-trace-RCU.
2185 return -EPERM;
2186 if (event->attr.type != PERF_TYPE_TRACEPOINT)
2187 return -EINVAL;
2189 return -EFAULT;
2193 return -E2BIG;
2196 return -ENOMEM;
2199 * is required when user only wants to check for uquery->prog_cnt.
2205 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2209 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2210 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2211 ret = -EFAULT;
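Lines 2185-2211 back the PERF_EVENT_IOC_QUERY_BPF ioctl, copying the attached program IDs (and prog_cnt, even when the caller only wants the count) back to user space. User-space sketch of issuing the query (assumed usage, error handling trimmed):

#include <linux/perf_event.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static void print_attached_progs(int perf_fd)
{
	struct perf_event_query_bpf *q;
	unsigned int i;

	q = calloc(1, sizeof(*q) + 64 * sizeof(__u32));
	if (!q)
		return;
	q->ids_len = 64;
	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q) == 0) {
		for (i = 0; i < q->prog_cnt && i < q->ids_len; i++)
			printf("attached BPF prog id %u\n", q->ids[i]);
	}
	free(q);
}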
2220 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2225 if (!strcmp(btp->tp->name, name))
2229 return bpf_get_raw_tracepoint_module(name);
2244 struct bpf_prog *prog = link->link.prog;
2249 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2254 run_ctx.bpf_cookie = link->cookie;
2263 this_cpu_dec(*(prog->active));
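Lines 2244-2263 guard raw tracepoint programs against recursion with the per-CPU prog->active counter: if the tracepoint fires again while the program is already running on this CPU, the run is skipped and counted as a miss (compare line 124). Condensed sketch:

static void run_prog_no_recursion(struct bpf_prog *prog, u64 *args)
{
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);	/* re-entered: skip */
		goto out;
	}
	bpf_prog_run(prog, args);
out:
	this_cpu_dec(*(prog->active));
}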
2313 struct tracepoint *tp = btp->tp;
2314 struct bpf_prog *prog = link->link.prog;
2320 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2321 return -EINVAL;
2323 if (prog->aux->max_tp_access > btp->writable_size)
2324 return -EINVAL;
2326 return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
2331 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
2343 prog = event->prog;
2345 return -ENOENT;
2348 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2349 return -EOPNOTSUPP;
2351 *prog_id = prog->aux->id;
2352 flags = event->tp_event->flags;
2354 is_syscall_tp = is_syscall_trace_event(event->tp_event);
2357 *buf = is_tracepoint ? event->tp_event->tp->name
2358 : event->tp_event->name;
2368 err = -EOPNOTSUPP;
2373 event->attr.type == PERF_TYPE_TRACEPOINT);
2379 event->attr.type == PERF_TYPE_TRACEPOINT);
2393 init_irq_work(&work->irq_work, do_bpf_send_signal);
2408 if (mod->num_bpf_raw_events == 0 ||
2418 btm->module = module;
2419 list_add(&btm->list, &bpf_trace_modules);
2421 ret = -ENOMEM;
2426 if (btm->module == module) {
2427 list_del(&btm->list);
2466 u32 cnt;
2497 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2502 int err = -ENOMEM;
2505 syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2509 buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2513 for (p = buf, i = 0; i < cnt; i++) {
2515 err = -EFAULT;
2520 err = -E2BIG;
2527 us->syms = syms;
2528 us->buf = buf;
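Lines 2497-2528 copy the user's symbol list into two kernel buffers: syms[] holds one pointer per name, and buf is a single flat allocation the names are packed into back to back. A hedged reconstruction of the packing loop elided from the listing:

	unsigned long usymbol;

	for (p = buf, i = 0; i < cnt; i++) {
		if (get_user(usymbol, usyms + i))
			return -EFAULT;	/* fetch the user pointer itself */
		err = strncpy_from_user(p, (const char __user *)usymbol,
					KSYM_NAME_LEN);
		if (err == KSYM_NAME_LEN)
			return -E2BIG;	/* name does not fit */
		if (err < 0)
			return err;
		syms[i] = p;		/* record where this name starts */
		p += err + 1;		/* keep the NUL, advance past it */
	}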
2539 static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2543 for (i = 0; i < cnt; i++)
2549 kvfree(us->syms);
2550 kvfree(us->buf);
2558 unregister_fprobe(&kmulti_link->fp);
2559 kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2567 kvfree(kmulti_link->addrs);
2568 kvfree(kmulti_link->cookies);
2569 kfree(kmulti_link->mods);
2576 u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
2577 u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2579 u32 ucount = info->kprobe_multi.count;
2583 return -EINVAL;
2585 return -EINVAL;
2588 info->kprobe_multi.count = kmulti_link->cnt;
2589 info->kprobe_multi.flags = kmulti_link->flags;
2590 info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2594 if (ucount < kmulti_link->cnt)
2595 err = -ENOSPC;
2597 ucount = kmulti_link->cnt;
2600 if (kmulti_link->cookies) {
2601 if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2602 return -EFAULT;
2606 return -EFAULT;
2612 if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2613 return -EFAULT;
2617 return -EFAULT;
2635 cookie_a = link->cookies + (addr_a - link->addrs);
2636 cookie_b = link->cookies + (addr_b - link->addrs);
2649 return *addr_a < *addr_b ? -1 : 1;
2666 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2668 link = run_ctx->link;
2669 if (!link->cookies)
2671 entry_ip = run_ctx->entry_ip;
2672 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2676 cookie = link->cookies + (addr - link->addrs);
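Lines 2635-2676 keep addrs[] and cookies[] as parallel arrays: the attach path sorts them in lockstep (the swap at 2635-2636 moves both), so at run time the cookie for an entry_ip is a bsearch over addrs plus an index reuse. Condensed sketch (the comparator name is assumed from the file):

static u64 cookie_for_entry_ip(struct bpf_kprobe_multi_link *link, u64 entry_ip)
{
	unsigned long *addr;

	if (!link->cookies)
		return 0;
	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
		       bpf_kprobe_multi_addrs_cmp);	/* assumed comparator */
	if (!addr)
		return 0;
	return link->cookies[addr - link->addrs];	/* same index, other array */
}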
2684 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2686 return run_ctx->entry_ip;
2707 bpf_prog_inc_misses_counter(link->link.prog);
2716 err = bpf_prog_run(link->link.prog, regs);
2737 return is_kprobe_session(link->link.prog) ? err : 0;
2773 if (data->cookies) {
2776 cookie_a = data->cookies + (name_a - data->funcs);
2777 cookie_b = data->cookies + (name_b - data->funcs);
2792 if (arr->mods_cnt == arr->mods_cap) {
2793 arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2794 mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2796 return -ENOMEM;
2797 arr->mods = mods;
2800 arr->mods[arr->mods_cnt] = mod;
2801 arr->mods_cnt++;
2809 for (i = arr->mods_cnt - 1; i >= 0; i--) {
2810 if (arr->mods[i] == mod)
2833 err = -EINVAL;
2858 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2862 for (i = 0; i < cnt; i++) {
2864 return -EINVAL;
2875 u32 flags, cnt, size;
2883 return -EOPNOTSUPP;
2885 if (attr->link_create.flags)
2886 return -EINVAL;
2889 return -EINVAL;
2891 flags = attr->link_create.kprobe_multi.flags;
2893 return -EINVAL;
2895 uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2896 usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2898 return -EINVAL;
2900 cnt = attr->link_create.kprobe_multi.cnt;
2901 if (!cnt)
2902 return -EINVAL;
2903 if (cnt > MAX_KPROBE_MULTI_CNT)
2904 return -E2BIG;
2906 size = cnt * sizeof(*addrs);
2907 addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2909 return -ENOMEM;
2911 ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2913 cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2915 err = -ENOMEM;
2919 err = -EFAULT;
2926 err = -EFAULT;
2935 err = copy_user_syms(&us, usyms, cnt);
2942 sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
2945 err = ftrace_lookup_symbols(us.syms, cnt, addrs);
2951 if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
2952 err = -EINVAL;
2958 err = -ENOMEM;
2962 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
2965 err = bpf_link_prime(&link->link, &link_primer);
2970 link->fp.entry_handler = kprobe_multi_link_handler;
2972 link->fp.exit_handler = kprobe_multi_link_exit_handler;
2974 link->fp.entry_data_size = sizeof(u64);
2976 link->addrs = addrs;
2977 link->cookies = cookies;
2978 link->cnt = cnt;
2979 link->flags = flags;
2988 sort_r(addrs, cnt, sizeof(*addrs),
2994 err = get_modules_for_addrs(&link->mods, addrs, cnt);
2999 link->mods_cnt = err;
3001 err = register_fprobe_ips(&link->fp, addrs, cnt);
3003 kprobe_multi_put_modules(link->mods, link->mods_cnt);
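Lines 2875-3003 are the kernel half of kprobe-multi attachment: validate flags and counts, copy addrs/syms/cookies from user space, resolve symbols, sort, pin the covered modules, and register one fprobe over all addresses. The matching user-space call through libbpf (assumed libbpf >= 0.8; the symbol names are just examples):

#include <bpf/libbpf.h>

static struct bpf_link *attach_many(struct bpf_program *prog)
{
	const char *syms[] = { "vfs_read", "vfs_write" };
	__u64 cookies[] = { 1, 2 };
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.syms = syms,
		.cookies = cookies,
		.cnt = 2,
	);

	/* One BPF_LINK_TYPE_KPROBE_MULTI link covering both symbols */
	return bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
}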
3019 return -EOPNOTSUPP;
3047 u32 cnt;
3059 static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
3063 for (i = 0; i < cnt; i++)
3066 if (cnt)
3075 bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt);
3076 if (umulti_link->task)
3077 put_task_struct(umulti_link->task);
3078 path_put(&umulti_link->path);
3086 kvfree(umulti_link->uprobes);
3093 u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3094 u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3095 u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3096 u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3097 u32 upath_size = info->uprobe_multi.path_size;
3099 u32 ucount = info->uprobe_multi.count;
3105 return -EINVAL;
3108 return -EINVAL;
3111 info->uprobe_multi.count = umulti_link->cnt;
3112 info->uprobe_multi.flags = umulti_link->flags;
3113 info->uprobe_multi.pid = umulti_link->task ?
3114 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3119 return -ENOMEM;
3120 p = d_path(&umulti_link->path, buf, upath_size);
3125 upath_size = buf + upath_size - p;
3131 return -EFAULT;
3132 info->uprobe_multi.path_size = upath_size;
3137 if (ucount < umulti_link->cnt)
3138 err = -ENOSPC;
3140 ucount = umulti_link->cnt;
3144 put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3145 return -EFAULT;
3147 put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3148 return -EFAULT;
3150 put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3151 return -EFAULT;
3168 struct bpf_uprobe_multi_link *link = uprobe->link;
3177 struct bpf_prog *prog = link->link.prog;
3178 bool sleepable = prog->sleepable;
3182 if (link->task && !same_thread_group(current, link->task))
3193 err = bpf_prog_run(link->link.prog, regs);
3211 return uprobe->link->task->mm == mm;
3223 if (uprobe->session)
3243 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3245 return run_ctx->entry_ip;
3252 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3254 return run_ctx->uprobe->cookie;
3267 u32 flags, cnt, i;
3269 char *name;
3275 return -EOPNOTSUPP;
3277 if (attr->link_create.flags)
3278 return -EINVAL;
3281 return -EINVAL;
3283 flags = attr->link_create.uprobe_multi.flags;
3285 return -EINVAL;
3288 * path, offsets and cnt are mandatory,
3291 upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3292 uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3293 cnt = attr->link_create.uprobe_multi.cnt;
3294 pid = attr->link_create.uprobe_multi.pid;
3296 if (!upath || !uoffsets || !cnt || pid < 0)
3297 return -EINVAL;
3298 if (cnt > MAX_UPROBE_MULTI_CNT)
3299 return -E2BIG;
3301 uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3302 ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3304 name = strndup_user(upath, PATH_MAX);
3305 if (IS_ERR(name)) {
3306 err = PTR_ERR(name);
3310 err = kern_path(name, LOOKUP_FOLLOW, &path);
3311 kfree(name);
3316 err = -EBADF;
3325 err = -ESRCH;
3330 err = -ENOMEM;
3333 uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3338 for (i = 0; i < cnt; i++) {
3340 err = -EFAULT;
3344 err = -EINVAL;
3348 err = -EFAULT;
3352 err = -EFAULT;
3368 link->cnt = cnt;
3369 link->uprobes = uprobes;
3370 link->path = path;
3371 link->task = task;
3372 link->flags = flags;
3374 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3377 for (i = 0; i < cnt; i++) {
3378 uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry),
3384 link->cnt = i;
3389 err = bpf_link_prime(&link->link, &link_primer);
3396 bpf_uprobe_unregister(uprobes, link->cnt);
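Lines 3267-3396 mirror that flow for uprobe-multi: resolve the user-supplied path, optionally pin a target task, copy per-probe offsets and cookies, and register each uprobe, rolling back on failure. The libbpf counterpart (assumed libbpf >= 1.3; path and symbols are examples):

#include <bpf/libbpf.h>
#include <unistd.h>

static struct bpf_link *attach_uprobes(struct bpf_program *prog)
{
	const char *syms[] = { "malloc", "free" };
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.syms = syms,
		.cnt = 2,
	);

	/* Filter to this process; the kernel side rejects negative pids. */
	return bpf_program__attach_uprobe_multi(prog, getpid(),
						"/usr/lib/libc.so.6",
						NULL /* no pattern, use syms */,
						&opts);
}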
3410 return -EOPNOTSUPP;
3428 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3429 return session_ctx->is_return;
3436 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3437 return session_ctx->data;
3453 return -EACCES;
3488 int cnt, err;
3497 return -E2BIG;
3499 for (off = 0; off < size; off += chunk_sz - 1) {
3500 chunk_sz = min_t(u32, sizeof(buf), size - off);
3502 * zero terminator. Next iteration increment off by chunk_sz - 1 to
3505 cnt = str_copy_fn(buf, unsafe_src + off, chunk_sz, tsk);
3506 if (cnt < 0)
3507 return cnt;
3508 err = __bpf_dynptr_write(dst, doff + off, buf, cnt, 0);
3511 if (cnt < chunk_sz || chunk_sz == 1) /* we are done */
3512 return off + cnt;
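A worked trace of the chunking at lines 3499-3512, assuming an 8-byte buf, a 10-character source string, and a destination of at least 11 bytes: iteration one copies 7 characters plus a NUL into the dynptr at offset 0 (the copy helper returns cnt = 8 = chunk_sz, so the loop is not done) and advances off by chunk_sz - 1 = 7, landing exactly on the NUL it just wrote; iteration two overwrites that NUL with the remaining 3 characters plus the terminator at offsets 7-10 and, since cnt = 4 < chunk_sz, returns off + cnt = 7 + 4 = 11, the full string length including the NUL.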
3533 return -E2BIG;
3536 chunk_sz = min_t(u32, sizeof(buf), size - off);
3561 return -EFAULT;
3567 return -EFAULT;
3594 ret = strncpy_from_user(dst, (const void __user *)unsafe_src, size - 1);
3613 __bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type,
3617 return -EINVAL;
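Line 3613 declares the bpf_send_signal_task() kfunc, which extends bpf_send_signal()/bpf_send_signal_thread() with an explicit target task and a cookie value. A BPF-side sketch (assumed availability; whether a given program type may call it depends on the kernel's kfunc registration):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern int bpf_send_signal_task(struct task_struct *task, int sig,
				enum pid_type type, u64 value) __ksym;

SEC("tp_btf/sched_switch")
int BPF_PROG(notify, bool preempt, struct task_struct *prev,
	     struct task_struct *next)
{
	/* Illustrative only: nudge the incoming task with SIGUSR1 and a
	 * cookie of 42; real users would filter by pid/comm first. */
	bpf_send_signal_task(next, 10 /* SIGUSR1 */, PIDTYPE_PID, 42);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";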