Lines matching "+full:im +full:-" in kernel/bpf/trampoline.c
1 // SPDX-License-Identifier: GPL-2.0-only
37 struct bpf_trampoline *tr = ops->private;
42 * tr->mutex is already locked.
44 lockdep_assert_held_once(&tr->mutex);
47 * -EAGAIN to register_ftrace_direct(). Then we can
51 if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
52 !(tr->flags & BPF_TRAMP_F_ORIG_STACK)) {
53 if (WARN_ON_ONCE(tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY))
54 return -EBUSY;
56 tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
57 return -EAGAIN;
64 * tr->mutex => direct_mutex (ftrace.c) => ftrace_lock (ftrace.c)
72 * mutex_trylock(&tr->mutex) to avoid deadlock in race condition
75 if (!mutex_trylock(&tr->mutex)) {
76 /* sleep 1 ms to make sure whatever holding tr->mutex makes
80 return -EAGAIN;
85 tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
87 if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
88 !(tr->flags & BPF_TRAMP_F_ORIG_STACK))
92 tr->flags &= ~BPF_TRAMP_F_SHARE_IPMODIFY;
94 if (tr->flags & BPF_TRAMP_F_ORIG_STACK)
98 ret = -EINVAL;
102 mutex_unlock(&tr->mutex);
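These fragments are from bpf_tramp_ftrace_ops_func(). Because the lock order is tr->mutex => direct_mutex => ftrace_lock, the callback (entered with direct_mutex already held) may only trylock tr->mutex, back off briefly, and ask ftrace to retry with -EAGAIN. A minimal userspace sketch of that trylock-and-retry pattern, with hypothetical names:

#include <errno.h>
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t tr_mutex = PTHREAD_MUTEX_INITIALIZER;

static int ops_func_locked(void)
{
        /* ... adjust trampoline flags while holding tr_mutex ... */
        return 0;
}

static int ops_func(void)
{
        int ret;

        /* A blocking lock here could deadlock against a holder that is
         * about to take the lock we were called under, so only try it. */
        if (pthread_mutex_trylock(&tr_mutex)) {
                usleep(1000);   /* give the holder ~1 ms to finish */
                return -EAGAIN; /* caller retries the whole operation */
        }
        ret = ops_func_locked();
        pthread_mutex_unlock(&tr_mutex);
        return ret;
}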
109 enum bpf_attach_type eatype = prog->expected_attach_type;
110 enum bpf_prog_type ptype = prog->type;
120 ksym->start = (unsigned long) data;
121 ksym->end = ksym->start + size;
127 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
128 PAGE_SIZE, false, ksym->name);
134 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
135 PAGE_SIZE, true, ksym->name);
147 if (tr->key == key) {
148 refcount_inc(&tr->refcnt);
156 tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
157 if (!tr->fops) {
162 tr->fops->private = tr;
163 tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
166 tr->key = key;
167 INIT_HLIST_NODE(&tr->hlist);
168 hlist_add_head(&tr->hlist, head);
169 refcount_set(&tr->refcnt, 1);
170 mutex_init(&tr->mutex);
172 INIT_HLIST_HEAD(&tr->progs_hlist[i]);
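bpf_trampoline_lookup() above is a refcounted lookup-or-create on a hash bucket: a hit takes a reference and returns; a miss allocates a zeroed trampoline plus its ftrace_ops, initializes it to refcount 1, and inserts it at the bucket head. A single-threaded userspace sketch of the idiom (hypothetical types; the kernel runs this under trampoline_mutex with refcount_t):

#include <stdint.h>
#include <stdlib.h>

struct tramp {
        uint64_t key;
        unsigned int refcnt;
        struct tramp *next;
};

static struct tramp *lookup_or_create(struct tramp **head, uint64_t key)
{
        struct tramp *t;

        for (t = *head; t; t = t->next) {
                if (t->key == key) {
                        t->refcnt++;    /* existing entry: take a reference */
                        return t;
                }
        }
        t = calloc(1, sizeof(*t));
        if (!t)
                return NULL;
        t->key = key;
        t->refcnt = 1;          /* caller holds the first reference */
        t->next = *head;        /* insert at the bucket head */
        *head = t;
        return t;
}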
180 void *ip = tr->func.addr;
183 if (tr->func.ftrace_managed)
184 ret = unregister_ftrace_direct(tr->fops, (long)old_addr, false);
194 void *ip = tr->func.addr;
197 if (tr->func.ftrace_managed) {
199 ret = modify_ftrace_direct(tr->fops, (long)new_addr);
201 ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr);
211 void *ip = tr->func.addr;
217 if (!tr->fops)
218 return -ENOTSUPP;
219 tr->func.ftrace_managed = true;
222 if (tr->func.ftrace_managed) {
223 ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
224 ret = register_ftrace_direct(tr->fops, (long)new_addr);
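Condensed from the three attach-path fragments above (a sketch, not the exact kernel functions): registering, modifying, and unregistering all dispatch on tr->func.ftrace_managed, using the ftrace direct-call API on tr->fops when ftrace manages the patch site, and bpf_arch_text_poke() otherwise:

static int attach(struct bpf_trampoline *tr, void *new_addr)
{
        void *ip = tr->func.addr;

        if (tr->func.ftrace_managed) {
                /* route the fentry site through the direct-call trampoline */
                ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
                return register_ftrace_direct(tr->fops, (long)new_addr);
        }
        /* not an ftrace patch site: poke the call emitted at ip directly */
        return bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
}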
243 return ERR_PTR(-ENOMEM);
246 tlinks[kind].nr_links = tr->progs_cnt[kind];
247 *total += tr->progs_cnt[kind];
250 hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
251 *ip_arg |= link->link.prog->call_get_func_ip;
258 static void bpf_tramp_image_free(struct bpf_tramp_image *im)
260 bpf_image_ksym_del(&im->ksym);
261 arch_free_bpf_trampoline(im->image, im->size);
262 bpf_jit_uncharge_modmem(im->size);
263 percpu_ref_exit(&im->pcref);
264 kfree_rcu(im, rcu);
269 struct bpf_tramp_image *im;
271 im = container_of(work, struct bpf_tramp_image, work);
272 bpf_tramp_image_free(im);
278 struct bpf_tramp_image *im;
280 im = container_of(rcu, struct bpf_tramp_image, rcu);
281 INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
282 schedule_work(&im->work);
288 struct bpf_tramp_image *im;
290 im = container_of(pcref, struct bpf_tramp_image, pcref);
291 call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
297 struct bpf_tramp_image *im;
299 im = container_of(rcu, struct bpf_tramp_image, rcu);
300 if (im->ip_after_call)
302 percpu_ref_kill(&im->pcref);
305 call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
308 static void bpf_tramp_image_put(struct bpf_tramp_image *im)
325 * __bpf_tramp_enter->percpu_ref_get.
336 if (im->ip_after_call) {
337 int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
338 NULL, im->ip_epilogue);
341 call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
343 percpu_ref_kill(&im->pcref);
353 call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
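The teardown fragments above chain three deferrals: bpf_tramp_image_put() uses call_rcu_tasks()/call_rcu_tasks_trace() to wait out tasks still executing the trampoline, the percpu_ref release fires only after the last in-flight __bpf_tramp_enter() pin is dropped, and the final free is bounced from the RCU callback to a workqueue because freeing may sleep. A kernel-style sketch of that last hop, with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
        struct rcu_head rcu;
        struct work_struct work;
};

static void obj_free_deferred(struct work_struct *work)
{
        struct obj *o = container_of(work, struct obj, work);

        kfree(o);       /* process context: sleeping teardown is fine here */
}

static void obj_put_rcu(struct rcu_head *rcu)
{
        struct obj *o = container_of(rcu, struct obj, rcu);

        /* RCU callbacks must not sleep, so hand off to a workqueue */
        INIT_WORK(&o->work, obj_free_deferred);
        schedule_work(&o->work);
}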
358 struct bpf_tramp_image *im;
361 int err = -ENOMEM;
363 im = kzalloc(sizeof(*im), GFP_KERNEL);
364 if (!im)
370 im->size = size;
372 err = -ENOMEM;
373 im->image = image = arch_alloc_bpf_trampoline(size);
377 err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
381 ksym = &im->ksym;
382 INIT_LIST_HEAD_RCU(&ksym->lnode);
383 snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
386 return im;
389 arch_free_bpf_trampoline(im->image, im->size);
393 kfree(im);
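bpf_tramp_image_alloc() above follows the usual goto-based unwind: each failure label releases exactly what the earlier steps set up, in reverse order, and the image's ksym is named bpf_trampoline_<key>. A runnable userspace sketch of the pattern (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct image {
        void *mem;
        char name[64];
};

static struct image *image_alloc(unsigned long long key, size_t size)
{
        struct image *im = calloc(1, sizeof(*im));

        if (!im)
                goto out;
        im->mem = malloc(size);
        if (!im->mem)
                goto out_free_im;       /* unwind only what succeeded */
        snprintf(im->name, sizeof(im->name), "bpf_trampoline_%llu", key);
        return im;

out_free_im:
        free(im);
out:
        return NULL;
}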
400 struct bpf_tramp_image *im;
402 u32 orig_flags = tr->flags;
411 err = unregister_fentry(tr, tr->cur_image->image);
412 bpf_tramp_image_put(tr->cur_image);
413 tr->cur_image = NULL;
418 tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
425 tr->flags |= BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
427 tr->flags |= BPF_TRAMP_F_RESTORE_REGS;
431 tr->flags |= BPF_TRAMP_F_IP_ARG;
435 if ((tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) &&
436 (tr->flags & BPF_TRAMP_F_CALL_ORIG))
437 tr->flags |= BPF_TRAMP_F_ORIG_STACK;
440 size = arch_bpf_trampoline_size(&tr->func.model, tr->flags,
441 tlinks, tr->func.addr);
448 err = -E2BIG;
452 im = bpf_tramp_image_alloc(tr->key, size);
453 if (IS_ERR(im)) {
454 err = PTR_ERR(im);
458 err = arch_prepare_bpf_trampoline(im, im->image, im->image + size,
459 &tr->func.model, tr->flags, tlinks,
460 tr->func.addr);
464 err = arch_protect_bpf_trampoline(im->image, im->size);
468 WARN_ON(tr->cur_image && total == 0);
469 if (tr->cur_image)
471 err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
474 err = register_fentry(tr, im->image);
477 if (err == -EAGAIN) {
478 /* -EAGAIN from bpf_tramp_ftrace_ops_func. Now
482 /* reset fops->func and fops->trampoline for re-register */
483 tr->fops->func = NULL;
484 tr->fops->trampoline = 0;
486 /* free im memory and reallocate later */
487 bpf_tramp_image_free(im);
494 if (tr->cur_image)
495 bpf_tramp_image_put(tr->cur_image);
496 tr->cur_image = im;
500 tr->flags = orig_flags;
505 bpf_tramp_image_free(im);
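Tying the -EAGAIN pieces together: when register_fentry() propagates -EAGAIN from the ops_func IPMODIFY negotiation, bpf_trampoline_update() clears tr->fops->func and tr->fops->trampoline, frees the just-built image, and restarts, so the second pass sees BPF_TRAMP_F_SHARE_IPMODIFY and emits a compatible trampoline. Schematically (compilable stubs, not the kernel function):

#include <errno.h>

struct tramp;   /* opaque for the sketch */

static int build_and_register(struct tramp *tr)
{
        (void)tr;
        return 0;       /* hypothetical single update pass */
}

static void reset_fops(struct tramp *tr)
{
        (void)tr;       /* hypothetical: clear fops->func / fops->trampoline */
}

static int trampoline_update(struct tramp *tr)
{
        int err;

again:
        err = build_and_register(tr);
        if (err == -EAGAIN) {
                /* IPMODIFY negotiation changed the flags: reset the ftrace
                 * ops for re-registration and rebuild the image from scratch */
                reset_fops(tr);
                goto again;
        }
        return err;
}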
511 switch (prog->expected_attach_type) {
519 if (!prog->aux->attach_func_proto->type)
533 struct bpf_prog_aux *aux = tgt_prog->aux;
535 guard(mutex)(&aux->ext_mutex);
536 if (aux->prog_array_member_cnt)
540 * tgt prog entry -> tgt prog subprog -> freplace prog entry
541 * --tailcall-> tgt prog entry.
543 return -EBUSY;
545 aux->is_extended = true;
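bpf_freplace_check_tgt_prog() above relies on the scoped-lock helper from <linux/cleanup.h>: guard(mutex)(&aux->ext_mutex) drops the mutex automatically when the scope exits, so neither the -EBUSY path nor the success path needs an explicit mutex_unlock(). A kernel-style sketch of the idiom (hypothetical type):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct tgt {
        struct mutex lock;
        bool in_prog_array;
        bool is_extended;
};

static int check_tgt(struct tgt *t)
{
        guard(mutex)(&t->lock);         /* unlocked on every return below */

        if (t->in_prog_array)
                return -EBUSY;
        t->is_extended = true;
        return 0;
}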
558 kind = bpf_attach_type_to_tramp(link->link.prog);
559 if (tr->extension_prog)
563 return -EBUSY;
566 cnt += tr->progs_cnt[i];
571 return -EBUSY;
575 tr->extension_prog = link->link.prog;
576 return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
577 link->link.prog->bpf_func);
580 return -E2BIG;
581 if (!hlist_unhashed(&link->tramp_hlist))
583 return -EBUSY;
584 hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
585 if (link_exiting->link.prog != link->link.prog)
588 return -EBUSY;
591 hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
592 tr->progs_cnt[kind]++;
595 hlist_del_init(&link->tramp_hlist);
596 tr->progs_cnt[kind]--;
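The tail of __bpf_trampoline_link_prog() is a small transaction: the link is inserted and the per-kind count bumped before bpf_trampoline_update() runs, and on failure both are rolled back. A runnable userspace sketch (hypothetical names):

struct node {
        struct node *next;
};

struct tramp_list {
        struct node *head;
        int cnt;
};

static int regen_trampoline(struct tramp_list *l)
{
        (void)l;
        return 0;       /* hypothetical regenerate-trampoline step */
}

static int link_prog(struct tramp_list *l, struct node *n)
{
        int err;

        /* insert first so the regenerated trampoline includes the program */
        n->next = l->head;
        l->head = n;
        l->cnt++;

        err = regen_trampoline(l);
        if (err) {
                /* roll back: restore the previous list and count */
                l->head = n->next;
                l->cnt--;
        }
        return err;
}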
607 mutex_lock(&tr->mutex);
609 mutex_unlock(&tr->mutex);
620 kind = bpf_attach_type_to_tramp(link->link.prog);
622 WARN_ON_ONCE(!tr->extension_prog);
623 err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
624 tr->extension_prog->bpf_func, NULL);
625 tr->extension_prog = NULL;
626 guard(mutex)(&tgt_prog->aux->ext_mutex);
627 tgt_prog->aux->is_extended = false;
630 hlist_del_init(&link->tramp_hlist);
631 tr->progs_cnt[kind]--;
642 mutex_lock(&tr->mutex);
644 mutex_unlock(&tr->mutex);
654 /* paired with 'shim_link->trampoline = tr' in bpf_trampoline_link_cgroup_shim */
655 if (!shim_link->trampoline)
658 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline, NULL));
659 bpf_trampoline_put(shim_link->trampoline);
693 p->jited = false;
694 p->bpf_func = bpf_func;
696 p->aux->cgroup_atype = cgroup_atype;
697 p->aux->attach_func_proto = prog->aux->attach_func_proto;
698 p->aux->attach_btf_id = prog->aux->attach_btf_id;
699 p->aux->attach_btf = prog->aux->attach_btf;
700 btf_get(p->aux->attach_btf);
701 p->type = BPF_PROG_TYPE_LSM;
702 p->expected_attach_type = BPF_LSM_MAC;
704 bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC,
706 bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype);
718 hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
719 struct bpf_prog *p = link->link.prog;
721 if (p->bpf_func == bpf_func)
741 prog->aux->attach_btf_id,
746 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
747 prog->aux->attach_btf_id);
752 return -ENOMEM;
754 mutex_lock(&tr->mutex);
759 bpf_link_inc(&shim_link->link.link);
761 mutex_unlock(&tr->mutex);
770 err = -ENOMEM;
774 err = __bpf_trampoline_link_prog(&shim_link->link, tr, NULL);
778 shim_link->trampoline = tr;
781 mutex_unlock(&tr->mutex);
785 mutex_unlock(&tr->mutex);
788 bpf_link_put(&shim_link->link.link);
803 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
804 prog->aux->attach_btf_id);
811 mutex_lock(&tr->mutex);
813 mutex_unlock(&tr->mutex);
816 bpf_link_put(&shim_link->link.link);
831 mutex_lock(&tr->mutex);
832 if (tr->func.addr)
835 memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
836 tr->func.addr = (void *)tgt_info->tgt_addr;
838 mutex_unlock(&tr->mutex);
849 if (!refcount_dec_and_test(&tr->refcnt))
851 WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
854 if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
860 * fexit progs. The fentry-only trampoline will be freed via
863 hlist_del(&tr->hlist);
864 if (tr->fops) {
865 ftrace_free_filter(tr->fops);
866 kfree(tr->fops);
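bpf_trampoline_put() above tears the trampoline down only on the final reference drop, first warning if tr->mutex is still held or any progs_hlist is non-empty. A single-threaded userspace sketch of the drop-last-reference idiom (the kernel uses atomic refcount_dec_and_test() under trampoline_mutex):

#include <assert.h>
#include <stdlib.h>

struct tramp {
        unsigned int refcnt;
        struct tramp *next;
};

static void tramp_put(struct tramp **bucket, struct tramp *t)
{
        struct tramp **pp;

        assert(t->refcnt > 0);
        if (--t->refcnt)
                return;                 /* other holders remain */
        for (pp = bucket; *pp; pp = &(*pp)->next) {
                if (*pp == t) {
                        *pp = t->next;  /* unlink from the hash bucket */
                        break;
                }
        }
        free(t);                        /* last reference: tear down */
}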
890 * call prog->bpf_func
894 * 0 - skip execution of the bpf prog
895 * 1 - execute bpf prog
896 * [2..MAX_U64] - execute bpf prog and record execution time.
904 run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
906 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
908 if (prog->aux->recursion_detected)
909 prog->aux->recursion_detected(prog);
928 duration = sched_clock() - start;
929 stats = this_cpu_ptr(prog->stats);
930 flags = u64_stats_update_begin_irqsave(&stats->syncp);
931 u64_stats_inc(&stats->cnt);
932 u64_stats_add(&stats->nsecs, duration);
933 u64_stats_update_end_irqrestore(&stats->syncp, flags);
947 bpf_reset_run_ctx(run_ctx->saved_run_ctx);
950 this_cpu_dec(*(prog->active));
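The contract spelled out in the comment above: __bpf_prog_enter() returns 0 to skip the program (recursion detected on this CPU), 1 to run it, or a start timestamp to run it and record runtime; the exit hook always runs, so its decrement always pairs with the increment. A userspace sketch with a thread-local counter standing in for the per-CPU prog->active and CLOCK_MONOTONIC for sched_clock():

#include <stdint.h>
#include <time.h>

static _Thread_local int active;
static uint64_t cnt, nsecs;     /* stats; the kernel uses per-CPU u64_stats */

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static uint64_t prog_enter(void)
{
        if (++active != 1)
                return 0;       /* recursion: caller skips the program */
        return now_ns();        /* nonzero: run the program, record runtime */
}

static void prog_exit(uint64_t start)
{
        if (start) {            /* only timed, non-skipped runs update stats */
                cnt++;
                nsecs += now_ns() - start;
        }
        active--;               /* always pairs with the increment in enter */
}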
963 run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
972 bpf_reset_run_ctx(run_ctx->saved_run_ctx);
984 run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
986 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
988 if (prog->aux->recursion_detected)
989 prog->aux->recursion_detected(prog);
998 bpf_reset_run_ctx(run_ctx->saved_run_ctx);
1001 this_cpu_dec(*(prog->active));
1013 run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
1021 bpf_reset_run_ctx(run_ctx->saved_run_ctx);
1034 run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
1043 bpf_reset_run_ctx(run_ctx->saved_run_ctx);
1051 percpu_ref_get(&tr->pcref);
1056 percpu_ref_put(&tr->pcref);
1061 bool sleepable = prog->sleepable;
1068 prog->expected_attach_type == BPF_LSM_CGROUP)
1076 bool sleepable = prog->sleepable;
1083 prog->expected_attach_type == BPF_LSM_CGROUP)
1090 arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
1095 return -ENOTSUPP;
1128 return -ENOTSUPP;