// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_lsm.h>
#include <linux/delay.h>

/* Dummy _ops. The verifier operates on the target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. A 1k-bucket htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex);

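/* ftrace_ops callback: invoked by ftrace when another ftrace_ops with
 * IPMODIFY starts or stops sharing a function this trampoline is
 * attached to, so the trampoline can toggle BPF_TRAMP_F_SHARE_IPMODIFY
 * and regenerate itself accordingly.
 */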
static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd cmd)
{
	struct bpf_trampoline *tr = ops->private;
	int ret = 0;

	if (cmd == FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF) {
		/* This is called inside register_ftrace_direct_multi(), so
		 * tr->mutex is already locked.
		 */
		lockdep_assert_held_once(&tr->mutex);

		/* Instead of updating the trampoline here, we propagate
		 * -EAGAIN to register_ftrace_direct_multi(). Then we can
		 * retry register_ftrace_direct_multi() after updating the
		 * trampoline.
		 */
		if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
		    !(tr->flags & BPF_TRAMP_F_ORIG_STACK)) {
			if (WARN_ON_ONCE(tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY))
				return -EBUSY;

			tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
			return -EAGAIN;
		}

		return 0;
	}

	/* The normal locking order is
	 *    tr->mutex => direct_mutex (ftrace.c) => ftrace_lock (ftrace.c)
	 *
	 * The following two commands are called from
	 *
	 *   prepare_direct_functions_for_ipmodify
	 *   cleanup_direct_functions_after_ipmodify
	 *
	 * In both cases, direct_mutex is already locked. Use
	 * mutex_trylock(&tr->mutex) to avoid a deadlock when something
	 * else is concurrently changing this same trampoline.
	 */
	if (!mutex_trylock(&tr->mutex)) {
		/* sleep 1 ms so that whatever is holding tr->mutex can make
		 * some progress.
		 */
		msleep(1);
		return -EAGAIN;
	}

	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;

		if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
		    !(tr->flags & BPF_TRAMP_F_ORIG_STACK))
			ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
		break;
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		tr->flags &= ~BPF_TRAMP_F_SHARE_IPMODIFY;

		if (tr->flags & BPF_TRAMP_F_ORIG_STACK)
			ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&tr->mutex);
	return ret;
}
#endif

bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;
	enum bpf_prog_type ptype = prog->type;

	return (ptype == BPF_PROG_TYPE_TRACING &&
		(eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
		 eatype == BPF_MODIFY_RETURN)) ||
		(ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
}

void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep the image writable. The alternative would be flipping it
	 * ro/rw every time a program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

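/* Look up the trampoline for @key under trampoline_mutex, taking a
 * reference if found; otherwise allocate, initialize and hash a new one
 * with refcnt 1. Returns NULL on allocation failure.
 */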
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
	if (!tr->fops) {
		kfree(tr);
		tr = NULL;
		goto out;
	}
	tr->fops->private = tr;
	tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
#endif

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

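/* Pin the module that contains tr->func.addr (if any), so it cannot be
 * unloaded while a trampoline is attached inside it.
 */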
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct_multi(tr->fops, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr,
			 bool lock_direct_mutex)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed) {
		if (lock_direct_mutex)
			ret = modify_ftrace_direct_multi(tr->fops, (long)new_addr);
		else
			ret = modify_ftrace_direct_multi_nolock(tr->fops, (long)new_addr);
	} else {
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	}
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	unsigned long faddr;
	int ret;

	faddr = ftrace_location((unsigned long)ip);
	if (faddr) {
		if (!tr->fops)
			return -ENOTSUPP;
		tr->func.ftrace_managed = true;
	}

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed) {
		ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
		ret = register_ftrace_direct_multi(tr->fops, (long)new_addr);
	} else {
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
	}

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

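/* Snapshot the links currently attached to @tr into a freshly allocated
 * bpf_tramp_links array, one slot per BPF_TRAMP_* kind. *total is the
 * overall number of links and *ip_arg is set if any prog needs the
 * traced function's IP passed in (for bpf_get_func_ip).
 */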
static struct bpf_tramp_links *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	struct bpf_tramp_link *link;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link **links;
	int kind;

	*total = 0;
	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tlinks[kind].nr_links = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		links = tlinks[kind].links;

		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= link->link.prog->call_get_func_ip;
			*links++ = link;
		}
	}
	return tlinks;
}

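/* Final teardown step for a trampoline image, shared by the fentry and
 * fexit paths below. Runs from a workqueue because freeing executable
 * memory may sleep.
 */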
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(PAGE_SIZE);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls the original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect the trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (which are the few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPTION=y use call_rcu_tasks() to wait for
	 * the first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure the few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In the !PREEMPTION case, a task that got interrupted in the first
	 * asm insns won't go through an RCU quiescent state, which
	 * percpu_ref_kill would be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* A trampoline without fexit and fmod_ret progs doesn't call the
	 * original function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of the trampoline
	 * asm and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

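/* Allocate a one-page trampoline image, charged against the BPF JIT
 * memory limit, and publish it as a kallsym named
 * "bpf_trampoline_<key>_<idx>" so it is visible in stack traces and to
 * perf. The percpu_ref tracks in-flight executions through the image.
 */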
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(PAGE_SIZE);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

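/* Regenerate the trampoline for the current set of attached links and
 * atomically switch the traced function over to the new image (or tear
 * everything down when no links remain). Called with tr->mutex held.
 */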
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_links *tlinks;
	u32 orig_flags = tr->flags;
	bool ip_arg = false;
	int err, total;

	tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tlinks))
		return PTR_ERR(tlinks);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	/* clear all bits except SHARE_IPMODIFY */
	tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;

	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
		/* NOTE: BPF_TRAMP_F_RESTORE_REGS and BPF_TRAMP_F_SKIP_FRAME
		 * should not be set together.
		 */
		tr->flags |= BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
	} else {
		tr->flags |= BPF_TRAMP_F_RESTORE_REGS;
	}

	if (ip_arg)
		tr->flags |= BPF_TRAMP_F_IP_ARG;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
again:
	if ((tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) &&
	    (tr->flags & BPF_TRAMP_F_CALL_ORIG))
		tr->flags |= BPF_TRAMP_F_ORIG_STACK;
#endif

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, tr->flags, tlinks,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	if (err == -EAGAIN) {
		/* -EAGAIN from bpf_tramp_ftrace_ops_func. Now
		 * BPF_TRAMP_F_SHARE_IPMODIFY is set, so we can generate the
		 * trampoline again and retry registering it.
		 */
		/* reset fops->func and fops->trampoline for re-register */
		tr->fops->func = NULL;
		tr->fops->trampoline = 0;
		goto again;
	}
#endif
	if (err)
		goto out;

	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	/* If any error happens, restore previous flags */
	if (err)
		tr->flags = orig_flags;
	kfree(tlinks);
	return err;
}

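/* Map a prog's expected attach type to its trampoline slot kind. The
 * default case covers extension progs, which replace the target rather
 * than hook into it.
 */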
static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void; we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

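/* Attach @link to @tr. Must be called with tr->mutex held. */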
static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_tramp_link *link_exiting;
	int err = 0;
	int cnt = 0, i;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (tr->extension_prog)
		/* Cannot attach fentry/fexit if an extension prog is
		 * attached. Cannot overwrite the extension prog either.
		 */
		return -EBUSY;

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		cnt += tr->progs_cnt[i];

	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt)
			return -EBUSY;
		tr->extension_prog = link->link.prog;
		return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					  link->link.prog->bpf_func);
	}
	if (cnt >= BPF_MAX_TRAMP_LINKS)
		return -E2BIG;
	if (!hlist_unhashed(&link->tramp_hlist))
		/* prog already linked */
		return -EBUSY;
	hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
		if (link_exiting->link.prog != link->link.prog)
			continue;
		/* prog already linked */
		return -EBUSY;
	}

	hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr, true /* lock_direct_mutex */);
	if (err) {
		hlist_del_init(&link->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
	return err;
}

int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_link_prog(link, tr);
	mutex_unlock(&tr->mutex);
	return err;
}

static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		return err;
	}
	hlist_del_init(&link->tramp_hlist);
	tr->progs_cnt[kind]--;
	return bpf_trampoline_update(tr, true /* lock_direct_mutex */);
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_unlink_prog(link, tr);
	mutex_unlock(&tr->mutex);
	return err;
}

#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
static void bpf_shim_tramp_link_release(struct bpf_link *link)
{
	struct bpf_shim_tramp_link *shim_link =
		container_of(link, struct bpf_shim_tramp_link, link.link);

	/* paired with 'shim_link->trampoline = tr' in bpf_trampoline_link_cgroup_shim */
	if (!shim_link->trampoline)
		return;

	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline));
	bpf_trampoline_put(shim_link->trampoline);
}

static void bpf_shim_tramp_link_dealloc(struct bpf_link *link)
{
	struct bpf_shim_tramp_link *shim_link =
		container_of(link, struct bpf_shim_tramp_link, link.link);

	kfree(shim_link);
}

static const struct bpf_link_ops bpf_shim_tramp_link_lops = {
	.release = bpf_shim_tramp_link_release,
	.dealloc = bpf_shim_tramp_link_dealloc,
};

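/* Build the shim "prog" for a BPF_LSM_CGROUP attachment: a dummy
 * bpf_prog whose bpf_func points at the generated shim, wrapped in a
 * bpf_tramp_link so it can be attached to the LSM hook's trampoline
 * like a regular fexit/fmod_ret prog.
 */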
static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog,
						     bpf_func_t bpf_func,
						     int cgroup_atype)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_prog *p;

	shim_link = kzalloc(sizeof(*shim_link), GFP_USER);
	if (!shim_link)
		return NULL;

	p = bpf_prog_alloc(1, 0);
	if (!p) {
		kfree(shim_link);
		return NULL;
	}

	p->jited = false;
	p->bpf_func = bpf_func;

	p->aux->cgroup_atype = cgroup_atype;
	p->aux->attach_func_proto = prog->aux->attach_func_proto;
	p->aux->attach_btf_id = prog->aux->attach_btf_id;
	p->aux->attach_btf = prog->aux->attach_btf;
	btf_get(p->aux->attach_btf);
	p->type = BPF_PROG_TYPE_LSM;
	p->expected_attach_type = BPF_LSM_MAC;
	bpf_prog_inc(p);
	bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC,
		      &bpf_shim_tramp_link_lops, p);
	bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype);

	return shim_link;
}

static struct bpf_shim_tramp_link *cgroup_shim_find(struct bpf_trampoline *tr,
						    bpf_func_t bpf_func)
{
	struct bpf_tramp_link *link;
	int kind;

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			struct bpf_prog *p = link->link.prog;

			if (p->bpf_func == bpf_func)
				return container_of(link, struct bpf_shim_tramp_link, link);
		}
	}

	return NULL;
}

int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
				    int cgroup_atype)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_attach_target_info tgt_info = {};
	struct bpf_trampoline *tr;
	bpf_func_t bpf_func;
	u64 key;
	int err;

	err = bpf_check_attach_target(NULL, prog, NULL,
				      prog->aux->attach_btf_id,
				      &tgt_info);
	if (err)
		return err;

	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);

	bpf_lsm_find_cgroup_shim(prog, &bpf_func);
	tr = bpf_trampoline_get(key, &tgt_info);
	if (!tr)
		return -ENOMEM;

	mutex_lock(&tr->mutex);

	shim_link = cgroup_shim_find(tr, bpf_func);
	if (shim_link) {
		/* Reuse the existing shim attached by another program. */
		bpf_link_inc(&shim_link->link.link);

		mutex_unlock(&tr->mutex);
		bpf_trampoline_put(tr); /* bpf_trampoline_get above */
		return 0;
	}

	/* Allocate and install a new shim. */

	shim_link = cgroup_shim_alloc(prog, bpf_func, cgroup_atype);
	if (!shim_link) {
		err = -ENOMEM;
		goto err;
	}

	err = __bpf_trampoline_link_prog(&shim_link->link, tr);
	if (err)
		goto err;

	shim_link->trampoline = tr;
	/* note, we're still holding the tr refcnt from above */

	mutex_unlock(&tr->mutex);

	return 0;
err:
	mutex_unlock(&tr->mutex);

	if (shim_link)
		bpf_link_put(&shim_link->link.link);

	/* have to release tr while _not_ holding its mutex */
	bpf_trampoline_put(tr); /* bpf_trampoline_get above */

	return err;
}

void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_trampoline *tr;
	bpf_func_t bpf_func;
	u64 key;

	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);

	bpf_lsm_find_cgroup_shim(prog, &bpf_func);
	tr = bpf_trampoline_lookup(key);
	if (WARN_ON_ONCE(!tr))
		return;

	mutex_lock(&tr->mutex);
	shim_link = cgroup_shim_find(tr, bpf_func);
	mutex_unlock(&tr->mutex);

	if (shim_link)
		bpf_link_put(&shim_link->link.link);

	bpf_trampoline_put(tr); /* bpf_trampoline_lookup above */
}
#endif

struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	int i;

	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
			goto out;

	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	if (tr->fops) {
		ftrace_free_filter(tr->fops);
		kfree(tr->fops);
	}
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

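/* sched_clock() can legitimately return 0 and stats can be toggled at
 * any time, so NO_START_TIME (1) marks "no valid timestamp";
 * update_prog_stats() only records runs whose start is > NO_START_TIME.
 */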
#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	/* u64_stats_update_begin_irqsave() returns unsigned long */
	unsigned long flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is the start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
					struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	/* Runtime stats are exported via actual BPF_LSM_CGROUP
	 * programs, not the shims.
	 */
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return NO_START_TIME;
}

void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
					struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();

	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
				       struct bpf_tramp_run_ctx *run_ctx)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

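/* Called from the trampoline's generated prologue/epilogue: pin the
 * image with a percpu_ref for as long as the original function (and the
 * trampoline body around it) may still be executing.
 */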
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

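/* Weak fallback for architectures without BPF trampoline support; the
 * real implementation lives in the arch's JIT (e.g. arch/x86/net/).
 */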
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_links *tlinks,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);