xref: /linux/kernel/tracepoint.c (revision 9913d5745bd720c4266805c8d29952a3702e4eca)
11a59d1b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
297e1c18eSMathieu Desnoyers /*
3de7b2973SMathieu Desnoyers  * Copyright (C) 2008-2014 Mathieu Desnoyers
497e1c18eSMathieu Desnoyers  */
597e1c18eSMathieu Desnoyers #include <linux/module.h>
697e1c18eSMathieu Desnoyers #include <linux/mutex.h>
797e1c18eSMathieu Desnoyers #include <linux/types.h>
897e1c18eSMathieu Desnoyers #include <linux/jhash.h>
997e1c18eSMathieu Desnoyers #include <linux/list.h>
1097e1c18eSMathieu Desnoyers #include <linux/rcupdate.h>
1197e1c18eSMathieu Desnoyers #include <linux/tracepoint.h>
1297e1c18eSMathieu Desnoyers #include <linux/err.h>
1397e1c18eSMathieu Desnoyers #include <linux/slab.h>
143f07c014SIngo Molnar #include <linux/sched/signal.h>
1529930025SIngo Molnar #include <linux/sched/task.h>
16c5905afbSIngo Molnar #include <linux/static_key.h>
1797e1c18eSMathieu Desnoyers 
189c0be3f6SMathieu Desnoyers extern tracepoint_ptr_t __start___tracepoints_ptrs[];
199c0be3f6SMathieu Desnoyers extern tracepoint_ptr_t __stop___tracepoints_ptrs[];
2097e1c18eSMathieu Desnoyers 
21e6753f23SJoel Fernandes (Google) DEFINE_SRCU(tracepoint_srcu);
22e6753f23SJoel Fernandes (Google) EXPORT_SYMBOL_GPL(tracepoint_srcu);
23e6753f23SJoel Fernandes (Google) 
2497e1c18eSMathieu Desnoyers /* Set to 1 to enable tracepoint debug output */
2597e1c18eSMathieu Desnoyers static const int tracepoint_debug;
2697e1c18eSMathieu Desnoyers 
27b75ef8b4SMathieu Desnoyers #ifdef CONFIG_MODULES
28de7b2973SMathieu Desnoyers /*
29de7b2973SMathieu Desnoyers  * Tracepoint module list mutex protects the local module list.
30de7b2973SMathieu Desnoyers  */
31de7b2973SMathieu Desnoyers static DEFINE_MUTEX(tracepoint_module_list_mutex);
32de7b2973SMathieu Desnoyers 
33de7b2973SMathieu Desnoyers /* Local list of struct tp_module */
34b75ef8b4SMathieu Desnoyers static LIST_HEAD(tracepoint_module_list);
35b75ef8b4SMathieu Desnoyers #endif /* CONFIG_MODULES */
36b75ef8b4SMathieu Desnoyers 
3797e1c18eSMathieu Desnoyers /*
38de7b2973SMathieu Desnoyers  * tracepoints_mutex protects the builtin and module tracepoints.
39de7b2973SMathieu Desnoyers  * tracepoints_mutex nests inside tracepoint_module_list_mutex.
4097e1c18eSMathieu Desnoyers  */
41de7b2973SMathieu Desnoyers static DEFINE_MUTEX(tracepoints_mutex);
4297e1c18eSMathieu Desnoyers 
43f8a79d5cSSteven Rostedt (VMware) static struct rcu_head *early_probes;
44f8a79d5cSSteven Rostedt (VMware) static bool ok_to_free_tracepoints;
45f8a79d5cSSteven Rostedt (VMware) 
4697e1c18eSMathieu Desnoyers /*
4797e1c18eSMathieu Desnoyers  * Note about RCU:
48fd589a8fSAnand Gadiyar  * It is used to delay the freeing of old probe arrays until a quiescent
4997e1c18eSMathieu Desnoyers  * state is reached.
5097e1c18eSMathieu Desnoyers  */
5119dba33cSLai Jiangshan struct tp_probes {
5219dba33cSLai Jiangshan 	struct rcu_head rcu;
539d0a49c7SGustavo A. R. Silva 	struct tracepoint_func probes[];
5419dba33cSLai Jiangshan };
5597e1c18eSMathieu Desnoyers 
56befe6d94SSteven Rostedt (VMware) /* Stub used when removing a func but allocation of a new tp_funcs array fails */
57befe6d94SSteven Rostedt (VMware) static void tp_stub_func(void)
58befe6d94SSteven Rostedt (VMware) {
59befe6d94SSteven Rostedt (VMware) 	return;
60befe6d94SSteven Rostedt (VMware) }
61befe6d94SSteven Rostedt (VMware) 
6219dba33cSLai Jiangshan static inline void *allocate_probes(int count)
6397e1c18eSMathieu Desnoyers {
64f0553dcbSGustavo A. R. Silva 	struct tp_probes *p  = kmalloc(struct_size(p, probes, count),
65f0553dcbSGustavo A. R. Silva 				       GFP_KERNEL);
6619dba33cSLai Jiangshan 	return p == NULL ? NULL : p->probes;
6797e1c18eSMathieu Desnoyers }
6897e1c18eSMathieu Desnoyers 
69e6753f23SJoel Fernandes (Google) static void srcu_free_old_probes(struct rcu_head *head)
7097e1c18eSMathieu Desnoyers {
710dea6d52SMathieu Desnoyers 	kfree(container_of(head, struct tp_probes, rcu));
7219dba33cSLai Jiangshan }
7319dba33cSLai Jiangshan 
74e6753f23SJoel Fernandes (Google) static void rcu_free_old_probes(struct rcu_head *head)
75e6753f23SJoel Fernandes (Google) {
76e6753f23SJoel Fernandes (Google) 	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
77e6753f23SJoel Fernandes (Google) }
78e6753f23SJoel Fernandes (Google) 
79f8a79d5cSSteven Rostedt (VMware) static __init int release_early_probes(void)
80f8a79d5cSSteven Rostedt (VMware) {
81f8a79d5cSSteven Rostedt (VMware) 	struct rcu_head *tmp;
82f8a79d5cSSteven Rostedt (VMware) 
83f8a79d5cSSteven Rostedt (VMware) 	ok_to_free_tracepoints = true;
84f8a79d5cSSteven Rostedt (VMware) 
85f8a79d5cSSteven Rostedt (VMware) 	while (early_probes) {
86f8a79d5cSSteven Rostedt (VMware) 		tmp = early_probes;
87f8a79d5cSSteven Rostedt (VMware) 		early_probes = tmp->next;
8874401729SPaul E. McKenney 		call_rcu(tmp, rcu_free_old_probes);
89f8a79d5cSSteven Rostedt (VMware) 	}
90f8a79d5cSSteven Rostedt (VMware) 
91f8a79d5cSSteven Rostedt (VMware) 	return 0;
92f8a79d5cSSteven Rostedt (VMware) }
93f8a79d5cSSteven Rostedt (VMware) 
94f8a79d5cSSteven Rostedt (VMware) /* SRCU is initialized at core_initcall */
95f8a79d5cSSteven Rostedt (VMware) postcore_initcall(release_early_probes);
96f8a79d5cSSteven Rostedt (VMware) 
9738516ab5SSteven Rostedt static inline void release_probes(struct tracepoint_func *old)
9819dba33cSLai Jiangshan {
9919dba33cSLai Jiangshan 	if (old) {
10019dba33cSLai Jiangshan 		struct tp_probes *tp_probes = container_of(old,
10119dba33cSLai Jiangshan 			struct tp_probes, probes[0]);
102f8a79d5cSSteven Rostedt (VMware) 
103f8a79d5cSSteven Rostedt (VMware) 		/*
104f8a79d5cSSteven Rostedt (VMware) 		 * We can't free probes if SRCU is not initialized yet.
105f8a79d5cSSteven Rostedt (VMware) 		 * Postpone the freeing till after SRCU is initialized.
106f8a79d5cSSteven Rostedt (VMware) 		 */
107f8a79d5cSSteven Rostedt (VMware) 		if (unlikely(!ok_to_free_tracepoints)) {
108f8a79d5cSSteven Rostedt (VMware) 			tp_probes->rcu.next = early_probes;
109f8a79d5cSSteven Rostedt (VMware) 			early_probes = &tp_probes->rcu;
110f8a79d5cSSteven Rostedt (VMware) 			return;
111f8a79d5cSSteven Rostedt (VMware) 		}
112f8a79d5cSSteven Rostedt (VMware) 
113e6753f23SJoel Fernandes (Google) 		/*
114e6753f23SJoel Fernandes (Google) 		 * Tracepoint probes are protected by both sched RCU and SRCU;
115e6753f23SJoel Fernandes (Google) 		 * by calling the SRCU callback from the sched RCU callback we
116e6753f23SJoel Fernandes (Google) 		 * cover both cases. So let us chain the SRCU and sched RCU
117e6753f23SJoel Fernandes (Google) 		 * callbacks to wait for both grace periods.
118e6753f23SJoel Fernandes (Google) 		 */
11974401729SPaul E. McKenney 		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
12019dba33cSLai Jiangshan 	}
12197e1c18eSMathieu Desnoyers }
12297e1c18eSMathieu Desnoyers 
123de7b2973SMathieu Desnoyers static void debug_print_probes(struct tracepoint_func *funcs)
12497e1c18eSMathieu Desnoyers {
12597e1c18eSMathieu Desnoyers 	int i;
12697e1c18eSMathieu Desnoyers 
127de7b2973SMathieu Desnoyers 	if (!tracepoint_debug || !funcs)
12897e1c18eSMathieu Desnoyers 		return;
12997e1c18eSMathieu Desnoyers 
130de7b2973SMathieu Desnoyers 	for (i = 0; funcs[i].func; i++)
131de7b2973SMathieu Desnoyers 		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
13297e1c18eSMathieu Desnoyers }
13397e1c18eSMathieu Desnoyers 
1347904b5c4SSteven Rostedt (Red Hat) static struct tracepoint_func *
1357904b5c4SSteven Rostedt (Red Hat) func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
1367904b5c4SSteven Rostedt (Red Hat) 	 int prio)
13797e1c18eSMathieu Desnoyers {
13838516ab5SSteven Rostedt 	struct tracepoint_func *old, *new;
1397211f0a2SSteven Rostedt (VMware) 	int iter_probes;	/* Iterate over old probe array. */
1407211f0a2SSteven Rostedt (VMware) 	int nr_probes = 0;	/* Counter for probes */
1417211f0a2SSteven Rostedt (VMware) 	int pos = -1;		/* Insertion position into new array */
14297e1c18eSMathieu Desnoyers 
143de7b2973SMathieu Desnoyers 	if (WARN_ON(!tp_func->func))
1444c69e6eaSSahara 		return ERR_PTR(-EINVAL);
14597e1c18eSMathieu Desnoyers 
146de7b2973SMathieu Desnoyers 	debug_print_probes(*funcs);
147de7b2973SMathieu Desnoyers 	old = *funcs;
14897e1c18eSMathieu Desnoyers 	if (old) {
14997e1c18eSMathieu Desnoyers 		/* (N -> N+1), (N != 0, 1) probes */
1507211f0a2SSteven Rostedt (VMware) 		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
1517211f0a2SSteven Rostedt (VMware) 			if (old[iter_probes].func == tp_stub_func)
1527211f0a2SSteven Rostedt (VMware) 				continue;	/* Skip stub functions. */
1537211f0a2SSteven Rostedt (VMware) 			if (old[iter_probes].func == tp_func->func &&
1547211f0a2SSteven Rostedt (VMware) 			    old[iter_probes].data == tp_func->data)
15597e1c18eSMathieu Desnoyers 				return ERR_PTR(-EEXIST);
1567211f0a2SSteven Rostedt (VMware) 			nr_probes++;
15797e1c18eSMathieu Desnoyers 		}
1587904b5c4SSteven Rostedt (Red Hat) 	}
1597211f0a2SSteven Rostedt (VMware) 	/* + 2: one for new probe, one for NULL func */
1607211f0a2SSteven Rostedt (VMware) 	new = allocate_probes(nr_probes + 2);
16197e1c18eSMathieu Desnoyers 	if (new == NULL)
16297e1c18eSMathieu Desnoyers 		return ERR_PTR(-ENOMEM);
1637904b5c4SSteven Rostedt (Red Hat) 	if (old) {
1647211f0a2SSteven Rostedt (VMware) 		nr_probes = 0;
1657211f0a2SSteven Rostedt (VMware) 		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
1667211f0a2SSteven Rostedt (VMware) 			if (old[iter_probes].func == tp_stub_func)
167befe6d94SSteven Rostedt (VMware) 				continue;
1687211f0a2SSteven Rostedt (VMware) 			/* Insert before probes of lower priority */
1697211f0a2SSteven Rostedt (VMware) 			if (pos < 0 && old[iter_probes].prio < prio)
1707211f0a2SSteven Rostedt (VMware) 				pos = nr_probes++;
1717211f0a2SSteven Rostedt (VMware) 			new[nr_probes++] = old[iter_probes];
172befe6d94SSteven Rostedt (VMware) 		}
173befe6d94SSteven Rostedt (VMware) 		if (pos < 0)
1747211f0a2SSteven Rostedt (VMware) 			pos = nr_probes++;
1757211f0a2SSteven Rostedt (VMware) 		/* nr_probes now points to the end of the new array */
1767904b5c4SSteven Rostedt (Red Hat) 	} else {
1777904b5c4SSteven Rostedt (Red Hat) 		pos = 0;
1787211f0a2SSteven Rostedt (VMware) 		nr_probes = 1; /* must point at end of array */
1797211f0a2SSteven Rostedt (VMware) 	}
1807904b5c4SSteven Rostedt (Red Hat) 	new[pos] = *tp_func;
1817211f0a2SSteven Rostedt (VMware) 	new[nr_probes].func = NULL;
182de7b2973SMathieu Desnoyers 	*funcs = new;
183de7b2973SMathieu Desnoyers 	debug_print_probes(*funcs);
18497e1c18eSMathieu Desnoyers 	return old;
18597e1c18eSMathieu Desnoyers }
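
/*
 * For example (hypothetical probes A, B and C): func_add() keeps the array
 * sorted by descending prio, inserting the new probe just before the first
 * entry with a strictly lower prio, so probes of equal prio stay in
 * registration order:
 *
 *	old = { A(prio=100), B(prio=10), NULL }
 *	func_add(&funcs, C, prio=50)
 *	new = { A(prio=100), C(prio=50), B(prio=10), NULL }
 */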
18697e1c18eSMathieu Desnoyers 
187de7b2973SMathieu Desnoyers static void *func_remove(struct tracepoint_func **funcs,
188de7b2973SMathieu Desnoyers 		struct tracepoint_func *tp_func)
18997e1c18eSMathieu Desnoyers {
19097e1c18eSMathieu Desnoyers 	int nr_probes = 0, nr_del = 0, i;
19138516ab5SSteven Rostedt 	struct tracepoint_func *old, *new;
19297e1c18eSMathieu Desnoyers 
193de7b2973SMathieu Desnoyers 	old = *funcs;
19497e1c18eSMathieu Desnoyers 
195f66af459SFrederic Weisbecker 	if (!old)
19619dba33cSLai Jiangshan 		return ERR_PTR(-ENOENT);
197f66af459SFrederic Weisbecker 
198de7b2973SMathieu Desnoyers 	debug_print_probes(*funcs);
19997e1c18eSMathieu Desnoyers 	/* (N -> M), (N > 1, M >= 0) probes */
200de7b2973SMathieu Desnoyers 	if (tp_func->func) {
20138516ab5SSteven Rostedt 		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
202befe6d94SSteven Rostedt (VMware) 			if ((old[nr_probes].func == tp_func->func &&
203befe6d94SSteven Rostedt (VMware) 			     old[nr_probes].data == tp_func->data) ||
204befe6d94SSteven Rostedt (VMware) 			    old[nr_probes].func == tp_stub_func)
20597e1c18eSMathieu Desnoyers 				nr_del++;
20697e1c18eSMathieu Desnoyers 		}
2074c69e6eaSSahara 	}
20897e1c18eSMathieu Desnoyers 
2094c69e6eaSSahara 	/*
2104c69e6eaSSahara 	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
2114c69e6eaSSahara 	 * entire entry will be removed.
2124c69e6eaSSahara 	 */
21397e1c18eSMathieu Desnoyers 	if (nr_probes - nr_del == 0) {
21497e1c18eSMathieu Desnoyers 		/* N -> 0, (N > 1) */
215de7b2973SMathieu Desnoyers 		*funcs = NULL;
216de7b2973SMathieu Desnoyers 		debug_print_probes(*funcs);
21797e1c18eSMathieu Desnoyers 		return old;
21897e1c18eSMathieu Desnoyers 	} else {
21997e1c18eSMathieu Desnoyers 		int j = 0;
22097e1c18eSMathieu Desnoyers 		/* N -> M, (N > 1, M > 0) */
22197e1c18eSMathieu Desnoyers 		/* + 1 for NULL */
22219dba33cSLai Jiangshan 		new = allocate_probes(nr_probes - nr_del + 1);
223befe6d94SSteven Rostedt (VMware) 		if (new) {
2247211f0a2SSteven Rostedt (VMware) 			for (i = 0; old[i].func; i++) {
2257211f0a2SSteven Rostedt (VMware) 				if ((old[i].func != tp_func->func ||
2267211f0a2SSteven Rostedt (VMware) 				     old[i].data != tp_func->data) &&
2277211f0a2SSteven Rostedt (VMware) 				    old[i].func != tp_stub_func)
22897e1c18eSMathieu Desnoyers 					new[j++] = old[i];
2297211f0a2SSteven Rostedt (VMware) 			}
23038516ab5SSteven Rostedt 			new[nr_probes - nr_del].func = NULL;
231de7b2973SMathieu Desnoyers 			*funcs = new;
232befe6d94SSteven Rostedt (VMware) 		} else {
233befe6d94SSteven Rostedt (VMware) 			/*
234befe6d94SSteven Rostedt (VMware) 			 * Failed to allocate; replace the old function
235befe6d94SSteven Rostedt (VMware) 			 * with calls to tp_stub_func.
236befe6d94SSteven Rostedt (VMware) 			 */
2377211f0a2SSteven Rostedt (VMware) 			for (i = 0; old[i].func; i++) {
238befe6d94SSteven Rostedt (VMware) 				if (old[i].func == tp_func->func &&
2397211f0a2SSteven Rostedt (VMware) 				    old[i].data == tp_func->data)
2407211f0a2SSteven Rostedt (VMware) 					WRITE_ONCE(old[i].func, tp_stub_func);
241befe6d94SSteven Rostedt (VMware) 			}
242befe6d94SSteven Rostedt (VMware) 			*funcs = old;
243befe6d94SSteven Rostedt (VMware) 		}
24497e1c18eSMathieu Desnoyers 	}
245de7b2973SMathieu Desnoyers 	debug_print_probes(*funcs);
24697e1c18eSMathieu Desnoyers 	return old;
24797e1c18eSMathieu Desnoyers }
24897e1c18eSMathieu Desnoyers 
249547305a6SSteven Rostedt (VMware) static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs, bool sync)
250d25e37d8SSteven Rostedt (VMware) {
251d25e37d8SSteven Rostedt (VMware) 	void *func = tp->iterator;
252d25e37d8SSteven Rostedt (VMware) 
253d25e37d8SSteven Rostedt (VMware) 	/* Synthetic events do not have static call sites */
254d25e37d8SSteven Rostedt (VMware) 	if (!tp->static_call_key)
255d25e37d8SSteven Rostedt (VMware) 		return;
256d25e37d8SSteven Rostedt (VMware) 
257547305a6SSteven Rostedt (VMware) 	if (!tp_funcs[1].func) {
258d25e37d8SSteven Rostedt (VMware) 		func = tp_funcs[0].func;
259547305a6SSteven Rostedt (VMware) 		/*
260547305a6SSteven Rostedt (VMware) 		 * If going from the iterator back to a single caller,
261547305a6SSteven Rostedt (VMware) 		 * we need to synchronize with __DO_TRACE to make sure
262547305a6SSteven Rostedt (VMware) 		 * that the data passed to the callback is the one that
263547305a6SSteven Rostedt (VMware) 		 * belongs to that callback.
264547305a6SSteven Rostedt (VMware) 		 */
265547305a6SSteven Rostedt (VMware) 		if (sync)
266547305a6SSteven Rostedt (VMware) 			tracepoint_synchronize_unregister();
267547305a6SSteven Rostedt (VMware) 	}
268d25e37d8SSteven Rostedt (VMware) 
269d25e37d8SSteven Rostedt (VMware) 	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
270d25e37d8SSteven Rostedt (VMware) }
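
/*
 * Dispatch sketch (hypothetical tracepoint "foo"): with a single registered
 * probe the static call branches straight to that probe; with two or more,
 * it branches to tp->iterator, which walks tp->funcs under RCU.  Roughly:
 *
 *	trace_foo(args)
 *	  -> static_call(tp_func_foo)(__data, args)
 *	       -> the single probe directly, or
 *	       -> __traceiter_foo(): for each tp->funcs[i], funcs[i].func(data, args)
 */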
271d25e37d8SSteven Rostedt (VMware) 
27297e1c18eSMathieu Desnoyers /*
273de7b2973SMathieu Desnoyers  * Add the probe function to a tracepoint.
27497e1c18eSMathieu Desnoyers  */
275de7b2973SMathieu Desnoyers static int tracepoint_add_func(struct tracepoint *tp,
276*9913d574SSteven Rostedt (VMware) 			       struct tracepoint_func *func, int prio,
277*9913d574SSteven Rostedt (VMware) 			       bool warn)
27897e1c18eSMathieu Desnoyers {
279de7b2973SMathieu Desnoyers 	struct tracepoint_func *old, *tp_funcs;
2808cf868afSSteven Rostedt (Red Hat) 	int ret;
28197e1c18eSMathieu Desnoyers 
2828cf868afSSteven Rostedt (Red Hat) 	if (tp->regfunc && !static_key_enabled(&tp->key)) {
2838cf868afSSteven Rostedt (Red Hat) 		ret = tp->regfunc();
2848cf868afSSteven Rostedt (Red Hat) 		if (ret < 0)
2858cf868afSSteven Rostedt (Red Hat) 			return ret;
2868cf868afSSteven Rostedt (Red Hat) 	}
28797e1c18eSMathieu Desnoyers 
288b725dfeaSMathieu Desnoyers 	tp_funcs = rcu_dereference_protected(tp->funcs,
289b725dfeaSMathieu Desnoyers 			lockdep_is_held(&tracepoints_mutex));
2907904b5c4SSteven Rostedt (Red Hat) 	old = func_add(&tp_funcs, func, prio);
291de7b2973SMathieu Desnoyers 	if (IS_ERR(old)) {
292*9913d574SSteven Rostedt (VMware) 		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
293de7b2973SMathieu Desnoyers 		return PTR_ERR(old);
29497e1c18eSMathieu Desnoyers 	}
29597419875SJosh Stone 
29697e1c18eSMathieu Desnoyers 	/*
297243d1a79SPaul E. McKenney 	 * rcu_assign_pointer has an smp_store_release() which makes sure
298243d1a79SPaul E. McKenney 	 * that the new probe callbacks array is consistent before setting
299243d1a79SPaul E. McKenney 	 * a pointer to it.  This array is referenced by __DO_TRACE from
300243d1a79SPaul E. McKenney 	 * include/linux/tracepoint.h using rcu_dereference_sched().
30197e1c18eSMathieu Desnoyers 	 */
302de7b2973SMathieu Desnoyers 	rcu_assign_pointer(tp->funcs, tp_funcs);
303547305a6SSteven Rostedt (VMware) 	tracepoint_update_call(tp, tp_funcs, false);
304d25e37d8SSteven Rostedt (VMware) 	static_key_enable(&tp->key);
305d25e37d8SSteven Rostedt (VMware) 
3068058bd0fSMathieu Desnoyers 	release_probes(old);
307de7b2973SMathieu Desnoyers 	return 0;
30897e1c18eSMathieu Desnoyers }
30997e1c18eSMathieu Desnoyers 
31097e1c18eSMathieu Desnoyers /*
311de7b2973SMathieu Desnoyers  * Remove a probe function from a tracepoint.
31297e1c18eSMathieu Desnoyers  * Note: only waiting for an RCU grace period after setting elem->call to the
31397e1c18eSMathieu Desnoyers  * empty function ensures that the original callback is not used anymore. This
31497e1c18eSMathieu Desnoyers  * is ensured by preempt_disable around the call site.
31597e1c18eSMathieu Desnoyers  */
316de7b2973SMathieu Desnoyers static int tracepoint_remove_func(struct tracepoint *tp,
317de7b2973SMathieu Desnoyers 		struct tracepoint_func *func)
31897e1c18eSMathieu Desnoyers {
319de7b2973SMathieu Desnoyers 	struct tracepoint_func *old, *tp_funcs;
32097419875SJosh Stone 
321b725dfeaSMathieu Desnoyers 	tp_funcs = rcu_dereference_protected(tp->funcs,
322b725dfeaSMathieu Desnoyers 			lockdep_is_held(&tracepoints_mutex));
323de7b2973SMathieu Desnoyers 	old = func_remove(&tp_funcs, func);
324befe6d94SSteven Rostedt (VMware) 	if (WARN_ON_ONCE(IS_ERR(old)))
325de7b2973SMathieu Desnoyers 		return PTR_ERR(old);
326befe6d94SSteven Rostedt (VMware) 
327befe6d94SSteven Rostedt (VMware) 	if (tp_funcs == old)
328befe6d94SSteven Rostedt (VMware) 		/* Failed allocating new tp_funcs, replaced func with stub */
329befe6d94SSteven Rostedt (VMware) 		return 0;
33097e1c18eSMathieu Desnoyers 
331de7b2973SMathieu Desnoyers 	if (!tp_funcs) {
332de7b2973SMathieu Desnoyers 		/* Removed last function */
333de7b2973SMathieu Desnoyers 		if (tp->unregfunc && static_key_enabled(&tp->key))
334de7b2973SMathieu Desnoyers 			tp->unregfunc();
33597e1c18eSMathieu Desnoyers 
336d25e37d8SSteven Rostedt (VMware) 		static_key_disable(&tp->key);
337de7b2973SMathieu Desnoyers 		rcu_assign_pointer(tp->funcs, tp_funcs);
338547305a6SSteven Rostedt (VMware) 	} else {
339547305a6SSteven Rostedt (VMware) 		rcu_assign_pointer(tp->funcs, tp_funcs);
340547305a6SSteven Rostedt (VMware) 		tracepoint_update_call(tp, tp_funcs,
341547305a6SSteven Rostedt (VMware) 				       tp_funcs[0].func != old[0].func);
342547305a6SSteven Rostedt (VMware) 	}
3438058bd0fSMathieu Desnoyers 	release_probes(old);
344de7b2973SMathieu Desnoyers 	return 0;
345127cafbbSLai Jiangshan }
346127cafbbSLai Jiangshan 
34797e1c18eSMathieu Desnoyers /**
348*9913d574SSteven Rostedt (VMware)  * tracepoint_probe_register_prio_may_exist -  Connect a probe to a tracepoint with priority
349*9913d574SSteven Rostedt (VMware)  * @tp: tracepoint
350*9913d574SSteven Rostedt (VMware)  * @probe: probe handler
351*9913d574SSteven Rostedt (VMware)  * @data: tracepoint data
352*9913d574SSteven Rostedt (VMware)  * @prio: priority of this function over other registered functions
353*9913d574SSteven Rostedt (VMware)  *
354*9913d574SSteven Rostedt (VMware)  * Same as tracepoint_probe_register_prio() except that it will not warn
355*9913d574SSteven Rostedt (VMware)  * if the same probe is already registered on the tracepoint.
356*9913d574SSteven Rostedt (VMware)  */
357*9913d574SSteven Rostedt (VMware) int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
358*9913d574SSteven Rostedt (VMware) 					     void *data, int prio)
359*9913d574SSteven Rostedt (VMware) {
360*9913d574SSteven Rostedt (VMware) 	struct tracepoint_func tp_func;
361*9913d574SSteven Rostedt (VMware) 	int ret;
362*9913d574SSteven Rostedt (VMware) 
363*9913d574SSteven Rostedt (VMware) 	mutex_lock(&tracepoints_mutex);
364*9913d574SSteven Rostedt (VMware) 	tp_func.func = probe;
365*9913d574SSteven Rostedt (VMware) 	tp_func.data = data;
366*9913d574SSteven Rostedt (VMware) 	tp_func.prio = prio;
367*9913d574SSteven Rostedt (VMware) 	ret = tracepoint_add_func(tp, &tp_func, prio, false);
368*9913d574SSteven Rostedt (VMware) 	mutex_unlock(&tracepoints_mutex);
369*9913d574SSteven Rostedt (VMware) 	return ret;
370*9913d574SSteven Rostedt (VMware) }
371*9913d574SSteven Rostedt (VMware) EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
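
/*
 * Usage sketch (hypothetical tracepoint and probe, see also the sketch after
 * tracepoint_probe_register_prio() below): an -EEXIST return is an expected
 * outcome here rather than a warning, so callers typically tolerate it:
 *
 *	ret = tracepoint_probe_register_prio_may_exist(&__tracepoint_my_sample,
 *						       (void *)my_probe, NULL,
 *						       TRACEPOINT_DEFAULT_PRIO);
 *	if (ret && ret != -EEXIST)
 *		return ret;
 */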
372*9913d574SSteven Rostedt (VMware) 
373*9913d574SSteven Rostedt (VMware) /**
374f39e2391SLee, Chun-Yi  * tracepoint_probe_register_prio -  Connect a probe to a tracepoint with priority
375de7b2973SMathieu Desnoyers  * @tp: tracepoint
37697e1c18eSMathieu Desnoyers  * @probe: probe handler
377cac92ba7SFabian Frederick  * @data: tracepoint data
3787904b5c4SSteven Rostedt (Red Hat)  * @prio: priority of this function over other registered functions
3797904b5c4SSteven Rostedt (Red Hat)  *
3807904b5c4SSteven Rostedt (Red Hat)  * Returns 0 if ok, error value on error.
3817904b5c4SSteven Rostedt (Red Hat)  * Note: if @tp is within a module, the caller is responsible for
3827904b5c4SSteven Rostedt (Red Hat)  * unregistering the probe before the module is gone. This can be
3837904b5c4SSteven Rostedt (Red Hat)  * performed either with a tracepoint module going notifier, or from
3847904b5c4SSteven Rostedt (Red Hat)  * within module exit functions.
3857904b5c4SSteven Rostedt (Red Hat)  */
3867904b5c4SSteven Rostedt (Red Hat) int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
3877904b5c4SSteven Rostedt (Red Hat) 				   void *data, int prio)
3887904b5c4SSteven Rostedt (Red Hat) {
3897904b5c4SSteven Rostedt (Red Hat) 	struct tracepoint_func tp_func;
3907904b5c4SSteven Rostedt (Red Hat) 	int ret;
3917904b5c4SSteven Rostedt (Red Hat) 
3927904b5c4SSteven Rostedt (Red Hat) 	mutex_lock(&tracepoints_mutex);
3937904b5c4SSteven Rostedt (Red Hat) 	tp_func.func = probe;
3947904b5c4SSteven Rostedt (Red Hat) 	tp_func.data = data;
3957904b5c4SSteven Rostedt (Red Hat) 	tp_func.prio = prio;
396*9913d574SSteven Rostedt (VMware) 	ret = tracepoint_add_func(tp, &tp_func, prio, true);
3977904b5c4SSteven Rostedt (Red Hat) 	mutex_unlock(&tracepoints_mutex);
3987904b5c4SSteven Rostedt (Red Hat) 	return ret;
3997904b5c4SSteven Rostedt (Red Hat) }
4007904b5c4SSteven Rostedt (Red Hat) EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);
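
/*
 * Usage sketch (the my_sample tracepoint and my_probe callback are
 * hypothetical, assumed to be declared elsewhere with TRACE_EVENT() or
 * DECLARE_TRACE()): a probe receives the registered @data as its first
 * argument, followed by the tracepoint's TP_PROTO arguments, and a higher
 * @prio is called earlier.  In-tree users normally go through the generated
 * register_trace_<name>() wrappers, which end up here:
 *
 *	static void my_probe(void *data, int value)
 *	{
 *		pr_info("my_sample fired: %d\n", value);
 *	}
 *
 *	ret = tracepoint_probe_register_prio(&__tracepoint_my_sample,
 *					     (void *)my_probe, NULL,
 *					     TRACEPOINT_DEFAULT_PRIO + 1);
 */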
4017904b5c4SSteven Rostedt (Red Hat) 
4027904b5c4SSteven Rostedt (Red Hat) /**
4037904b5c4SSteven Rostedt (Red Hat)  * tracepoint_probe_register -  Connect a probe to a tracepoint
4047904b5c4SSteven Rostedt (Red Hat)  * @tp: tracepoint
4057904b5c4SSteven Rostedt (Red Hat)  * @probe: probe handler
4067904b5c4SSteven Rostedt (Red Hat)  * @data: tracepoint data
40797e1c18eSMathieu Desnoyers  *
408de7b2973SMathieu Desnoyers  * Returns 0 if ok, error value on error.
409de7b2973SMathieu Desnoyers  * Note: if @tp is within a module, the caller is responsible for
410de7b2973SMathieu Desnoyers  * unregistering the probe before the module is gone. This can be
411de7b2973SMathieu Desnoyers  * performed either with a tracepoint module going notifier, or from
412de7b2973SMathieu Desnoyers  * within module exit functions.
41397e1c18eSMathieu Desnoyers  */
414de7b2973SMathieu Desnoyers int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
41597e1c18eSMathieu Desnoyers {
4167904b5c4SSteven Rostedt (Red Hat) 	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
41797e1c18eSMathieu Desnoyers }
41897e1c18eSMathieu Desnoyers EXPORT_SYMBOL_GPL(tracepoint_probe_register);
41997e1c18eSMathieu Desnoyers 
42097e1c18eSMathieu Desnoyers /**
42197e1c18eSMathieu Desnoyers  * tracepoint_probe_unregister -  Disconnect a probe from a tracepoint
422de7b2973SMathieu Desnoyers  * @tp: tracepoint
42397e1c18eSMathieu Desnoyers  * @probe: probe function pointer
424cac92ba7SFabian Frederick  * @data: tracepoint data
42597e1c18eSMathieu Desnoyers  *
426de7b2973SMathieu Desnoyers  * Returns 0 if ok, error value on error.
42797e1c18eSMathieu Desnoyers  */
428de7b2973SMathieu Desnoyers int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
42997e1c18eSMathieu Desnoyers {
430de7b2973SMathieu Desnoyers 	struct tracepoint_func tp_func;
431de7b2973SMathieu Desnoyers 	int ret;
43297e1c18eSMathieu Desnoyers 
43397e1c18eSMathieu Desnoyers 	mutex_lock(&tracepoints_mutex);
434de7b2973SMathieu Desnoyers 	tp_func.func = probe;
435de7b2973SMathieu Desnoyers 	tp_func.data = data;
436de7b2973SMathieu Desnoyers 	ret = tracepoint_remove_func(tp, &tp_func);
43797e1c18eSMathieu Desnoyers 	mutex_unlock(&tracepoints_mutex);
438de7b2973SMathieu Desnoyers 	return ret;
43997e1c18eSMathieu Desnoyers }
44097e1c18eSMathieu Desnoyers EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
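
/*
 * Usage sketch (pairing with the hypothetical registration above): a probe
 * registered from a module must be unregistered, with the same @probe and
 * @data, before the module goes away, and callers must wait for a grace
 * period before freeing any data still referenced by in-flight probe calls:
 *
 *	static void __exit my_mod_exit(void)
 *	{
 *		tracepoint_probe_unregister(&__tracepoint_my_sample,
 *					    (void *)my_probe, NULL);
 *		tracepoint_synchronize_unregister();
 *	}
 */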
44197e1c18eSMathieu Desnoyers 
4429c0be3f6SMathieu Desnoyers static void for_each_tracepoint_range(
4439c0be3f6SMathieu Desnoyers 		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
44446e0c9beSArd Biesheuvel 		void (*fct)(struct tracepoint *tp, void *priv),
44546e0c9beSArd Biesheuvel 		void *priv)
44646e0c9beSArd Biesheuvel {
4479c0be3f6SMathieu Desnoyers 	tracepoint_ptr_t *iter;
4489c0be3f6SMathieu Desnoyers 
44946e0c9beSArd Biesheuvel 	if (!begin)
45046e0c9beSArd Biesheuvel 		return;
45146e0c9beSArd Biesheuvel 	for (iter = begin; iter < end; iter++)
4529c0be3f6SMathieu Desnoyers 		fct(tracepoint_ptr_deref(iter), priv);
45346e0c9beSArd Biesheuvel }
45446e0c9beSArd Biesheuvel 
455227a8375SIngo Molnar #ifdef CONFIG_MODULES
45645ab2813SSteven Rostedt (Red Hat) bool trace_module_has_bad_taint(struct module *mod)
45745ab2813SSteven Rostedt (Red Hat) {
45866cc69e3SMathieu Desnoyers 	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
45966cc69e3SMathieu Desnoyers 			       (1 << TAINT_UNSIGNED_MODULE));
46045ab2813SSteven Rostedt (Red Hat) }
46145ab2813SSteven Rostedt (Red Hat) 
462de7b2973SMathieu Desnoyers static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
463de7b2973SMathieu Desnoyers 
464de7b2973SMathieu Desnoyers /**
465de7b2973SMathieu Desnoyers  * register_tracepoint_module_notifier - register tracepoint coming/going notifier
466de7b2973SMathieu Desnoyers  * @nb: notifier block
467de7b2973SMathieu Desnoyers  *
468de7b2973SMathieu Desnoyers  * Notifiers registered with this function are called on module
469de7b2973SMathieu Desnoyers  * coming/going with the tracepoint_module_list_mutex held.
470de7b2973SMathieu Desnoyers  * The notifier block callback should expect a "struct tp_module" data
471de7b2973SMathieu Desnoyers  * pointer.
472de7b2973SMathieu Desnoyers  */
473de7b2973SMathieu Desnoyers int register_tracepoint_module_notifier(struct notifier_block *nb)
474de7b2973SMathieu Desnoyers {
475de7b2973SMathieu Desnoyers 	struct tp_module *tp_mod;
476de7b2973SMathieu Desnoyers 	int ret;
477de7b2973SMathieu Desnoyers 
478de7b2973SMathieu Desnoyers 	mutex_lock(&tracepoint_module_list_mutex);
479de7b2973SMathieu Desnoyers 	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
480de7b2973SMathieu Desnoyers 	if (ret)
481de7b2973SMathieu Desnoyers 		goto end;
482de7b2973SMathieu Desnoyers 	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
483de7b2973SMathieu Desnoyers 		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
484de7b2973SMathieu Desnoyers end:
485de7b2973SMathieu Desnoyers 	mutex_unlock(&tracepoint_module_list_mutex);
486de7b2973SMathieu Desnoyers 	return ret;
487de7b2973SMathieu Desnoyers }
488de7b2973SMathieu Desnoyers EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
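
/*
 * Usage sketch (hypothetical notifier): the callback gets a struct tp_module
 * pointer and is called with tracepoint_module_list_mutex held, both for
 * modules already present at registration time and for later coming/going
 * events:
 *
 *	static int my_tp_module_notify(struct notifier_block *nb,
 *				       unsigned long action, void *data)
 *	{
 *		struct tp_module *tp_mod = data;
 *
 *		pr_info("tracepoint module event %lu for %s\n",
 *			action, tp_mod->mod->name);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_tp_nb = {
 *		.notifier_call = my_tp_module_notify,
 *	};
 *
 *	register_tracepoint_module_notifier(&my_tp_nb);
 */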
489de7b2973SMathieu Desnoyers 
490de7b2973SMathieu Desnoyers /**
491de7b2973SMathieu Desnoyers  * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
492de7b2973SMathieu Desnoyers  * @nb: notifier block
493de7b2973SMathieu Desnoyers  *
494de7b2973SMathieu Desnoyers  * The notifier block callback should expect a "struct tp_module" data
495de7b2973SMathieu Desnoyers  * pointer.
496de7b2973SMathieu Desnoyers  */
497de7b2973SMathieu Desnoyers int unregister_tracepoint_module_notifier(struct notifier_block *nb)
498de7b2973SMathieu Desnoyers {
499de7b2973SMathieu Desnoyers 	struct tp_module *tp_mod;
500de7b2973SMathieu Desnoyers 	int ret;
501de7b2973SMathieu Desnoyers 
502de7b2973SMathieu Desnoyers 	mutex_lock(&tracepoint_module_list_mutex);
503de7b2973SMathieu Desnoyers 	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
504de7b2973SMathieu Desnoyers 	if (ret)
505de7b2973SMathieu Desnoyers 		goto end;
506de7b2973SMathieu Desnoyers 	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
507de7b2973SMathieu Desnoyers 		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
508de7b2973SMathieu Desnoyers end:
509de7b2973SMathieu Desnoyers 	mutex_unlock(&tracepoint_module_list_mutex);
510de7b2973SMathieu Desnoyers 	return ret;
511de7b2973SMathieu Desnoyers 
512de7b2973SMathieu Desnoyers }
513de7b2973SMathieu Desnoyers EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
514de7b2973SMathieu Desnoyers 
515de7b2973SMathieu Desnoyers /*
516de7b2973SMathieu Desnoyers  * Ensure the tracer unregistered the module's probes before the module
517de7b2973SMathieu Desnoyers  * teardown is performed. Prevents leaks of probe and data pointers.
518de7b2973SMathieu Desnoyers  */
51946e0c9beSArd Biesheuvel static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
520de7b2973SMathieu Desnoyers {
52146e0c9beSArd Biesheuvel 	WARN_ON_ONCE(tp->funcs);
522de7b2973SMathieu Desnoyers }
523de7b2973SMathieu Desnoyers 
524b75ef8b4SMathieu Desnoyers static int tracepoint_module_coming(struct module *mod)
525b75ef8b4SMathieu Desnoyers {
5260dea6d52SMathieu Desnoyers 	struct tp_module *tp_mod;
527b75ef8b4SMathieu Desnoyers 	int ret = 0;
528b75ef8b4SMathieu Desnoyers 
5297dec935aSSteven Rostedt (Red Hat) 	if (!mod->num_tracepoints)
5307dec935aSSteven Rostedt (Red Hat) 		return 0;
5317dec935aSSteven Rostedt (Red Hat) 
532b75ef8b4SMathieu Desnoyers 	/*
533c10076c4SSteven Rostedt 	 * We skip modules that taint the kernel, especially those with different
534c10076c4SSteven Rostedt 	 * module headers (for forced load), to make sure we don't cause a crash.
53566cc69e3SMathieu Desnoyers 	 * Staging, out-of-tree, and unsigned GPL modules are fine.
536b75ef8b4SMathieu Desnoyers 	 */
53745ab2813SSteven Rostedt (Red Hat) 	if (trace_module_has_bad_taint(mod))
538b75ef8b4SMathieu Desnoyers 		return 0;
539de7b2973SMathieu Desnoyers 	mutex_lock(&tracepoint_module_list_mutex);
540b75ef8b4SMathieu Desnoyers 	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
541b75ef8b4SMathieu Desnoyers 	if (!tp_mod) {
542b75ef8b4SMathieu Desnoyers 		ret = -ENOMEM;
543b75ef8b4SMathieu Desnoyers 		goto end;
544b75ef8b4SMathieu Desnoyers 	}
545eb7d035cSSteven Rostedt (Red Hat) 	tp_mod->mod = mod;
5460dea6d52SMathieu Desnoyers 	list_add_tail(&tp_mod->list, &tracepoint_module_list);
547de7b2973SMathieu Desnoyers 	blocking_notifier_call_chain(&tracepoint_notify_list,
548de7b2973SMathieu Desnoyers 			MODULE_STATE_COMING, tp_mod);
549b75ef8b4SMathieu Desnoyers end:
550de7b2973SMathieu Desnoyers 	mutex_unlock(&tracepoint_module_list_mutex);
551b75ef8b4SMathieu Desnoyers 	return ret;
552b75ef8b4SMathieu Desnoyers }
553b75ef8b4SMathieu Desnoyers 
554de7b2973SMathieu Desnoyers static void tracepoint_module_going(struct module *mod)
555b75ef8b4SMathieu Desnoyers {
556de7b2973SMathieu Desnoyers 	struct tp_module *tp_mod;
557b75ef8b4SMathieu Desnoyers 
5587dec935aSSteven Rostedt (Red Hat) 	if (!mod->num_tracepoints)
559de7b2973SMathieu Desnoyers 		return;
5607dec935aSSteven Rostedt (Red Hat) 
561de7b2973SMathieu Desnoyers 	mutex_lock(&tracepoint_module_list_mutex);
562de7b2973SMathieu Desnoyers 	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
563eb7d035cSSteven Rostedt (Red Hat) 		if (tp_mod->mod == mod) {
564de7b2973SMathieu Desnoyers 			blocking_notifier_call_chain(&tracepoint_notify_list,
565de7b2973SMathieu Desnoyers 					MODULE_STATE_GOING, tp_mod);
566de7b2973SMathieu Desnoyers 			list_del(&tp_mod->list);
567de7b2973SMathieu Desnoyers 			kfree(tp_mod);
568de7b2973SMathieu Desnoyers 			/*
569de7b2973SMathieu Desnoyers 			 * Called the going notifier before checking for
570de7b2973SMathieu Desnoyers 			 * The going notifier has been called above, before
571de7b2973SMathieu Desnoyers 			 * checking for quiescence below.
57246e0c9beSArd Biesheuvel 			for_each_tracepoint_range(mod->tracepoints_ptrs,
57346e0c9beSArd Biesheuvel 				mod->tracepoints_ptrs + mod->num_tracepoints,
57446e0c9beSArd Biesheuvel 				tp_module_going_check_quiescent, NULL);
575b75ef8b4SMathieu Desnoyers 			break;
576b75ef8b4SMathieu Desnoyers 		}
577b75ef8b4SMathieu Desnoyers 	}
578b75ef8b4SMathieu Desnoyers 	/*
579b75ef8b4SMathieu Desnoyers 	 * In the case of modules that were tainted at "coming", we'll simply
580b75ef8b4SMathieu Desnoyers 	 * walk through the list without finding it. We cannot use the "tainted"
581b75ef8b4SMathieu Desnoyers 	 * flag on "going", in case a module taints the kernel only after being
582b75ef8b4SMathieu Desnoyers 	 * loaded.
583b75ef8b4SMathieu Desnoyers 	 */
584de7b2973SMathieu Desnoyers 	mutex_unlock(&tracepoint_module_list_mutex);
585b75ef8b4SMathieu Desnoyers }
586227a8375SIngo Molnar 
587de7b2973SMathieu Desnoyers static int tracepoint_module_notify(struct notifier_block *self,
58832f85742SMathieu Desnoyers 		unsigned long val, void *data)
58932f85742SMathieu Desnoyers {
59032f85742SMathieu Desnoyers 	struct module *mod = data;
591b75ef8b4SMathieu Desnoyers 	int ret = 0;
59232f85742SMathieu Desnoyers 
59332f85742SMathieu Desnoyers 	switch (val) {
59432f85742SMathieu Desnoyers 	case MODULE_STATE_COMING:
595b75ef8b4SMathieu Desnoyers 		ret = tracepoint_module_coming(mod);
596b75ef8b4SMathieu Desnoyers 		break;
597b75ef8b4SMathieu Desnoyers 	case MODULE_STATE_LIVE:
598b75ef8b4SMathieu Desnoyers 		break;
59932f85742SMathieu Desnoyers 	case MODULE_STATE_GOING:
600de7b2973SMathieu Desnoyers 		tracepoint_module_going(mod);
601de7b2973SMathieu Desnoyers 		break;
602de7b2973SMathieu Desnoyers 	case MODULE_STATE_UNFORMED:
60332f85742SMathieu Desnoyers 		break;
60432f85742SMathieu Desnoyers 	}
6050340a6b7SPeter Zijlstra 	return notifier_from_errno(ret);
60632f85742SMathieu Desnoyers }
60732f85742SMathieu Desnoyers 
608de7b2973SMathieu Desnoyers static struct notifier_block tracepoint_module_nb = {
60932f85742SMathieu Desnoyers 	.notifier_call = tracepoint_module_notify,
61032f85742SMathieu Desnoyers 	.priority = 0,
61132f85742SMathieu Desnoyers };
61232f85742SMathieu Desnoyers 
613de7b2973SMathieu Desnoyers static __init int init_tracepoints(void)
61432f85742SMathieu Desnoyers {
615de7b2973SMathieu Desnoyers 	int ret;
616de7b2973SMathieu Desnoyers 
617de7b2973SMathieu Desnoyers 	ret = register_module_notifier(&tracepoint_module_nb);
618eb7d035cSSteven Rostedt (Red Hat) 	if (ret)
619a395d6a7SJoe Perches 		pr_warn("Failed to register tracepoint module enter notifier\n");
620eb7d035cSSteven Rostedt (Red Hat) 
621de7b2973SMathieu Desnoyers 	return ret;
62232f85742SMathieu Desnoyers }
62332f85742SMathieu Desnoyers __initcall(init_tracepoints);
624227a8375SIngo Molnar #endif /* CONFIG_MODULES */
625a871bd33SJason Baron 
626de7b2973SMathieu Desnoyers /**
627de7b2973SMathieu Desnoyers  * for_each_kernel_tracepoint - iteration on all kernel tracepoints
628de7b2973SMathieu Desnoyers  * @fct: callback
629de7b2973SMathieu Desnoyers  * @priv: private data
630de7b2973SMathieu Desnoyers  */
631de7b2973SMathieu Desnoyers void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
632de7b2973SMathieu Desnoyers 		void *priv)
633de7b2973SMathieu Desnoyers {
634de7b2973SMathieu Desnoyers 	for_each_tracepoint_range(__start___tracepoints_ptrs,
635de7b2973SMathieu Desnoyers 		__stop___tracepoints_ptrs, fct, priv);
636de7b2973SMathieu Desnoyers }
637de7b2973SMathieu Desnoyers EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
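
/*
 * Usage sketch (hypothetical callback): walk every tracepoint built into the
 * core kernel (module tracepoints are not included) and print its name;
 * @priv is passed through to the callback unchanged:
 *
 *	static void show_tp(struct tracepoint *tp, void *priv)
 *	{
 *		pr_info("%s: %s\n", (const char *)priv, tp->name);
 *	}
 *
 *	for_each_kernel_tracepoint(show_tp, (void *)"core");
 */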
638de7b2973SMathieu Desnoyers 
6393d27d8cbSJosh Stone #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
64060d970c2SIngo Molnar 
64197419875SJosh Stone /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
642a871bd33SJason Baron static int sys_tracepoint_refcount;
643a871bd33SJason Baron 
6448cf868afSSteven Rostedt (Red Hat) int syscall_regfunc(void)
645a871bd33SJason Baron {
6468063e41dSOleg Nesterov 	struct task_struct *p, *t;
647a871bd33SJason Baron 
648a871bd33SJason Baron 	if (!sys_tracepoint_refcount) {
6498063e41dSOleg Nesterov 		read_lock(&tasklist_lock);
6508063e41dSOleg Nesterov 		for_each_process_thread(p, t) {
651524666cbSGabriel Krisman Bertazi 			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
6528063e41dSOleg Nesterov 		}
6538063e41dSOleg Nesterov 		read_unlock(&tasklist_lock);
654a871bd33SJason Baron 	}
655a871bd33SJason Baron 	sys_tracepoint_refcount++;
6568cf868afSSteven Rostedt (Red Hat) 
6578cf868afSSteven Rostedt (Red Hat) 	return 0;
658a871bd33SJason Baron }
659a871bd33SJason Baron 
660a871bd33SJason Baron void syscall_unregfunc(void)
661a871bd33SJason Baron {
6628063e41dSOleg Nesterov 	struct task_struct *p, *t;
663a871bd33SJason Baron 
664a871bd33SJason Baron 	sys_tracepoint_refcount--;
665a871bd33SJason Baron 	if (!sys_tracepoint_refcount) {
6668063e41dSOleg Nesterov 		read_lock(&tasklist_lock);
6678063e41dSOleg Nesterov 		for_each_process_thread(p, t) {
668524666cbSGabriel Krisman Bertazi 			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
6698063e41dSOleg Nesterov 		}
6708063e41dSOleg Nesterov 		read_unlock(&tasklist_lock);
671a871bd33SJason Baron 	}
672a871bd33SJason Baron }
67360d970c2SIngo Molnar #endif
674