xref: /linux/kernel/tracepoint.c (revision befe6d946551d65cddbd32b9cb0170b0249fd5ed)
11a59d1b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
297e1c18eSMathieu Desnoyers /*
3de7b2973SMathieu Desnoyers  * Copyright (C) 2008-2014 Mathieu Desnoyers
497e1c18eSMathieu Desnoyers  */
597e1c18eSMathieu Desnoyers #include <linux/module.h>
697e1c18eSMathieu Desnoyers #include <linux/mutex.h>
797e1c18eSMathieu Desnoyers #include <linux/types.h>
897e1c18eSMathieu Desnoyers #include <linux/jhash.h>
997e1c18eSMathieu Desnoyers #include <linux/list.h>
1097e1c18eSMathieu Desnoyers #include <linux/rcupdate.h>
1197e1c18eSMathieu Desnoyers #include <linux/tracepoint.h>
1297e1c18eSMathieu Desnoyers #include <linux/err.h>
1397e1c18eSMathieu Desnoyers #include <linux/slab.h>
143f07c014SIngo Molnar #include <linux/sched/signal.h>
1529930025SIngo Molnar #include <linux/sched/task.h>
16c5905afbSIngo Molnar #include <linux/static_key.h>
1797e1c18eSMathieu Desnoyers 
189c0be3f6SMathieu Desnoyers extern tracepoint_ptr_t __start___tracepoints_ptrs[];
199c0be3f6SMathieu Desnoyers extern tracepoint_ptr_t __stop___tracepoints_ptrs[];
2097e1c18eSMathieu Desnoyers 
21e6753f23SJoel Fernandes (Google) DEFINE_SRCU(tracepoint_srcu);
22e6753f23SJoel Fernandes (Google) EXPORT_SYMBOL_GPL(tracepoint_srcu);
23e6753f23SJoel Fernandes (Google) 
2497e1c18eSMathieu Desnoyers /* Set to 1 to enable tracepoint debug output */
2597e1c18eSMathieu Desnoyers static const int tracepoint_debug;
2697e1c18eSMathieu Desnoyers 
27b75ef8b4SMathieu Desnoyers #ifdef CONFIG_MODULES
28de7b2973SMathieu Desnoyers /*
29de7b2973SMathieu Desnoyers  * Tracepoint module list mutex protects the local module list.
30de7b2973SMathieu Desnoyers  */
31de7b2973SMathieu Desnoyers static DEFINE_MUTEX(tracepoint_module_list_mutex);
32de7b2973SMathieu Desnoyers 
33de7b2973SMathieu Desnoyers /* Local list of struct tp_module */
34b75ef8b4SMathieu Desnoyers static LIST_HEAD(tracepoint_module_list);
35b75ef8b4SMathieu Desnoyers #endif /* CONFIG_MODULES */
36b75ef8b4SMathieu Desnoyers 
3797e1c18eSMathieu Desnoyers /*
38de7b2973SMathieu Desnoyers  * tracepoints_mutex protects the builtin and module tracepoints.
39de7b2973SMathieu Desnoyers  * tracepoints_mutex nests inside tracepoint_module_list_mutex.
4097e1c18eSMathieu Desnoyers  */
41de7b2973SMathieu Desnoyers static DEFINE_MUTEX(tracepoints_mutex);
4297e1c18eSMathieu Desnoyers 
/*
 * Probe arrays released before SRCU is initialized are parked on this
 * list (chained through rcu_head.next) until release_early_probes()
 * flips ok_to_free_tracepoints and drains them.
 */
static struct rcu_head *early_probes;
static bool ok_to_free_tracepoints;
45f8a79d5cSSteven Rostedt (VMware) 
/*
 * Note about RCU :
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;		/* grace-period callback chaining */
	struct tracepoint_func probes[];	/* NULL-func terminated array */
};
5597e1c18eSMathieu Desnoyers 
/*
 * Placeholder probe installed when removing a callback fails to allocate
 * a replacement tp_funcs array; it intentionally does nothing.  Entries
 * pointing at it are recognized and compacted away by func_add()/
 * func_remove().
 */
static void tp_stub_func(void)
{
}
61*befe6d94SSteven Rostedt (VMware) 
/*
 * Allocate a struct tp_probes with room for @count tracepoint_func slots
 * and return a pointer to its probes[] array, or NULL on allocation
 * failure.  release_probes() later recovers the enclosing tp_probes via
 * container_of() on the returned pointer.
 */
static inline void *allocate_probes(int count)
{
	struct tp_probes *p  = kmalloc(struct_size(p, probes, count),
				       GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}
6897e1c18eSMathieu Desnoyers 
69e6753f23SJoel Fernandes (Google) static void srcu_free_old_probes(struct rcu_head *head)
7097e1c18eSMathieu Desnoyers {
710dea6d52SMathieu Desnoyers 	kfree(container_of(head, struct tp_probes, rcu));
7219dba33cSLai Jiangshan }
7319dba33cSLai Jiangshan 
/*
 * Sched-RCU stage: once a sched RCU grace period has elapsed, chain into
 * an SRCU grace period so the array is freed only after both RCU flavors
 * have quiesced (see the comment in release_probes()).
 */
static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}
78e6753f23SJoel Fernandes (Google) 
/*
 * Drain the early_probes list: probe arrays queued before SRCU was
 * initialized can now be handed to the normal chained RCU/SRCU free
 * path.  Also flips ok_to_free_tracepoints so release_probes() frees
 * directly from here on.
 */
static __init int release_early_probes(void)
{
	struct rcu_head *tmp;

	ok_to_free_tracepoints = true;

	while (early_probes) {
		tmp = early_probes;
		early_probes = tmp->next;
		call_rcu(tmp, rcu_free_old_probes);
	}

	return 0;
}

/* SRCU is initialized at core_initcall */
postcore_initcall(release_early_probes);
96f8a79d5cSSteven Rostedt (VMware) 
/*
 * Queue an old probe array for freeing once no tracer can still be
 * iterating it.  @old points at tp_probes::probes[], so the enclosing
 * allocation is recovered with container_of().  NULL is a no-op.
 */
static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * We can't free probes if SRCU is not initialized yet.
		 * Postpone the freeing till after SRCU is initialized.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Tracepoint probes are protected by both sched RCU and SRCU,
		 * by calling the SRCU callback in the sched RCU callback we
		 * cover both cases. So let us chain the SRCU and sched RCU
		 * callbacks to wait for both grace periods.
		 */
		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}
12297e1c18eSMathieu Desnoyers 
123de7b2973SMathieu Desnoyers static void debug_print_probes(struct tracepoint_func *funcs)
12497e1c18eSMathieu Desnoyers {
12597e1c18eSMathieu Desnoyers 	int i;
12697e1c18eSMathieu Desnoyers 
127de7b2973SMathieu Desnoyers 	if (!tracepoint_debug || !funcs)
12897e1c18eSMathieu Desnoyers 		return;
12997e1c18eSMathieu Desnoyers 
130de7b2973SMathieu Desnoyers 	for (i = 0; funcs[i].func; i++)
131de7b2973SMathieu Desnoyers 		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
13297e1c18eSMathieu Desnoyers }
13397e1c18eSMathieu Desnoyers 
/*
 * Build a new probe array containing every entry of *funcs plus
 * @tp_func, inserted before any entry of lower priority.  Entries that
 * were previously turned into tp_stub_func placeholders (by an OOM in
 * func_remove()) are dropped while copying.
 *
 * Returns the old array so the caller can hand it to release_probes()
 * (NULL if there was none), or an ERR_PTR: -EINVAL for a NULL probe,
 * -EEXIST if the same func/data pair is already registered, -ENOMEM on
 * allocation failure.  On success *funcs points at the new array.
 */
static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int nr_probes = 0;
	int stub_funcs = 0;
	int pos = -1;	/* insertion index; < 0 means "append at the end" */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			/* Insert before probes of lower priority */
			if (pos < 0 && old[nr_probes].prio < prio)
				pos = nr_probes;
			if (old[nr_probes].func == tp_func->func &&
			    old[nr_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			if (old[nr_probes].func == tp_stub_func)
				stub_funcs++;
		}
	}
	/* + 2 : one for new probe, one for NULL func - stub functions */
	new = allocate_probes(nr_probes + 2 - stub_funcs);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		if (stub_funcs) {
			/* Need to copy one at a time to remove stubs */
			int probes = 0;

			/* Recompute pos: stub removal shifts the indexes. */
			pos = -1;
			for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
				if (old[nr_probes].func == tp_stub_func)
					continue;
				if (pos < 0 && old[nr_probes].prio < prio)
					pos = probes++; /* leave a hole for the new probe */
				new[probes++] = old[nr_probes];
			}
			nr_probes = probes;
			if (pos < 0)
				pos = probes;
			else
				nr_probes--; /* Account for insertion */

		} else if (pos < 0) {
			/* No lower-priority entry found: append at the end. */
			pos = nr_probes;
			memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
		} else {
			/* Copy higher priority probes ahead of the new probe */
			memcpy(new, old, pos * sizeof(struct tracepoint_func));
			/* Copy the rest after it. */
			memcpy(new + pos + 1, old + pos,
			       (nr_probes - pos) * sizeof(struct tracepoint_func));
		}
	} else
		pos = 0;
	new[pos] = *tp_func;
	new[nr_probes + 1].func = NULL;	/* terminate the array */
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
20297e1c18eSMathieu Desnoyers 
/*
 * Build a new probe array with @tp_func (and any leftover tp_stub_func
 * placeholders) removed from *funcs.
 *
 * Returns the old array (for release_probes()) or ERR_PTR(-ENOENT) if
 * *funcs is NULL.  If the new, smaller array cannot be allocated, the
 * matching entries are overwritten in place with tp_stub_func and *funcs
 * is left pointing at the old array — the caller detects this case by
 * comparing *funcs against the return value.
 */
static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		/* Count removals: the target entry plus any stale stubs. */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			/* Copy everything except the target and any stubs. */
			for (i = 0; old[i].func; i++)
				if ((old[i].func != tp_func->func
				     || old[i].data != tp_func->data)
				    && old[i].func != tp_stub_func)
					new[j++] = old[i];
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++)
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data) {
					old[i].func = tp_stub_func;
					/* Set the prio to the next event. */
					if (old[i + 1].func)
						old[i].prio =
							old[i + 1].prio;
					else
						old[i].prio = -1;
				}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}
26997e1c18eSMathieu Desnoyers 
/*
 * Repoint the tracepoint's static call: directly at the sole callback
 * when exactly one probe is registered, otherwise at the generic
 * iterator which walks the whole tp_funcs array.
 */
static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs, bool sync)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;

	if (!tp_funcs[1].func) {
		/* Single probe: call it directly, skipping the iterator. */
		func = tp_funcs[0].func;
		/*
		 * If going from the iterator back to a single caller,
		 * we need to synchronize with __DO_TRACE to make sure
		 * that the data passed to the callback is the one that
		 * belongs to that callback.
		 */
		if (sync)
			tracepoint_synchronize_unregister();
	}

	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}
292d25e37d8SSteven Rostedt (VMware) 
/*
 * Add the probe function to a tracepoint.  Called with tracepoints_mutex
 * held.  Runs tp->regfunc (if any) the first time the tracepoint gains a
 * probe, publishes the new array, then enables the static key.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	/* First probe on this tracepoint: run the registration hook. */
	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has a smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it.  This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	rcu_assign_pointer(tp->funcs, tp_funcs);
	tracepoint_update_call(tp, tp_funcs, false);
	static_key_enable(&tp->key);

	release_probes(old);
	return 0;
}
32997e1c18eSMathieu Desnoyers 
/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function insures that the original callback is not used anymore. This
 * is ensured by preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	if (!tp_funcs) {
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		static_key_disable(&tp->key);
		rcu_assign_pointer(tp->funcs, tp_funcs);
	} else {
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/* Sync only if the first (directly-called) probe changed. */
		tracepoint_update_call(tp, tp_funcs,
				       tp_funcs[0].func != old[0].func);
	}
	release_probes(old);
	return 0;
}
366127cafbbSLai Jiangshan 
36797e1c18eSMathieu Desnoyers /**
368f39e2391SLee, Chun-Yi  * tracepoint_probe_register_prio -  Connect a probe to a tracepoint with priority
369de7b2973SMathieu Desnoyers  * @tp: tracepoint
37097e1c18eSMathieu Desnoyers  * @probe: probe handler
371cac92ba7SFabian Frederick  * @data: tracepoint data
3727904b5c4SSteven Rostedt (Red Hat)  * @prio: priority of this function over other registered functions
3737904b5c4SSteven Rostedt (Red Hat)  *
3747904b5c4SSteven Rostedt (Red Hat)  * Returns 0 if ok, error value on error.
3757904b5c4SSteven Rostedt (Red Hat)  * Note: if @tp is within a module, the caller is responsible for
3767904b5c4SSteven Rostedt (Red Hat)  * unregistering the probe before the module is gone. This can be
3777904b5c4SSteven Rostedt (Red Hat)  * performed either with a tracepoint module going notifier, or from
3787904b5c4SSteven Rostedt (Red Hat)  * within module exit functions.
3797904b5c4SSteven Rostedt (Red Hat)  */
3807904b5c4SSteven Rostedt (Red Hat) int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
3817904b5c4SSteven Rostedt (Red Hat) 				   void *data, int prio)
3827904b5c4SSteven Rostedt (Red Hat) {
3837904b5c4SSteven Rostedt (Red Hat) 	struct tracepoint_func tp_func;
3847904b5c4SSteven Rostedt (Red Hat) 	int ret;
3857904b5c4SSteven Rostedt (Red Hat) 
3867904b5c4SSteven Rostedt (Red Hat) 	mutex_lock(&tracepoints_mutex);
3877904b5c4SSteven Rostedt (Red Hat) 	tp_func.func = probe;
3887904b5c4SSteven Rostedt (Red Hat) 	tp_func.data = data;
3897904b5c4SSteven Rostedt (Red Hat) 	tp_func.prio = prio;
3907904b5c4SSteven Rostedt (Red Hat) 	ret = tracepoint_add_func(tp, &tp_func, prio);
3917904b5c4SSteven Rostedt (Red Hat) 	mutex_unlock(&tracepoints_mutex);
3927904b5c4SSteven Rostedt (Red Hat) 	return ret;
3937904b5c4SSteven Rostedt (Red Hat) }
3947904b5c4SSteven Rostedt (Red Hat) EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);
3957904b5c4SSteven Rostedt (Red Hat) 
/**
 * tracepoint_probe_register -  Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	/* Plain registration is just the priority variant at default prio. */
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
41397e1c18eSMathieu Desnoyers 
41497e1c18eSMathieu Desnoyers /**
41597e1c18eSMathieu Desnoyers  * tracepoint_probe_unregister -  Disconnect a probe from a tracepoint
416de7b2973SMathieu Desnoyers  * @tp: tracepoint
41797e1c18eSMathieu Desnoyers  * @probe: probe function pointer
418cac92ba7SFabian Frederick  * @data: tracepoint data
41997e1c18eSMathieu Desnoyers  *
420de7b2973SMathieu Desnoyers  * Returns 0 if ok, error value on error.
42197e1c18eSMathieu Desnoyers  */
422de7b2973SMathieu Desnoyers int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
42397e1c18eSMathieu Desnoyers {
424de7b2973SMathieu Desnoyers 	struct tracepoint_func tp_func;
425de7b2973SMathieu Desnoyers 	int ret;
42697e1c18eSMathieu Desnoyers 
42797e1c18eSMathieu Desnoyers 	mutex_lock(&tracepoints_mutex);
428de7b2973SMathieu Desnoyers 	tp_func.func = probe;
429de7b2973SMathieu Desnoyers 	tp_func.data = data;
430de7b2973SMathieu Desnoyers 	ret = tracepoint_remove_func(tp, &tp_func);
43197e1c18eSMathieu Desnoyers 	mutex_unlock(&tracepoints_mutex);
432de7b2973SMathieu Desnoyers 	return ret;
43397e1c18eSMathieu Desnoyers }
43497e1c18eSMathieu Desnoyers EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
43597e1c18eSMathieu Desnoyers 
4369c0be3f6SMathieu Desnoyers static void for_each_tracepoint_range(
4379c0be3f6SMathieu Desnoyers 		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
43846e0c9beSArd Biesheuvel 		void (*fct)(struct tracepoint *tp, void *priv),
43946e0c9beSArd Biesheuvel 		void *priv)
44046e0c9beSArd Biesheuvel {
4419c0be3f6SMathieu Desnoyers 	tracepoint_ptr_t *iter;
4429c0be3f6SMathieu Desnoyers 
44346e0c9beSArd Biesheuvel 	if (!begin)
44446e0c9beSArd Biesheuvel 		return;
44546e0c9beSArd Biesheuvel 	for (iter = begin; iter < end; iter++)
4469c0be3f6SMathieu Desnoyers 		fct(tracepoint_ptr_deref(iter), priv);
44746e0c9beSArd Biesheuvel }
44846e0c9beSArd Biesheuvel 
449227a8375SIngo Molnar #ifdef CONFIG_MODULES
45045ab2813SSteven Rostedt (Red Hat) bool trace_module_has_bad_taint(struct module *mod)
45145ab2813SSteven Rostedt (Red Hat) {
45266cc69e3SMathieu Desnoyers 	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
45366cc69e3SMathieu Desnoyers 			       (1 << TAINT_UNSIGNED_MODULE));
45445ab2813SSteven Rostedt (Red Hat) }
45545ab2813SSteven Rostedt (Red Hat) 
456de7b2973SMathieu Desnoyers static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
457de7b2973SMathieu Desnoyers 
/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	/* Replay a COMING event for every module already on the list. */
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
483de7b2973SMathieu Desnoyers 
/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	/* Replay a GOING event for every module still on the list. */
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;

}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
508de7b2973SMathieu Desnoyers 
/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	/* A non-NULL funcs array means some probe was never unregistered. */
	WARN_ON_ONCE(tp->funcs);
}
517de7b2973SMathieu Desnoyers 
/*
 * Track a newly loaded module that contains tracepoints: add it to the
 * local list and notify listeners with MODULE_STATE_COMING.  Returns 0
 * on success (including the skip cases) or -ENOMEM.
 */
static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, and unsigned GPL modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;
	mutex_lock(&tracepoint_module_list_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->mod = mod;
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
547b75ef8b4SMathieu Desnoyers 
/*
 * Stop tracking @mod: notify listeners with MODULE_STATE_GOING, drop it
 * from the local list, and warn if any of its tracepoints still has
 * probes attached.
 */
static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}
580227a8375SIngo Molnar 
581de7b2973SMathieu Desnoyers static int tracepoint_module_notify(struct notifier_block *self,
58232f85742SMathieu Desnoyers 		unsigned long val, void *data)
58332f85742SMathieu Desnoyers {
58432f85742SMathieu Desnoyers 	struct module *mod = data;
585b75ef8b4SMathieu Desnoyers 	int ret = 0;
58632f85742SMathieu Desnoyers 
58732f85742SMathieu Desnoyers 	switch (val) {
58832f85742SMathieu Desnoyers 	case MODULE_STATE_COMING:
589b75ef8b4SMathieu Desnoyers 		ret = tracepoint_module_coming(mod);
590b75ef8b4SMathieu Desnoyers 		break;
591b75ef8b4SMathieu Desnoyers 	case MODULE_STATE_LIVE:
592b75ef8b4SMathieu Desnoyers 		break;
59332f85742SMathieu Desnoyers 	case MODULE_STATE_GOING:
594de7b2973SMathieu Desnoyers 		tracepoint_module_going(mod);
595de7b2973SMathieu Desnoyers 		break;
596de7b2973SMathieu Desnoyers 	case MODULE_STATE_UNFORMED:
59732f85742SMathieu Desnoyers 		break;
59832f85742SMathieu Desnoyers 	}
5990340a6b7SPeter Zijlstra 	return notifier_from_errno(ret);
60032f85742SMathieu Desnoyers }
60132f85742SMathieu Desnoyers 
/* Notifier block hooked into the module load/unload notifier chain. */
static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};
60632f85742SMathieu Desnoyers 
607de7b2973SMathieu Desnoyers static __init int init_tracepoints(void)
60832f85742SMathieu Desnoyers {
609de7b2973SMathieu Desnoyers 	int ret;
610de7b2973SMathieu Desnoyers 
611de7b2973SMathieu Desnoyers 	ret = register_module_notifier(&tracepoint_module_nb);
612eb7d035cSSteven Rostedt (Red Hat) 	if (ret)
613a395d6a7SJoe Perches 		pr_warn("Failed to register tracepoint module enter notifier\n");
614eb7d035cSSteven Rostedt (Red Hat) 
615de7b2973SMathieu Desnoyers 	return ret;
61632f85742SMathieu Desnoyers }
61732f85742SMathieu Desnoyers __initcall(init_tracepoints);
618227a8375SIngo Molnar #endif /* CONFIG_MODULES */
619a871bd33SJason Baron 
/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback invoked once per tracepoint
 * @priv: private data passed unchanged to each @fct invocation
 *
 * Walks only the core kernel image's __tracepoints_ptrs section;
 * tracepoints provided by modules are not visited.
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
632de7b2973SMathieu Desnoyers 
6333d27d8cbSJosh Stone #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
63460d970c2SIngo Molnar 
63597419875SJosh Stone /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
636a871bd33SJason Baron static int sys_tracepoint_refcount;
637a871bd33SJason Baron 
6388cf868afSSteven Rostedt (Red Hat) int syscall_regfunc(void)
639a871bd33SJason Baron {
6408063e41dSOleg Nesterov 	struct task_struct *p, *t;
641a871bd33SJason Baron 
642a871bd33SJason Baron 	if (!sys_tracepoint_refcount) {
6438063e41dSOleg Nesterov 		read_lock(&tasklist_lock);
6448063e41dSOleg Nesterov 		for_each_process_thread(p, t) {
645524666cbSGabriel Krisman Bertazi 			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
6468063e41dSOleg Nesterov 		}
6478063e41dSOleg Nesterov 		read_unlock(&tasklist_lock);
648a871bd33SJason Baron 	}
649a871bd33SJason Baron 	sys_tracepoint_refcount++;
6508cf868afSSteven Rostedt (Red Hat) 
6518cf868afSSteven Rostedt (Red Hat) 	return 0;
652a871bd33SJason Baron }
653a871bd33SJason Baron 
654a871bd33SJason Baron void syscall_unregfunc(void)
655a871bd33SJason Baron {
6568063e41dSOleg Nesterov 	struct task_struct *p, *t;
657a871bd33SJason Baron 
658a871bd33SJason Baron 	sys_tracepoint_refcount--;
659a871bd33SJason Baron 	if (!sys_tracepoint_refcount) {
6608063e41dSOleg Nesterov 		read_lock(&tasklist_lock);
6618063e41dSOleg Nesterov 		for_each_process_thread(p, t) {
662524666cbSGabriel Krisman Bertazi 			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
6638063e41dSOleg Nesterov 		}
6648063e41dSOleg Nesterov 		read_unlock(&tasklist_lock);
665a871bd33SJason Baron 	}
666a871bd33SJason Baron }
66760d970c2SIngo Molnar #endif
668