xref: /linux/kernel/tracepoint.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>

enum tp_func_state {
	TP_FUNC_0,
	TP_FUNC_1,
	TP_FUNC_2,
	TP_FUNC_N,
};

extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);

enum tp_transition_sync {
	TP_TRANSITION_SYNC_1_0_1,
	TP_TRANSITION_SYNC_N_2_1,

	_NR_TP_TRANSITION_SYNC,
};

struct tp_transition_snapshot {
	unsigned long rcu;
	unsigned long srcu;
	bool ongoing;
};

/* Protected by tracepoints_mutex */
static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];

static void tp_rcu_get_state(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	/* Keep the latest get_state snapshot. */
	snapshot->rcu = get_state_synchronize_rcu();
	snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = true;
}

static void tp_rcu_cond_sync(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	if (!snapshot->ongoing)
		return;
	cond_synchronize_rcu(snapshot->rcu);
	if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
		synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = false;
}

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

static struct rcu_head *early_probes;
static bool ok_to_free_tracepoints;

/*
 * Note about RCU:
 * RCU is used to delay the freeing of old probe arrays until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];
};

/* Called in place of a removed probe when allocating a new tp_funcs array fails */
static void tp_stub_func(void)
{
	return;
}

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(struct_size(p, probes, count),
				      GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void srcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}

static __init int release_early_probes(void)
{
	struct rcu_head *tmp;

	ok_to_free_tracepoints = true;

	while (early_probes) {
		tmp = early_probes;
		early_probes = tmp->next;
		call_rcu(tmp, rcu_free_old_probes);
	}

	return 0;
}

/* SRCU is initialized at core_initcall */
postcore_initcall(release_early_probes);

static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * We can't free probes if SRCU is not initialized yet.
		 * Postpone the freeing till after SRCU is initialized.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Tracepoint probes are protected by both sched RCU and SRCU;
		 * by calling the SRCU callback from the sched RCU callback we
		 * cover both cases. So chain the SRCU and sched RCU callbacks
		 * to wait for both grace periods.
		 */
		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}

static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}

static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int iter_probes;	/* Iterate over old probe array. */
	int nr_probes = 0;	/* Counter for probes */
	int pos = -1;		/* Insertion position into new array */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;	/* Skip stub functions. */
			if (old[iter_probes].func == tp_func->func &&
			    old[iter_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			nr_probes++;
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		nr_probes = 0;
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;
			/* Insert before probes of lower priority */
			if (pos < 0 && old[iter_probes].prio < prio)
				pos = nr_probes++;
			new[nr_probes++] = old[iter_probes];
		}
		if (pos < 0)
			pos = nr_probes++;
		/* nr_probes now points to the end of the new array */
	} else {
		pos = 0;
		nr_probes = 1; /* must point at end of array */
	}
	new[pos] = *tp_func;
	new[nr_probes].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}

static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++) {
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			}
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++) {
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data)
					WRITE_ONCE(old[i].func, tp_stub_func);
			}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}

/*
 * Count the number of functions (enum tp_func_state) in a tp_funcs array.
 */
static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
{
	if (!tp_funcs)
		return TP_FUNC_0;
	if (!tp_funcs[1].func)
		return TP_FUNC_1;
	if (!tp_funcs[2].func)
		return TP_FUNC_2;
	return TP_FUNC_N;	/* 3 or more */
}

static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

34497e1c18eSMathieu Desnoyers 	/*
345243d1a79SPaul E. McKenney 	 * rcu_assign_pointer has as smp_store_release() which makes sure
346243d1a79SPaul E. McKenney 	 * that the new probe callbacks array is consistent before setting
347243d1a79SPaul E. McKenney 	 * a pointer to it.  This array is referenced by __DO_TRACE from
348243d1a79SPaul E. McKenney 	 * include/linux/tracepoint.h using rcu_dereference_sched().
34997e1c18eSMathieu Desnoyers 	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_key_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * Iterator callback installed before updating tp->funcs.
		 * Requires ordering between RCU assign/dereference and
		 * static call update/call.
		 */
		fallthrough;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>1) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting for an RCU grace period after setting elem->call to the
 * empty function ensures that the original callback is not used anymore.
 * This is guaranteed by the preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		static_key_disable(&tp->key);
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence. If the first
		 * element's data has changed, then force the synchronization
		 * to prevent current readers that have loaded the old data
		 * from calling the new function.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(old);
	return 0;
}

/**
 * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Same as tracepoint_probe_register_prio() except that it will not warn
 * if the tracepoint is already registered.
 */
int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
					     void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, false);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
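
/*
 * Illustrative sketch, not part of the original file: a caller that can
 * legitimately try to register the same (probe, data) pair twice may use the
 * _may_exist variant and treat -EEXIST as success, since the warn parameter
 * passed to tracepoint_add_func() is false here. The tracepoint "foo", probe
 * and context pointer below are hypothetical.
 *
 *	ret = tracepoint_probe_register_prio_may_exist(&__tracepoint_foo,
 *						       foo_probe, foo_ctx,
 *						       TRACEPOINT_DEFAULT_PRIO);
 *	if (ret && ret != -EEXIST)
 *		return ret;
 */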

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, true);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);
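
/*
 * Illustrative sketch, not part of the original file: how @prio orders the
 * probes. func_add() inserts a new probe before existing probes of lower
 * priority, so a probe registered with a larger @prio value is called earlier
 * when the tracepoint fires. The tracepoint "foo" and both probes are
 * hypothetical.
 *
 *	tracepoint_probe_register_prio(&__tracepoint_foo, audit_probe, NULL,
 *				       TRACEPOINT_DEFAULT_PRIO + 1);
 *	tracepoint_probe_register_prio(&__tracepoint_foo, stats_probe, NULL,
 *				       TRACEPOINT_DEFAULT_PRIO);
 *
 * Here audit_probe runs before stats_probe on every hit of the tracepoint.
 */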

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
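
/*
 * Illustrative sketch, not part of the original file: attaching a probe to a
 * hypothetical tracepoint "foo" declared with TP_PROTO(int value). A probe is
 * always called with the private @data pointer it was registered with as its
 * first argument, followed by the tracepoint's own arguments.
 *
 *	static void foo_probe(void *data, int value)
 *	{
 *		pr_info("foo fired: value=%d\n", value);
 *	}
 *
 *	ret = tracepoint_probe_register(&__tracepoint_foo, foo_probe, NULL);
 *	if (ret)
 *		pr_err("failed to attach foo probe: %d\n", ret);
 */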

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
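
/*
 * Illustrative sketch, not part of the original file: detaching a probe that
 * was registered with a private context pointer. Probe arrays are dereferenced
 * under RCU/SRCU, so a caller that wants to free the context afterwards should
 * wait for in-flight probe calls, e.g. with tracepoint_synchronize_unregister()
 * from <linux/tracepoint.h>. The names below are hypothetical.
 *
 *	tracepoint_probe_unregister(&__tracepoint_foo, foo_probe, foo_ctx);
 *	tracepoint_synchronize_unregister();
 *	kfree(foo_ctx);
 */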

static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), priv);
}

#ifdef CONFIG_MODULES
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
				(1 << TAINT_UNSIGNED_MODULE) | (1 << TAINT_TEST) |
				(1 << TAINT_LIVEPATCH));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
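
/*
 * Illustrative sketch, not part of the original file: a coming/going notifier.
 * The callback runs with tracepoint_module_list_mutex held and receives the
 * struct tp_module of the module being added or removed. The function and
 * notifier_block names are hypothetical.
 *
 *	static int foo_tp_module_notify(struct notifier_block *nb,
 *					unsigned long val, void *data)
 *	{
 *		struct tp_module *tp_mod = data;
 *
 *		switch (val) {
 *		case MODULE_STATE_COMING:
 *			pr_info("tracepoints coming: %s\n", tp_mod->mod->name);
 *			break;
 *		case MODULE_STATE_GOING:
 *			pr_info("tracepoints going: %s\n", tp_mod->mod->name);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_tp_nb = {
 *		.notifier_call = foo_tp_module_notify,
 *	};
 *
 *	register_tracepoint_module_notifier(&foo_tp_nb);
 */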

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}

static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, unsigned GPL, and test modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;

	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod)
		return -ENOMEM;
	tp_mod->mod = mod;

	mutex_lock(&tracepoint_module_list_mutex);
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
	mutex_unlock(&tracepoint_module_list_mutex);
	return 0;
}

static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

static int tracepoint_module_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);

/**
 * for_each_tracepoint_in_module - iteration on all tracepoints in a module
 * @mod: module
 * @fct: callback
 * @priv: private data
 */
void for_each_tracepoint_in_module(struct module *mod,
				   void (*fct)(struct tracepoint *tp,
				    struct module *mod, void *priv),
				   void *priv)
{
	tracepoint_ptr_t *begin, *end, *iter;

	lockdep_assert_held(&tracepoint_module_list_mutex);

	if (!mod)
		return;

	begin = mod->tracepoints_ptrs;
	end = mod->tracepoints_ptrs + mod->num_tracepoints;

	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), mod, priv);
}

/**
 * for_each_module_tracepoint - iteration on all tracepoints in all modules
 * @fct: callback
 * @priv: private data
 */
void for_each_module_tracepoint(void (*fct)(struct tracepoint *tp,
				 struct module *mod, void *priv),
				void *priv)
{
	struct tp_module *tp_mod;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		for_each_tracepoint_in_module(tp_mod->mod, fct, priv);
	mutex_unlock(&tracepoint_module_list_mutex);
}
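
/*
 * Illustrative sketch, not part of the original file: walking module
 * tracepoints. for_each_module_tracepoint() takes the list mutex itself,
 * while for_each_tracepoint_in_module() expects the caller to already hold
 * tracepoint_module_list_mutex (e.g. from a module notifier callback).
 * The callback below is hypothetical.
 *
 *	static void dump_module_tp(struct tracepoint *tp, struct module *mod,
 *				   void *priv)
 *	{
 *		pr_info("%s: %s\n", mod->name, tp->name);
 *	}
 *
 *	for_each_module_tracepoint(dump_module_tp, NULL);
 */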
#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
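
/*
 * Illustrative sketch, not part of the original file: looking up a core-kernel
 * tracepoint by name, a common reason to iterate with
 * for_each_kernel_tracepoint(). The lookup structure, callback and probe
 * names are hypothetical.
 *
 *	struct tp_lookup {
 *		const char *name;
 *		struct tracepoint *tp;
 *	};
 *
 *	static void match_tp(struct tracepoint *tp, void *priv)
 *	{
 *		struct tp_lookup *lookup = priv;
 *
 *		if (!lookup->tp && !strcmp(tp->name, lookup->name))
 *			lookup->tp = tp;
 *	}
 *
 *	struct tp_lookup lookup = { .name = "sched_switch" };
 *
 *	for_each_kernel_tracepoint(match_tp, &lookup);
 *	if (lookup.tp)
 *		tracepoint_probe_register(lookup.tp, my_probe, NULL);
 */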

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}

void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif