// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>

extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

static struct rcu_head *early_probes;
static bool ok_to_free_tracepoints;

/*
 * Note about RCU:
 * It is used to delay the freeing of the old probes array until a
 * quiescent state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];
};

/* Stub called in place of a removed probe when a new tp_funcs array could not be allocated */
static void tp_stub_func(void)
{
	return;
}

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(struct_size(p, probes, count),
				      GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void srcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}

static __init int release_early_probes(void)
{
	struct rcu_head *tmp;

	ok_to_free_tracepoints = true;

	while (early_probes) {
		tmp = early_probes;
		early_probes = tmp->next;
		call_rcu(tmp, rcu_free_old_probes);
	}

	return 0;
}

/* SRCU is initialized at core_initcall */
postcore_initcall(release_early_probes);

static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * We can't free probes if SRCU is not initialized yet.
		 * Postpone the freeing till after SRCU is initialized.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Tracepoint probes are protected by both sched RCU and SRCU.
		 * By calling the SRCU callback from the sched RCU callback we
		 * cover both cases, so let us chain the SRCU and sched RCU
		 * callbacks to wait for both grace periods.
		 */
		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}

static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}

static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int iter_probes;	/* Iterate over old probe array. */
	int nr_probes = 0;	/* Counter for probes */
	int pos = -1;		/* Insertion position into new array */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;	/* Skip stub functions. */
			if (old[iter_probes].func == tp_func->func &&
			    old[iter_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			nr_probes++;
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		nr_probes = 0;
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;
			/* Insert before probes of lower priority */
			if (pos < 0 && old[iter_probes].prio < prio)
				pos = nr_probes++;
			new[nr_probes++] = old[iter_probes];
		}
		if (pos < 0)
			pos = nr_probes++;
		/* nr_probes now points to the end of the new array */
	} else {
		pos = 0;
		nr_probes = 1; /* must point at end of array */
	}
	new[pos] = *tp_func;
	new[nr_probes].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}

static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++) {
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			}
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++) {
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data)
					WRITE_ONCE(old[i].func, tp_stub_func);
			}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}

static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs, bool sync)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;

	if (!tp_funcs[1].func) {
		func = tp_funcs[0].func;
		/*
		 * If going from the iterator back to a single caller,
		 * we need to synchronize with __DO_TRACE to make sure
		 * that the data passed to the callback is the one that
		 * belongs to that callback.
		 */
		if (sync)
			tracepoint_synchronize_unregister();
	}

	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has an smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it.  This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	rcu_assign_pointer(tp->funcs, tp_funcs);
	tracepoint_update_call(tp, tp_funcs, false);
	static_key_enable(&tp->key);

	release_probes(old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting for an RCU grace period after setting elem->call to the
 * empty function ensures that the original callback is not used anymore.
 * This is guaranteed by the preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	if (!tp_funcs) {
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		static_key_disable(&tp->key);
		rcu_assign_pointer(tp->funcs, tp_funcs);
	} else {
		rcu_assign_pointer(tp->funcs, tp_funcs);
		tracepoint_update_call(tp, tp_funcs,
				       tp_funcs[0].func != old[0].func);
	}
	release_probes(old);
	return 0;
}

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);

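/*
 * Example (illustrative sketch, not part of this file): registering a probe
 * at a non-default priority. func_add() places probes with a higher @prio
 * earlier in the callback array, so they run before lower-priority probes.
 * The names "tp", "my_probe" and "my_data" are assumptions for the sake of
 * the example; see the sketches after tracepoint_probe_register() and
 * for_each_kernel_tracepoint() below for how they might be obtained.
 *
 *	ret = tracepoint_probe_register_prio(tp, (void *)my_probe, my_data,
 *					     TRACEPOINT_DEFAULT_PRIO + 1);
 *	if (ret)
 *		pr_warn("tracepoint probe registration failed: %d\n", ret);
 */
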
/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);

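/*
 * Example (illustrative sketch, not part of this file): a probe always
 * receives the registered @data pointer as its first argument, followed by
 * the arguments declared in the tracepoint's TP_PROTO(). The sched_switch
 * prototype shown here matches this kernel version and, like the way "tp"
 * is assumed to have been looked up (e.g. with for_each_kernel_tracepoint(),
 * sketched further below), is only for illustration.
 *
 *	static void my_probe(void *data, bool preempt,
 *			     struct task_struct *prev,
 *			     struct task_struct *next)
 *	{
 *		// keep this cheap: probes typically run with preemption off
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		return tracepoint_probe_register(tp, (void *)my_probe, NULL);
 *	}
 */
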
/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

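/*
 * Example (illustrative sketch, not part of this file): tearing down the
 * registration from the sketches above. After unregistering, waiting for the
 * tracepoint grace periods with tracepoint_synchronize_unregister() makes it
 * safe to free any data the probe was still using. "tp", "my_probe" and
 * "my_data" are the same assumed names as above.
 *
 *	static void __exit my_exit(void)
 *	{
 *		tracepoint_probe_unregister(tp, (void *)my_probe, my_data);
 *		tracepoint_synchronize_unregister();
 *		kfree(my_data);
 *	}
 */
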
static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), priv);
}

#ifdef CONFIG_MODULES
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
			       (1 << TAINT_UNSIGNED_MODULE));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);

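/*
 * Example (illustrative sketch, not part of this file): how a tracer might
 * use this notifier to learn about tracepoints that come and go with modules.
 * The callback and notifier_block names are assumptions; the notifier data
 * really is a struct tp_module pointer, as documented above.
 *
 *	static int my_tp_module_notify(struct notifier_block *nb,
 *				       unsigned long val, void *data)
 *	{
 *		struct tp_module *tp_mod = data;
 *
 *		switch (val) {
 *		case MODULE_STATE_COMING:
 *			// tp_mod->mod just made its tracepoints available
 *			break;
 *		case MODULE_STATE_GOING:
 *			// unregister any probes attached to tp_mod->mod here
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_tp_nb = {
 *		.notifier_call = my_tp_module_notify,
 *	};
 *
 *	// somewhere in init code:
 *	// ret = register_tracepoint_module_notifier(&my_tp_nb);
 */
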
/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}

static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, and unsigned GPL modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;
	mutex_lock(&tracepoint_module_list_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->mod = mod;
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}

static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

static int tracepoint_module_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);

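/*
 * Example (illustrative sketch, not part of this file): looking up a core
 * kernel tracepoint by name, which is how a tracer that cannot reference the
 * __tracepoint_<name> symbol directly can obtain the struct tracepoint
 * pointer used in the registration sketches above. The lookup structure and
 * tracepoint name are assumptions for the sake of the example.
 *
 *	struct tp_lookup {
 *		const char *name;
 *		struct tracepoint *tp;
 *	};
 *
 *	static void match_tp(struct tracepoint *tp, void *priv)
 *	{
 *		struct tp_lookup *lookup = priv;
 *
 *		if (!strcmp(tp->name, lookup->name))
 *			lookup->tp = tp;
 *	}
 *
 *	struct tp_lookup lookup = { .name = "sched_switch" };
 *
 *	for_each_kernel_tracepoint(match_tp, &lookup);
 *	if (lookup.tp)
 *		ret = tracepoint_probe_register(lookup.tp, (void *)my_probe, NULL);
 */
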
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}

void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif