xref: /linux/arch/powerpc/platforms/powernv/opal-tracepoints.c (revision 6647852abc1fd74e9c5e0dcf404ea4cb9c929630)
// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>
#include <asm/asm-prototypes.h>

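/*
 * Two implementations of the tracepoint register/unregister hooks
 * follow: a jump-label (static key) variant that patches the OPAL
 * call sites at runtime, and a fallback that keeps a plain refcount
 * reachable from the TOC.
 */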
#ifdef HAVE_JUMP_LABEL
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;

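/*
 * A sketch of how the assembly OPAL wrappers might consume this key
 * (illustrative only; the actual macro lives in opal-wrappers.S and
 * has not been verified against this revision):
 *
 *	#define OPAL_BRANCH(LABEL) \
 *		ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key)
 *
 * i.e. a nop in the hot path, patched into a branch to the tracing
 * slow path once static_key_slow_inc() makes the key non-zero.
 */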
int opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
	return 0;
}

void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
#else
/*
 * We optimise OPAL calls by placing opal_tracepoint_refcount
 * directly in the TOC so we can check whether the OPAL tracepoints
 * are enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long opal_tracepoint_refcount;

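/*
 * A sketch of the "single load" test described above, as the assembly
 * wrappers might perform it (illustrative sequence; r2 holds the
 * kernel TOC pointer):
 *
 *	ld	r11, opal_tracepoint_refcount@toc(r2)
 *	cmpdi	r11, 0
 *	bne-	opal_tracepoint_entry
 */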
int opal_tracepoint_regfunc(void)
{
	opal_tracepoint_refcount++;
	return 0;
}

void opal_tracepoint_unregfunc(void)
{
	opal_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute OPAL calls, we need to guard
 * against recursion.
 */
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);

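/*
 * __trace_opal_entry()/__trace_opal_exit() are called from the
 * assembly OPAL wrappers (hence the asm-prototypes.h include above),
 * bracketing each firmware call with an entry and an exit event.
 */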
void __trace_opal_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

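	/*
	 * Keep interrupts off so the per-CPU depth check below can't
	 * race with an interrupt handler making its own OPAL calls.
	 */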
	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

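	/*
	 * A non-zero depth means the tracing code itself triggered an
	 * OPAL call; bail out rather than recurse.
	 */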
	if (*depth)
		goto out;

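	/*
	 * Mark this CPU as busy and keep preemption disabled across the
	 * OPAL call; the matching preempt_enable() is in
	 * __trace_opal_exit().
	 */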
	(*depth)++;
	preempt_disable();
	trace_opal_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_opal_exit(long opcode, unsigned long retval)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_opal_exit(opcode, retval);
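	/* Matches the preempt_disable() taken in __trace_opal_entry(). */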
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}