xref: /linux/arch/s390/kernel/perf_event.c (revision 79997eda0d31bc68203c95ecb978773ee6ce7a1f)
// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for s390x
 *
 *  Copyright IBM Corp. 2012, 2013
 *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT	"perf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sysfs.h>
#include <asm/stacktrace.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sysinfo.h>
#include <asm/unwind.h>

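/*
 * Return the guest's SIE control block for an interrupt taken while the
 * CPU was running a KVM guest.  The SIE entry code saves the control
 * block address in the kernel stack frame, so it can be recovered from
 * the frame that r15 points to.
 */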
static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
{
	struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];

	if (!stack)
		return NULL;

	return (struct kvm_s390_sie_block *)stack->sie_control_block;
}

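/*
 * An interrupt taken while the CPU is executing the SIE instruction
 * leaves the interrupt PSW pointing at the sie_exit label in the SIE
 * exit path, so comparing the instruction pointer against &sie_exit
 * identifies samples that hit inside a guest.
 */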
static bool is_in_guest(struct pt_regs *regs)
{
	if (user_mode(regs))
		return false;
#if IS_ENABLED(CONFIG_KVM)
	return instruction_pointer(regs) == (unsigned long) &sie_exit;
#else
	return false;
#endif
}

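/*
 * The following helpers must only be called when is_in_guest() is true,
 * that is, when sie_block() returns a valid control block.
 */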
static unsigned long guest_is_user_mode(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
}

static unsigned long instruction_pointer_guest(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.addr;
}

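/*
 * Report the guest PSW address instead of the host instruction pointer
 * for samples that were taken while a guest was running.
 */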
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	return is_in_guest(regs) ? instruction_pointer_guest(regs)
				 : instruction_pointer(regs);
}

static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
{
	return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
					: PERF_RECORD_MISC_GUEST_KERNEL;
}

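/*
 * The cpum_sf sampling PMU passes per-sample state flags (struct
 * perf_sf_sde_regs) in the int_parm_long member of the pt_regs it
 * constructs; decode guest/host and user/kernel mode from those bits.
 */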
static unsigned long perf_misc_flags_sf(struct pt_regs *regs)
{
	struct perf_sf_sde_regs *sde_regs;
	unsigned long flags;

	sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long;
	if (sde_regs->in_guest)
		flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
					: PERF_RECORD_MISC_GUEST_KERNEL;
	else
		flags = user_mode(regs) ? PERF_RECORD_MISC_USER
					: PERF_RECORD_MISC_KERNEL;
	return flags;
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	/* Check if the cpum_sf PMU has created the pt_regs structure.
	 * Such a synthesized pt_regs is identified by the measurement-alert
	 * external interrupt code (0x1407), the program-request-alert
	 * interruption parameter, and a zero stack pointer.  In that case
	 * the misc flags can be taken directly from the sample state.
	 * Otherwise, do regular checks on the pt_regs content.
	 */
	if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA)
		if (!regs->gprs[15])
			return perf_misc_flags_sf(regs);

	if (is_in_guest(regs))
		return perf_misc_guest_flags(regs);

	return user_mode(regs) ? PERF_RECORD_MISC_USER
			       : PERF_RECORD_MISC_KERNEL;
}

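/*
 * Query counter facility information via qctri() and dump the version
 * numbers and the authorization/enable/activation control masks.
 */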
static void print_debug_cf(void)
{
	struct cpumf_ctr_info cf_info;
	int cpu = smp_processor_id();

	memset(&cf_info, 0, sizeof(cf_info));
	if (!qctri(&cf_info))
		pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
			cpu, cf_info.cfvn, cf_info.csvn,
			cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
}

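/*
 * Query sampling facility information via qsi() and dump the state of
 * the basic-sampling and diagnostic-sampling functions.
 */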
static void print_debug_sf(void)
{
	struct hws_qsi_info_block si;
	int cpu = smp_processor_id();

	memset(&si, 0, sizeof(si));
	if (qsi(&si))
		return;

	pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n",
		cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
		si.cpu_speed);

	if (si.as)
		pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i bsdes=%i tear=%016lx dear=%016lx\n",
			cpu, si.as, si.es, si.cs, si.bsdes, si.tear, si.dear);
	if (si.ad)
		pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i dsdes=%i tear=%016lx dear=%016lx\n",
			cpu, si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear);
}

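/*
 * Dump the measurement facility state of the current CPU.  Interrupts
 * are disabled so the CPU cannot change underneath the queries.
 */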
void perf_event_print_debug(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (cpum_cf_avail())
		print_debug_cf();
	if (cpum_sf_avail())
		print_debug_sf();
	local_irq_restore(flags);
}

/* Service level infrastructure */
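/*
 * These callbacks contribute CPU-MF capability lines to the s390
 * service-level interface, /proc/service_levels.
 */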
static void sl_print_counter(struct seq_file *m)
{
	struct cpumf_ctr_info ci;

	memset(&ci, 0, sizeof(ci));
	if (qctri(&ci))
		return;

	seq_printf(m, "CPU-MF: Counter facility: version=%u.%u authorization=%04x\n",
		   ci.cfvn, ci.csvn, ci.auth_ctl);
}

static void sl_print_sampling(struct seq_file *m)
{
	struct hws_qsi_info_block si;

	memset(&si, 0, sizeof(si));
	if (qsi(&si))
		return;

	if (!si.as && !si.ad)
		return;

	seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu cpu_speed=%u\n",
		   si.min_sampl_rate, si.max_sampl_rate, si.cpu_speed);
	if (si.as)
		seq_printf(m, "CPU-MF: Sampling facility: mode=basic sample_size=%u\n",
			   si.bsdes);
	if (si.ad)
		seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic sample_size=%u\n",
			   si.dsdes);
}

static void service_level_perf_print(struct seq_file *m,
				     struct service_level *sl)
{
	if (cpum_cf_avail())
		sl_print_counter(m);
	if (cpum_sf_avail())
		sl_print_sampling(m);
}

static struct service_level service_level_perf = {
	.seq_print = service_level_perf_print,
};

static int __init service_level_perf_register(void)
{
	return register_service_level(&service_level_perf);
}
arch_initcall(service_level_perf_register);

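/*
 * Walk the kernel stack with the s390 unwinder and record one return
 * address per frame until the callchain entry buffer is full.
 */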
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, current, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || perf_callchain_store(entry, addr))
			return;
	}
}

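/*
 * Walk the user stack by following the frame back chain.  This only
 * works for programs compiled with back-chain support; compat (31-bit)
 * tasks use a different frame layout and are not handled.
 */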
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	perf_callchain_store(entry, instruction_pointer(regs));
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (entry->nr < entry->max_stack) {
		if (__get_user(sp, &sf->back_chain))
			break;
		if (__get_user(ip, &sf->gprs[8]))
			break;
		if (ip & 0x1) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet.  Otherwise exit.
			 */
			if (first && !(regs->gprs[14] & 0x1))
				ip = regs->gprs[14];
			else
				break;
		}
		perf_callchain_store(entry, ip);
		/* Sanity check: the ABI requires the SP to be aligned to 8 bytes. */
		if (!sp || sp & 0x7)
			break;
		sf = (void __user *)sp;
		first = false;
	}
	pagefault_enable();
}

/* Perf definitions for PMU event attributes in sysfs */
ssize_t cpumf_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}