xref: /linux/arch/x86/events/utils.c (revision 320fefa9e2edc67011e235ea1d50f0d00ddfe004)
// SPDX-License-Identifier: GPL-2.0
#include <asm/insn.h>

#include "perf_event.h"

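/*
 * Decode the already-fetched instruction and map its opcode to an
 * X86_BR_* branch type. Returns X86_BR_NONE for non-branch instructions
 * and X86_BR_ABORT if the opcode cannot be decoded.
 */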
static int decode_branch_type(struct insn *insn)
{
	int ext;

	if (insn_get_opcode(insn))
		return X86_BR_ABORT;

	switch (insn->opcode.bytes[0]) {
	case 0xf:
		switch (insn->opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			return X86_BR_SYSCALL;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			return X86_BR_SYSRET;
		case 0x80 ... 0x8f: /* conditional */
			return X86_BR_JCC;
		}
		return X86_BR_NONE;
	case 0x70 ... 0x7f: /* conditional */
		return X86_BR_JCC;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		return X86_BR_RET;
	case 0xcf: /* iret */
		return X86_BR_IRET;
	case 0xcc ... 0xce: /* int */
		return X86_BR_INT;
	case 0xe8: /* call near rel */
		if (insn_get_immediate(insn) || insn->immediate1.value == 0) {
			/* zero length call */
			return X86_BR_ZERO_CALL;
		}
		fallthrough;
	case 0x9a: /* call far absolute */
		return X86_BR_CALL;
	case 0xe0 ... 0xe3: /* loop jmp */
		return X86_BR_JCC;
	case 0xe9 ... 0xeb: /* jmp */
		return X86_BR_JMP;
	case 0xff: /* call near absolute, call far absolute ind */
		if (insn_get_modrm(insn))
			return X86_BR_ABORT;

		ext = (insn->modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			return X86_BR_IND_CALL;
		case 4: /* near ind jmp */
		case 5: /* far ind jmp */
			return X86_BR_IND_JMP;
		}
		return X86_BR_NONE;
	}

	return X86_BR_NONE;
}

/*
 * Return the type of control flow change at address "from".
 * The instruction is not necessarily a branch (e.g., in case of
 * an interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 *
 * While recording branches, some processors can report the "from"
 * address to be that of an instruction preceding the actual branch
 * when instruction fusion occurs. If fusion is expected, attempt to
 * find the type of the first branch instruction within the next
 * MAX_INSN_SIZE bytes and, if found, provide the offset between the
 * reported "from" address and the actual branch instruction address.
 */
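/*
 * Note: the typical fusion case is a cmp/test macro-fused with the
 * following jcc, in which case the reported "from" address points at
 * the cmp/test rather than at the branch itself.
 */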
static int get_branch_type(unsigned long from, unsigned long to, int abort,
			   bool fused, int *offset)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left, insn_offset;
	int ret = X86_BR_NONE;
	int to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	/* make sure we initialize offset */
	if (offset)
		*offset = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * may be zero if the LBR did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and the interrupt hits in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data.  This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs());
#endif
	insn_init(&insn, addr, bytes_read, is64);
	ret = decode_branch_type(&insn);
	insn_offset = 0;

	/* Check for the possibility of branch fusion */
	while (fused && ret == X86_BR_NONE) {
		/* Check for decoding errors */
		if (insn_get_length(&insn) || !insn.length)
			break;

		insn_offset += insn.length;
		bytes_read -= insn.length;
		if (bytes_read < 0)
			break;

		insn_init(&insn, addr + insn_offset, bytes_read, is64);
		ret = decode_branch_type(&insn);
	}

	if (offset)
		*offset = insn_offset;

	/*
	 * Interrupts, traps and faults (and thus ring transitions) may
	 * occur on any instruction. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * We have no way of detecting kernel-to-kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}

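/*
 * Thin wrappers around get_branch_type(): branch_type() classifies a
 * single record, while branch_type_fused() also looks past a possibly
 * fused instruction and reports via @offset how far the actual branch
 * is from the recorded "from" address.
 */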
int branch_type(unsigned long from, unsigned long to, int abort)
{
	return get_branch_type(from, to, abort, false, NULL);
}

int branch_type_fused(unsigned long from, unsigned long to, int abort,
		      int *offset)
{
	return get_branch_type(from, to, abort, true, offset);
}

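/*
 * Map from X86_BR_* bit positions (after the two priv-level bits are
 * shifted out) to the generic perf branch types reported to userspace.
 */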
#define X86_BR_TYPE_MAP_MAX	16

static int branch_map[X86_BR_TYPE_MAP_MAX] = {
	PERF_BR_CALL,		/* X86_BR_CALL */
	PERF_BR_RET,		/* X86_BR_RET */
	PERF_BR_SYSCALL,	/* X86_BR_SYSCALL */
	PERF_BR_SYSRET,		/* X86_BR_SYSRET */
	PERF_BR_UNKNOWN,	/* X86_BR_INT */
	PERF_BR_ERET,		/* X86_BR_IRET */
	PERF_BR_COND,		/* X86_BR_JCC */
	PERF_BR_UNCOND,		/* X86_BR_JMP */
	PERF_BR_IRQ,		/* X86_BR_IRQ */
	PERF_BR_IND_CALL,	/* X86_BR_IND_CALL */
	PERF_BR_UNKNOWN,	/* X86_BR_ABORT */
	PERF_BR_UNKNOWN,	/* X86_BR_IN_TX */
	PERF_BR_NO_TX,		/* X86_BR_NO_TX */
	PERF_BR_CALL,		/* X86_BR_ZERO_CALL */
	PERF_BR_UNKNOWN,	/* X86_BR_CALL_STACK */
	PERF_BR_IND,		/* X86_BR_IND_JMP */
};

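/*
 * Convert an X86_BR_* classification (as returned by branch_type()) to
 * the generic PERF_BR_* type. The two priv-level bits are shifted out
 * and the lowest remaining type bit indexes branch_map[], which lists
 * the types in the same order as the X86_BR_* bit definitions; e.g.,
 * X86_BR_JCC | X86_BR_KERNEL ends up as branch_map[6] == PERF_BR_COND.
 */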
int common_branch_type(int type)
{
	int i;

	type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */

	if (type) {
		i = __ffs(type);
		if (i < X86_BR_TYPE_MAP_MAX)
			return branch_map[i];
	}

	return PERF_BR_UNKNOWN;
}