xref: /freebsd/sys/dev/hwpmc/hwpmc_x86.c (revision f5463265955b829775bbb32e1fd0bc11dafc36ce)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005,2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/pmc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>	/* For x86/apicvar.h */
#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/stack.h>
#include <machine/vmparam.h>

#include <x86/apicvar.h>

#include "hwpmc_soft.h"

/*
 * Attempt to walk a user call stack using a too-simple algorithm.
 * In the general case we need unwind information associated with
 * the executable to be able to walk the user stack.
 *
 * We are handed a trap frame laid down at the time the PMC interrupt
 * was taken.  If the application is using frame pointers, the saved
 * PC value could be:
 * a. at the beginning of a function before the stack frame is laid
 *    down,
 * b. just before a 'ret', after the stack frame has been taken off,
 * c. somewhere else in the function with a valid stack frame being
 *    present.
 *
 * If the application is not using frame pointers, this algorithm will
 * fail to yield an interesting call chain.
 *
 * TODO: figure out a way to use unwind information.
 */

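/*
 * Frame layout assumed by this walker (the stack grows towards lower
 * addresses):
 *
 *	fp + sizeof(uintptr_t)	-> return address into the caller
 *	fp			-> the caller's saved frame pointer
 *
 * Each step therefore reads the return address one word above the
 * current frame pointer and then follows the saved frame pointer at
 * *fp to move up one frame.
 */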
int
pmc_save_user_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;
	uint32_t instr;
	uintptr_t fp, oldfp, pc, r, sp;

	KASSERT(TRAPF_USERMODE(tf), ("[x86,%d] Not a user trap frame tf=%p",
	    __LINE__, (void *) tf));

	pc = PMC_TRAPFRAME_TO_PC(tf);
	oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_USER_SP(tf);

	*cc++ = pc; n = 1;

	r = fp + sizeof(uintptr_t); /* points to return address */

	if (!PMC_IN_USERSPACE(pc))
		return (n);

	if (copyin((void *) pc, &instr, sizeof(instr)) != 0)
		return (n);

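	/*
	 * Classify the interrupted instruction to decide where the
	 * caller's address lives:
	 * - at the initial 'push %bp' or at a 'ret', no frame is in
	 *   place and the return address is at the top of the stack,
	 * - at the 'mov %sp, %bp' of the prologue, the old frame
	 *   pointer has already been pushed, so the return address is
	 *   one word above the stack pointer,
	 * - otherwise a full frame is present: the return address and
	 *   the previous frame pointer are read through 'fp'.
	 */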
	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr)) { /* ret */
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		sp += sizeof(uintptr_t);
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
	    copyin((void *) fp, &fp, sizeof(fp)) != 0)
		return (n);

	for (; n < nframes;) {
		if (pc == 0 || !PMC_IN_USERSPACE(pc))
			break;

		*cc++ = pc; n++;

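		/*
		 * Sanity check: frame pointers should not decrease as
		 * we walk towards older frames (the stack grows down);
		 * a smaller value means the chain is corrupt or the
		 * code was built without frame pointers.
		 */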
		if (fp < oldfp)
			break;

		r = fp + sizeof(uintptr_t); /* address of return address */
		oldfp = fp;

		if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
		    copyin((void *) fp, &fp, sizeof(fp)) != 0)
			break;
	}

	return (n);
}

/*
 * Walking the kernel call stack.
 *
 * We are handed the trap frame laid down at the time the PMC
 * interrupt was taken.  The saved PC could be:
 * a. in the low-level trap handler, meaning that there isn't a C stack
 *    to traverse,
 * b. at the beginning of a function before the stack frame is laid
 *    down,
 * c. just before a 'ret', after the stack frame has been taken off,
 * d. somewhere else in a function with a valid stack frame being
 *    present.
 *
 * In case (d), the previous frame pointer is at [%ebp]/[%rbp] and
 * the return address is at [%ebp+4]/[%rbp+8].
 *
 * For cases (b) and (c), the return address is at [%esp]/[%rsp] and
 * the frame pointer doesn't need to be changed when going up one
 * level in the stack.
 *
 * For case (a), we check if the PC lies in low-level trap handling
 * code, and if so we terminate our trace.
 */

int __nosanitizeaddress __nosanitizememory
pmc_save_kernel_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	uintptr_t fp, pc, ra, sp;
	uint32_t instr;
	int n;

	KASSERT(TRAPF_USERMODE(tf) == 0, ("[x86,%d] not a kernel backtrace",
	    __LINE__));

	pc = PMC_TRAPFRAME_TO_PC(tf);
	fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_KERNEL_SP(tf);

	*cc++ = pc;
	ra = fp + sizeof(uintptr_t); /* points to return address */

	if (nframes <= 1)
		return (1);

	if (PMC_IN_TRAP_HANDLER(pc) || !PMC_IN_KERNEL(pc) ||
	    !PMC_IN_KERNEL_STACK(ra) || !PMC_IN_KERNEL_STACK(sp) ||
	    !PMC_IN_KERNEL_STACK(fp))
		return (1);

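	/*
	 * 'pc' was validated as a kernel address above, so the
	 * instruction at it can be read directly; unlike the user
	 * stack walker, no copyin() is needed.
	 */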
	instr = *(uint32_t *)pc;

	/*
	 * Determine whether the interrupted function was in the
	 * process of either laying down its stack frame or taking
	 * it off.
	 *
	 * If we haven't started laying down a stack frame, or are
	 * just about to return, then our caller's address is at
	 * *sp, and we don't have a frame to unwind.
	 */
	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr))
		pc = *(uintptr_t *) sp;
	else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		/*
		 * The code was midway through laying down a frame.
		 * At this point sp[0] has a frame back pointer,
		 * and the caller's address is therefore at sp[1].
		 */
		sp += sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(sp))
			return (1);
		pc = *(uintptr_t *)sp;
	} else {
		/*
		 * Not in the function prologue or epilogue.
		 */
		pc = *(uintptr_t *)ra;
		fp = *(uintptr_t *)fp;
	}

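	/*
	 * Record the remaining frames, stopping if we reach the
	 * low-level trap handlers or step outside the kernel stack.
	 */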
	for (n = 1; n < nframes; n++) {
		*cc++ = pc;

		if (PMC_IN_TRAP_HANDLER(pc))
			break;

		ra = fp + sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(fp) || !PMC_IN_KERNEL_STACK(ra))
			break;
		pc = *(uintptr_t *)ra;
		fp = *(uintptr_t *)fp;
	}

	return (n);
}

/*
 * Machine dependent initialization for x86 class platforms.
 */
struct pmc_mdep *
pmc_md_initialize(void)
{
	int i;
	struct pmc_mdep *md;

	/* determine the CPU kind */
	if (cpu_vendor_id == CPU_VENDOR_AMD ||
	    cpu_vendor_id == CPU_VENDOR_HYGON)
		md = pmc_amd_initialize();
	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
		md = pmc_intel_initialize();
	else
		return (NULL);

	/* disallow sampling if we do not have an LAPIC */
	if (md != NULL && !lapic_enable_pmc())
		for (i = 0; i < md->pmd_nclass; i++) {
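			/* Software PMCs do not rely on the LAPIC. */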
			if (i == PMC_CLASS_INDEX_SOFT)
				continue;
			md->pmd_classdep[i].pcd_caps &= ~PMC_CAP_INTERRUPT;
		}

	return (md);
}

void
pmc_md_finalize(struct pmc_mdep *md)
{

	lapic_disable_pmc();
	if (cpu_vendor_id == CPU_VENDOR_AMD ||
	    cpu_vendor_id == CPU_VENDOR_HYGON)
		pmc_amd_finalize(md);
	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
		pmc_intel_finalize(md);
	else
		KASSERT(0, ("[x86,%d] Unknown vendor", __LINE__));
}
268