xref: /freebsd/sys/dev/hwpmc/hwpmc_x86.c (revision f95e683fa231fd1bed5cc10f52db0629a687ac3d)
1f263522aSJoseph Koshy /*-
2718cf2ccSPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3718cf2ccSPedro F. Giffuni  *
4d0d0192fSJoseph Koshy  * Copyright (c) 2005,2008 Joseph Koshy
5d07f36b0SJoseph Koshy  * Copyright (c) 2007 The FreeBSD Foundation
6f263522aSJoseph Koshy  * All rights reserved.
7f263522aSJoseph Koshy  *
8d07f36b0SJoseph Koshy  * Portions of this software were developed by A. Joseph Koshy under
9d07f36b0SJoseph Koshy  * sponsorship from the FreeBSD Foundation and Google, Inc.
10d07f36b0SJoseph Koshy  *
11f263522aSJoseph Koshy  * Redistribution and use in source and binary forms, with or without
12f263522aSJoseph Koshy  * modification, are permitted provided that the following conditions
13f263522aSJoseph Koshy  * are met:
14f263522aSJoseph Koshy  * 1. Redistributions of source code must retain the above copyright
15f263522aSJoseph Koshy  *    notice, this list of conditions and the following disclaimer.
16f263522aSJoseph Koshy  * 2. Redistributions in binary form must reproduce the above copyright
17f263522aSJoseph Koshy  *    notice, this list of conditions and the following disclaimer in the
18f263522aSJoseph Koshy  *    documentation and/or other materials provided with the distribution.
19f263522aSJoseph Koshy  *
20f263522aSJoseph Koshy  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21f263522aSJoseph Koshy  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22f263522aSJoseph Koshy  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23f263522aSJoseph Koshy  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24f263522aSJoseph Koshy  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25f263522aSJoseph Koshy  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26f263522aSJoseph Koshy  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27f263522aSJoseph Koshy  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28f263522aSJoseph Koshy  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29f263522aSJoseph Koshy  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30f263522aSJoseph Koshy  * SUCH DAMAGE.
31f263522aSJoseph Koshy  */
32f263522aSJoseph Koshy 
33f263522aSJoseph Koshy #include <sys/cdefs.h>
34f263522aSJoseph Koshy __FBSDID("$FreeBSD$");
35f263522aSJoseph Koshy 
36f263522aSJoseph Koshy #include <sys/param.h>
37f263522aSJoseph Koshy #include <sys/bus.h>
38f263522aSJoseph Koshy #include <sys/pmc.h>
39d07f36b0SJoseph Koshy #include <sys/proc.h>
40f263522aSJoseph Koshy #include <sys/systm.h>
41f263522aSJoseph Koshy 
42d07f36b0SJoseph Koshy #include <machine/cpu.h>
435113aa0aSJung-uk Kim #include <machine/cputypes.h>
4421157ad3SJohn Baldwin #include <machine/intr_machdep.h>
45d95b3509SRandall Stewart #if (__FreeBSD_version >= 1100000)
46e07ef9b0SJohn Baldwin #include <x86/apicvar.h>
47d95b3509SRandall Stewart #else
48d95b3509SRandall Stewart #include <machine/apicvar.h>
49d95b3509SRandall Stewart #endif
50f263522aSJoseph Koshy #include <machine/pmc_mdep.h>
51f263522aSJoseph Koshy #include <machine/md_var.h>
52f263522aSJoseph Koshy 
53d07f36b0SJoseph Koshy #include <vm/vm.h>
54d07f36b0SJoseph Koshy #include <vm/vm_param.h>
55d07f36b0SJoseph Koshy #include <vm/pmap.h>
56d07f36b0SJoseph Koshy 
57f5f9340bSFabien Thomas #include "hwpmc_soft.h"
58f5f9340bSFabien Thomas 
59d07f36b0SJoseph Koshy /*
60d07f36b0SJoseph Koshy  * Attempt to walk a user call stack using a too-simple algorithm.
61d07f36b0SJoseph Koshy  * In the general case we need unwind information associated with
62d07f36b0SJoseph Koshy  * the executable to be able to walk the user stack.
63d07f36b0SJoseph Koshy  *
64d07f36b0SJoseph Koshy  * We are handed a trap frame laid down at the time the PMC interrupt
65d07f36b0SJoseph Koshy  * was taken.  If the application is using frame pointers, the saved
66d07f36b0SJoseph Koshy  * PC value could be:
67d07f36b0SJoseph Koshy  * a. at the beginning of a function before the stack frame is laid
68d07f36b0SJoseph Koshy  *    down,
69d07f36b0SJoseph Koshy  * b. just before a 'ret', after the stack frame has been taken off,
70d07f36b0SJoseph Koshy  * c. somewhere else in the function with a valid stack frame being
71d07f36b0SJoseph Koshy  *    present,
72d07f36b0SJoseph Koshy  *
73d07f36b0SJoseph Koshy  * If the application is not using frame pointers, this algorithm will
74d07f36b0SJoseph Koshy  * fail to yield an interesting call chain.
75d07f36b0SJoseph Koshy  *
76d07f36b0SJoseph Koshy  * TODO: figure out a way to use unwind information.
77d07f36b0SJoseph Koshy  */
78d07f36b0SJoseph Koshy 
int
pmc_save_user_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;
	uint32_t instr;
	uintptr_t fp, oldfp, pc, r, sp;

	/* This walker is only valid for traps taken from user mode. */
	KASSERT(TRAPF_USERMODE(tf), ("[x86,%d] Not a user trap frame tf=%p",
	    __LINE__, (void *) tf));

	pc = PMC_TRAPFRAME_TO_PC(tf);
	oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_USER_SP(tf);

	/* The interrupted PC is always the first entry in the chain. */
	*cc++ = pc; n = 1;

	r = fp + sizeof(uintptr_t); /* points to return address */

	/* Give up immediately if the saved PC is not a user address. */
	if (!PMC_IN_USERSPACE(pc))
		return (n);

	/*
	 * Fetch the instruction at the interrupted PC via copyin();
	 * user memory may be unmapped, so a failure just truncates
	 * the trace rather than faulting.
	 */
	if (copyin((void *) pc, &instr, sizeof(instr)) != 0)
		return (n);

	/*
	 * Cases (a)/(b) from the comment above: no frame is set up
	 * (or it has already been torn down), so the caller's return
	 * address sits directly at *sp.
	 */
	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr)) { /* ret */
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		/*
		 * Mid-prologue: %bp has been pushed but not yet
		 * loaded, so the return address is one word up
		 * from *sp.
		 */
		sp += sizeof(uintptr_t);
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
	    copyin((void *) fp, &fp, sizeof(fp)) != 0)
		/* Case (c): normal frame; read return address and prior fp. */
		return (n);

	for (; n < nframes;) {
		/* Stop on a null or non-user return address. */
		if (pc == 0 || !PMC_IN_USERSPACE(pc))
			break;

		*cc++ = pc; n++;

		/*
		 * Frame pointers must increase monotonically as we
		 * unwind (the stack grows down); a decrease indicates
		 * a corrupt or non-FP chain.
		 */
		if (fp < oldfp)
			break;

		r = fp + sizeof(uintptr_t); /* address of return address */
		oldfp = fp;

		/* Step to the caller's frame; any copyin failure ends it. */
		if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
		    copyin((void *) fp, &fp, sizeof(fp)) != 0)
			break;
	}

	/* Return the number of entries written to 'cc'. */
	return (n);
}
134d07f36b0SJoseph Koshy 
135d07f36b0SJoseph Koshy /*
136d07f36b0SJoseph Koshy  * Walking the kernel call stack.
137d07f36b0SJoseph Koshy  *
138d07f36b0SJoseph Koshy  * We are handed the trap frame laid down at the time the PMC
139d07f36b0SJoseph Koshy  * interrupt was taken.  The saved PC could be:
140d07f36b0SJoseph Koshy  * a. in the lowlevel trap handler, meaning that there isn't a C stack
141d07f36b0SJoseph Koshy  *    to traverse,
142d07f36b0SJoseph Koshy  * b. at the beginning of a function before the stack frame is laid
143d07f36b0SJoseph Koshy  *    down,
144d07f36b0SJoseph Koshy  * c. just before a 'ret', after the stack frame has been taken off,
145d07f36b0SJoseph Koshy  * d. somewhere else in a function with a valid stack frame being
146d07f36b0SJoseph Koshy  *    present.
147d07f36b0SJoseph Koshy  *
148d07f36b0SJoseph Koshy  * In case (d), the previous frame pointer is at [%ebp]/[%rbp] and
149d07f36b0SJoseph Koshy  * the return address is at [%ebp+4]/[%rbp+8].
150d07f36b0SJoseph Koshy  *
151d07f36b0SJoseph Koshy  * For cases (b) and (c), the return address is at [%esp]/[%rsp] and
152d07f36b0SJoseph Koshy  * the frame pointer doesn't need to be changed when going up one
153d07f36b0SJoseph Koshy  * level in the stack.
154d07f36b0SJoseph Koshy  *
155d07f36b0SJoseph Koshy  * For case (a), we check if the PC lies in low-level trap handling
156d07f36b0SJoseph Koshy  * code, and if so we terminate our trace.
157d07f36b0SJoseph Koshy  */
158d07f36b0SJoseph Koshy 
int __nosanitizeaddress __nosanitizememory
pmc_save_kernel_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;
	uint32_t instr;
	uintptr_t fp, pc, r, sp, stackstart, stackend;
	struct thread *td;

	/* This walker is only valid for traps taken from kernel mode. */
	KASSERT(TRAPF_USERMODE(tf) == 0,("[x86,%d] not a kernel backtrace",
	    __LINE__));

	td = curthread;
	pc = PMC_TRAPFRAME_TO_PC(tf);
	fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_KERNEL_SP(tf);

	/* The interrupted PC is always the first entry in the chain. */
	*cc++ = pc;
	r = fp + sizeof(uintptr_t); /* points to return address */

	if (nframes <= 1)
		return (1);

	/*
	 * Bounds of the current thread's kernel stack; every pointer
	 * we dereference below must lie within [stackstart, stackend).
	 */
	stackstart = (uintptr_t) td->td_kstack;
	stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;

	/*
	 * Stop with a one-entry chain if the PC is in the low-level
	 * trap handlers (case (a) above) or any of the candidate
	 * pointers fall outside kernel text / this thread's stack.
	 */
	if (PMC_IN_TRAP_HANDLER(pc) ||
	    !PMC_IN_KERNEL(pc) ||
	    !PMC_IN_KERNEL_STACK(r, stackstart, stackend) ||
	    !PMC_IN_KERNEL_STACK(sp, stackstart, stackend) ||
	    !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
		return (1);

	/* Safe to dereference directly: pc was validated as kernel text. */
	instr = *(uint32_t *) pc;

	/*
	 * Determine whether the interrupted function was in the
	 * process of either laying down its stack frame or taking
	 * it off.
	 *
	 * If we haven't started laying down a stack frame, or are
	 * just about to return, then our caller's address is at
	 * *sp, and we don't have a frame to unwind.
	 */
	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr))
		pc = *(uintptr_t *) sp;
	else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		/*
		 * The code was midway through laying down a frame.
		 * At this point sp[0] has a frame back pointer,
		 * and the caller's address is therefore at sp[1].
		 */
		sp += sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(sp, stackstart, stackend))
			return (1);
		pc = *(uintptr_t *) sp;
	} else {
		/*
		 * Not in the function prologue or epilogue.
		 */
		pc = *(uintptr_t *) r;
		fp = *(uintptr_t *) fp;
	}

	/* Walk the frame-pointer chain up to 'nframes' entries. */
	for (n = 1; n < nframes; n++) {
		*cc++ = pc;

		/* Entering trap-handler code terminates the C stack. */
		if (PMC_IN_TRAP_HANDLER(pc))
			break;

		r = fp + sizeof(uintptr_t);
		/* Re-validate both pointers before each dereference. */
		if (!PMC_IN_KERNEL_STACK(fp, stackstart, stackend) ||
		    !PMC_IN_KERNEL_STACK(r, stackstart, stackend))
			break;
		pc = *(uintptr_t *) r;
		fp = *(uintptr_t *) fp;
	}

	/* Return the number of entries written to 'cc'. */
	return (n);
}
239f263522aSJoseph Koshy 
240f263522aSJoseph Koshy /*
241f263522aSJoseph Koshy  * Machine dependent initialization for x86 class platforms.
242f263522aSJoseph Koshy  */
243f263522aSJoseph Koshy 
244f263522aSJoseph Koshy struct pmc_mdep *
245f263522aSJoseph Koshy pmc_md_initialize()
246f263522aSJoseph Koshy {
247e753fde4SJoseph Koshy 	int i;
248e753fde4SJoseph Koshy 	struct pmc_mdep *md;
249e753fde4SJoseph Koshy 
250f263522aSJoseph Koshy 	/* determine the CPU kind */
25153071ed1SKonstantin Belousov 	if (cpu_vendor_id == CPU_VENDOR_AMD ||
25253071ed1SKonstantin Belousov 	    cpu_vendor_id == CPU_VENDOR_HYGON)
253e753fde4SJoseph Koshy 		md = pmc_amd_initialize();
2545113aa0aSJung-uk Kim 	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
255e753fde4SJoseph Koshy 		md = pmc_intel_initialize();
256e829eb6dSJoseph Koshy 	else
25721157ad3SJohn Baldwin 		return (NULL);
258e753fde4SJoseph Koshy 
259e753fde4SJoseph Koshy 	/* disallow sampling if we do not have an LAPIC */
260158c4475SEd Maste 	if (md != NULL && !lapic_enable_pmc())
261f5f9340bSFabien Thomas 		for (i = 0; i < md->pmd_nclass; i++) {
262f5f9340bSFabien Thomas 			if (i == PMC_CLASS_INDEX_SOFT)
263f5f9340bSFabien Thomas 				continue;
264e829eb6dSJoseph Koshy 			md->pmd_classdep[i].pcd_caps &= ~PMC_CAP_INTERRUPT;
265f5f9340bSFabien Thomas 		}
266e753fde4SJoseph Koshy 
267e829eb6dSJoseph Koshy 	return (md);
268e829eb6dSJoseph Koshy }
269e829eb6dSJoseph Koshy 
270e829eb6dSJoseph Koshy void
271e829eb6dSJoseph Koshy pmc_md_finalize(struct pmc_mdep *md)
272e829eb6dSJoseph Koshy {
27321157ad3SJohn Baldwin 
27421157ad3SJohn Baldwin 	lapic_disable_pmc();
27553071ed1SKonstantin Belousov 	if (cpu_vendor_id == CPU_VENDOR_AMD ||
27653071ed1SKonstantin Belousov 	    cpu_vendor_id == CPU_VENDOR_HYGON)
277e829eb6dSJoseph Koshy 		pmc_amd_finalize(md);
2785113aa0aSJung-uk Kim 	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
279e829eb6dSJoseph Koshy 		pmc_intel_finalize(md);
280e829eb6dSJoseph Koshy 	else
281e829eb6dSJoseph Koshy 		KASSERT(0, ("[x86,%d] Unknown vendor", __LINE__));
282f263522aSJoseph Koshy }
283