xref: /freebsd/sys/dev/hwpmc/hwpmc_arm64_md.c (revision 6137b5f7b8c183ee8806d79b3f1d8e5e3ddb3df3)
/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory with support from ARM Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pmc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/stack.h>
#include <machine/vmparam.h>

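/*
 * Machine-dependent initialization: delegate to the arm64 PMC code.
 */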
struct pmc_mdep *
pmc_md_initialize(void)
{

	return (pmc_arm64_initialize());
}

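/*
 * Machine-dependent teardown: release the state set up at initialization.
 */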
void
pmc_md_finalize(struct pmc_mdep *md)
{

	pmc_arm64_finalize(md);
}

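/*
 * Capture a kernel-mode call chain at sample time.  The first entry is the
 * PC from the trap frame; the rest come from walking saved frame pointers
 * with the kernel unwinder.
 */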
int
pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples, struct trapframe *tf)
{
	struct unwind_state frame;
	int count;

	KASSERT(TRAPF_USERMODE(tf) == 0, ("[arm64,%d] not a kernel backtrace",
	    __LINE__));

	frame.pc = PMC_TRAPFRAME_TO_PC(tf);
	*cc++ = frame.pc;

	if (maxsamples <= 1)
		return (1);

	frame.fp = PMC_TRAPFRAME_TO_FP(tf);
	if (!PMC_IN_KERNEL(frame.pc) || !PMC_IN_KERNEL_STACK(frame.fp))
		return (1);

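	/*
	 * Walk up the kernel stack one frame at a time, stopping when the
	 * unwinder fails or the PC leaves the kernel.
	 */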
	for (count = 1; count < maxsamples; count++) {
		if (!unwind_frame(curthread, &frame))
			break;
		if (!PMC_IN_KERNEL(frame.pc))
			break;
		*cc++ = frame.pc;
	}

	return (count);
}

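/*
 * Capture a user-mode call chain.  User frames cannot be walked with the
 * kernel unwinder, so the saved frame pointers are followed with copyin().
 */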
int
pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
    struct trapframe *tf)
{
	uintptr_t pc, r, oldfp, fp;
	int count;

	KASSERT(TRAPF_USERMODE(tf), ("[arm64,%d] Not a user trap frame tf=%p",
	    __LINE__, (void *) tf));

	pc = PMC_TRAPFRAME_TO_PC(tf);
	*cc++ = pc;

	if (maxsamples <= 1)
		return (1);

	oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);

	if (!PMC_IN_USERSPACE(pc) ||
	    !PMC_IN_USERSPACE(fp))
		return (1);

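	/*
	 * Each AAPCS64 frame record is a {previous fp, saved lr} pair, with
	 * the saved lr one word above the frame pointer.
	 */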
	for (count = 1; count < maxsamples; count++) {
		/* Use saved lr as pc. */
		r = fp + sizeof(uintptr_t);
		if (copyin((void *)r, &pc, sizeof(pc)) != 0)
			break;
		if (!PMC_IN_USERSPACE(pc))
			break;

		*cc++ = pc;

		/* Switch to next frame up */
		oldfp = fp;
		r = fp;
		if (copyin((void *)r, &fp, sizeof(fp)) != 0)
			break;
		if (fp < oldfp || !PMC_IN_USERSPACE(fp))
			break;
	}

	return (count);
}