/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if !defined(_KERNEL) && defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
#endif
#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr(void);
void	btrap(void);
void	eintr(void);
void	user(void);
#endif
#include <machine/atomic.h>

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represent the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
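/*
 * For illustration only: the sketch below is hypothetical and is not
 * the definition from any real <machine/profile.h>.  On a machine
 * where MCOUNT were a C wrapper rather than an assembly stub (and
 * assuming frame pointers, so __builtin_return_address(1) is usable),
 * the glue could look roughly like this, showing how the
 * frompc/selfpc pair corresponds to caller and callee:
 *
 *	void
 *	mcount(void)
 *	{
 *		uintfptr_t frompc, selfpc;
 *
 *		selfpc = (uintfptr_t)__builtin_return_address(0);
 *		frompc = (uintfptr_t)__builtin_return_address(1);
 *		_mcount(frompc, selfpc);
 *	}
 */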
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
	u_int delta;
#endif
	fptrdiff_t frompci;
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#ifdef _KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
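	/*
	 * Atomically flip the state from ON to BUSY.  If profiling is
	 * off, or another thread (or a reentrant invocation) already
	 * owns the tables, drop this sample rather than corrupt them.
	 */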
	if (!atomic_cmpset_acq_int(&p->state, GMON_PROF_ON, GMON_PROF_BUSY))
		return;
#endif
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
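	/*
	 * Worked example of the fractional-overhead bookkeeping below
	 * (the numbers are illustrative, not measured): if the
	 * calibrated mcount overhead were 3.25 cycles, mcount_overhead
	 * would hold 3 and mcount_overhead_frac would hold
	 * CALIB_SCALE / 4.  The residual grows on every call, and each
	 * time it reaches CALIB_SCALE one cycle is charged to
	 * *mcount_count instead of to the caller, so an average of
	 * 3.25 cycles per call is attributed to mcount.
	 */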
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
				    - p->cputime_overhead;
	else if (delta != 0) {
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

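	/*
	 * The call-graph store works as follows: froms[] is indexed by
	 * the caller's pc, scaled down by hashfraction; each entry heads
	 * a chain of tostruct records in tos[], one per callee seen from
	 * that slot, linked through their link fields and kept in
	 * move-to-front order.  tos[0].link holds the index of the last
	 * allocated record, so ++p->tos[0].link below allocates a fresh
	 * one.  Roughly:
	 *
	 *	froms[frompci / ...] -> tos[i] -> tos[j] -> ... -> 0
	 */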
	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count and
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
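	/*
	 * Release the BUSY "lock"; the release store makes the table
	 * updates above visible before the state reads ON again.
	 */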
	atomic_store_rel_int(&p->state, GMON_PROF_ON);
#endif
	return;
overflow:
	atomic_store_rel_int(&p->state, GMON_PROF_ERROR);
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * The actual definition of the mcount function.  It is defined in
 * <machine/profile.h>, which is included by <sys/gmon.h>.
 */
MCOUNT
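
/*
 * Usage note: the compiler's profiling switch ("cc -pg") makes each
 * function entry call the mcount stub that the MCOUNT macro above
 * expands to; the stub recovers the caller/callee pc pair and passes
 * it to _mcount(), as sketched near the top of this file.
 */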

#ifdef GUPROF
void
mexitcount(uintfptr_t selfpc)
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		u_int delta;

		/*
		 * Solidify the count for the current function.
		 */
		delta = cputime() - p->mexitcount_overhead;
		p->cputime_overhead_resid += p->cputime_overhead_frac;
		p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
		if ((int)delta < 0)
			*p->mexitcount_count += delta + p->mexitcount_overhead
						- p->cputime_overhead;
		else if (delta != 0) {
			if (p->cputime_overhead_resid >= CALIB_SCALE) {
				p->cputime_overhead_resid -= CALIB_SCALE;
				++*p->cputime_count;
				--delta;
			}
			if (delta != 0) {
				if (p->mexitcount_overhead_resid
				    >= CALIB_SCALE) {
					p->mexitcount_overhead_resid
					    -= CALIB_SCALE;
					++*p->mexitcount_count;
					--delta;
				}
				KCOUNT(p, selfpcdiff) += delta;
			}
			*p->mexitcount_count += p->mexitcount_overhead_sub;
		}
		*p->cputime_count += p->cputime_overhead;
	}
}
#endif /* GUPROF */