xref: /freebsd/lib/libc/gmon/mcount.c (revision 2e480d34aaabcd405e72ff80866451e8c3b08c08)
/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if !defined(lint) && !defined(KERNEL) && defined(LIBC_SCCS)
#if 0
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
#endif
static const char rcsid[] =
	"$Id: mcount.c,v 1.10 1998/03/09 04:42:19 jb Exp $";
#endif

#ifndef __NETBSD_SYSCALLS

#ifdef KERNEL
#include <sys/param.h>
#else
/* Kludge to get fptrint_t declared. */
#define	KERNEL
#include <sys/param.h>
#undef KERNEL
#endif

#include <sys/gmon.h>
#ifdef KERNEL
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr __P((void));
void	btrap __P((void));
void	eintr __P((void));
void	user __P((void));
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
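
/*
 * Illustrative sketch (kept under "#if 0", never compiled) of how _mcount()
 * gets its arguments.  The real, machine-dependent definition is the MCOUNT
 * macro in <machine/profile.h>; this only shows the frompc/selfpc
 * convention.  A profiled function's prologue calls the mcount entry point,
 * which recovers the caller's return address (frompc) and its own return
 * address inside the profiled function (selfpc) and passes both to
 * _mcount().  __builtin_return_address() is used here purely for
 * illustration; the actual stubs read the addresses from the stack frame
 * in assembly.
 */
#if 0
void
mcount()
{
	register fptrint_t selfpc, frompc;

	selfpc = (fptrint_t)__builtin_return_address(0);  /* pc in profiled function */
	frompc = (fptrint_t)__builtin_return_address(1);  /* pc in its caller */
	_mcount(frompc, selfpc);
}
#endif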
_MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
	register fptrint_t frompc, selfpc;
{
#ifdef GUPROF
	u_int delta;
#endif
	register fptrdiff_t frompci;
	register u_short *frompcindex;
	register struct tostruct *top, *prevtop;
	register struct gmonparam *p;
	register long toindex;
#ifdef KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef KERNEL
	MCOUNT_ENTER(s);
#else
	p->state = GMON_PROF_BUSY;
#endif
	frompci = frompc - p->lowpc;

#ifdef KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (fptrint_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (fptrint_t)user - p->lowpc;
		if (frompci >= p->textsize)
		    goto done;
	}
#endif /* KERNEL */

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
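	/*
	 * Note on the fixed-point arithmetic below: the integer part of the
	 * calibrated mcount overhead (p->mcount_overhead) is subtracted from
	 * delta directly, while the fractional parts (the *_frac fields,
	 * scaled by CALIB_SCALE) accumulate in the *_resid fields.  Whenever
	 * a residual reaches CALIB_SCALE, one whole cycle is shifted from
	 * the caller's count into the corresponding overhead counter.  For
	 * example, a fractional overhead of 0.25 (frac == CALIB_SCALE / 4)
	 * moves one extra cycle from KCOUNT(p, frompci) to the overhead
	 * counter on roughly every fourth call, so the long-run average
	 * matches the calibration.
	 */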
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
				    - p->cputime_overhead;
	else if (delta != 0) {
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */

#ifdef KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((fptrint_t)selfpc >= (fptrint_t)btrap
	    && (fptrint_t)selfpc < (fptrint_t)eintr) {
		if ((fptrint_t)selfpc >= (fptrint_t)bintr)
			frompci = (fptrint_t)bintr - p->lowpc;
		else
			frompci = (fptrint_t)btrap - p->lowpc;
	}
#endif /* KERNEL */

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

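	/*
	 * Arc lookup: p->froms[] is a hash table indexed by the caller's
	 * offset into the text, scaled down by hashfraction so that several
	 * nearby call sites share one slot.  Each slot holds the index in
	 * p->tos[] of the head of a chain of tostructs, one per distinct
	 * (caller slot, selfpc) arc; p->tos[0].link is the index of the
	 * most recently allocated tostruct.  A slot value of 0 means no
	 * arcs have been recorded from that part of the text yet.
	 */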
	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef KERNEL
	MCOUNT_EXIT(s);
#else
	p->state = GMON_PROF_ON;
#endif
	return;
overflow:
	p->state = GMON_PROF_ERROR;
#ifdef KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

#ifdef GUPROF
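/*
 * mexitcount() is the GUPROF counterpart of _mcount() for function exit.
 * With high-resolution profiling the compiler also emits a call at each
 * function return (historically via gcc's -mprofiler-epilogue option), and
 * mexitcount() uses that call to solidify the cycle count for the function
 * that is returning, using the same CALIB_SCALE residual scheme as above.
 */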
void
mexitcount(selfpc)
	fptrint_t selfpc;
{
	struct gmonparam *p;
	fptrint_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (fptrint_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		u_int delta;

		/*
		 * Solidify the count for the current function.
		 */
		delta = cputime() - p->mexitcount_overhead;
		p->cputime_overhead_resid += p->cputime_overhead_frac;
		p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
		if ((int)delta < 0)
			*p->mexitcount_count += delta + p->mexitcount_overhead
						- p->cputime_overhead;
		else if (delta != 0) {
			if (p->cputime_overhead_resid >= CALIB_SCALE) {
				p->cputime_overhead_resid -= CALIB_SCALE;
				++*p->cputime_count;
				--delta;
			}
			if (delta != 0) {
				if (p->mexitcount_overhead_resid
				    >= CALIB_SCALE) {
					p->mexitcount_overhead_resid
					    -= CALIB_SCALE;
					++*p->mexitcount_count;
					--delta;
				}
				KCOUNT(p, selfpcdiff) += delta;
			}
			*p->mexitcount_count += p->mexitcount_overhead_sub;
		}
		*p->cputime_count += p->cputime_overhead;
	}
}
#endif /* GUPROF */

#endif /* !__NETBSD_SYSCALLS */
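/*
 * Usage sketch: userland code is typically profiled by compiling and
 * linking with `cc -pg`, which makes the compiler call the MCOUNT entry
 * point in every function prologue and links in the profiled startup code.
 * The counters maintained here are written to gmon.out when the program
 * exits (see gmon.c) and are interpreted by gprof(1).
 */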