158f0484fSRodney W. Grimes /*-
28a16b7a1SPedro F. Giffuni * SPDX-License-Identifier: BSD-3-Clause
38a16b7a1SPedro F. Giffuni *
458f0484fSRodney W. Grimes * Copyright (c) 1983, 1992, 1993
558f0484fSRodney W. Grimes * The Regents of the University of California. All rights reserved.
658f0484fSRodney W. Grimes *
758f0484fSRodney W. Grimes * Redistribution and use in source and binary forms, with or without
858f0484fSRodney W. Grimes * modification, are permitted provided that the following conditions
958f0484fSRodney W. Grimes * are met:
1058f0484fSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright
1158f0484fSRodney W. Grimes * notice, this list of conditions and the following disclaimer.
1258f0484fSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright
1358f0484fSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the
1458f0484fSRodney W. Grimes * documentation and/or other materials provided with the distribution.
15fbbd9655SWarner Losh * 3. Neither the name of the University nor the names of its contributors
1658f0484fSRodney W. Grimes * may be used to endorse or promote products derived from this software
1758f0484fSRodney W. Grimes * without specific prior written permission.
1858f0484fSRodney W. Grimes *
1958f0484fSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
2058f0484fSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2158f0484fSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2258f0484fSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
2358f0484fSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2458f0484fSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2558f0484fSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2658f0484fSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2758f0484fSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2858f0484fSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2958f0484fSRodney W. Grimes * SUCH DAMAGE.
3058f0484fSRodney W. Grimes */
3158f0484fSRodney W. Grimes
3258f0484fSRodney W. Grimes #include <sys/param.h>
3358f0484fSRodney W. Grimes #include <sys/gmon.h>
34c4473420SPeter Wemm #ifdef _KERNEL
35912e6037SBruce Evans #include <sys/systm.h>
36912e6037SBruce Evans #include <vm/vm.h>
37912e6037SBruce Evans #include <vm/vm_param.h>
38912e6037SBruce Evans #include <vm/pmap.h>
39a99d1013SDavid E. O'Brien void bintr(void);
40a99d1013SDavid E. O'Brien void btrap(void);
41a99d1013SDavid E. O'Brien void eintr(void);
42a99d1013SDavid E. O'Brien void user(void);
43691071ffSPaul Richards #endif
440fa2f943SDavid Xu #include <machine/atomic.h>
4558f0484fSRodney W. Grimes
4658f0484fSRodney W. Grimes /*
4758f0484fSRodney W. Grimes * mcount is called on entry to each function compiled with the profiling
4858f0484fSRodney W. Grimes * switch set. _mcount(), which is declared in a machine-dependent way
4958f0484fSRodney W. Grimes * with _MCOUNT_DECL, does the actual work and is either inlined into a
5058f0484fSRodney W. Grimes * C routine or called by an assembly stub. In any case, this magic is
5158f0484fSRodney W. Grimes * taken care of by the MCOUNT definition in <machine/profile.h>.
5258f0484fSRodney W. Grimes *
5358f0484fSRodney W. Grimes * _mcount updates data structures that represent traversals of the
5458f0484fSRodney W. Grimes * program's call graph edges. frompc and selfpc are the return
5558f0484fSRodney W. Grimes * address and function address that represents the given call graph edge.
5658f0484fSRodney W. Grimes *
5758f0484fSRodney W. Grimes * Note: the original BSD code used the same variable (frompcindex) for
5858f0484fSRodney W. Grimes * both frompcindex and frompc. Any reasonable, modern compiler will
5958f0484fSRodney W. Grimes * perform this optimization.
6058f0484fSRodney W. Grimes */
611d0342a3SJacques Vidrine /* _mcount; may be static, inline, etc */
/*
 * _mcount: record one traversal of the call-graph edge frompc -> selfpc.
 *
 * frompc  - return address in the caller (tail of the arc).
 * selfpc  - entry address of the called function (head of the arc).
 *
 * Arcs are kept in p->tos[], chained per from-address hash bucket in
 * p->froms[].  On overflow of the tos[] pool, profiling is disabled by
 * setting p->state to GMON_PROF_ERROR.  Declared via the MD macro
 * _MCOUNT_DECL so it can be static/inline as <machine/profile.h> needs.
 */
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
	u_int delta;			/* cycles since the clock was last read */
#endif
	fptrdiff_t frompci;		/* frompc as an offset into profiled text */
	u_short *frompcindex;		/* hash slot heading the arc chain */
	struct tostruct *top, *prevtop;	/* cursors for walking the arc chain */
	struct gmonparam *p;
	long toindex;			/* index of current tostruct in p->tos[] */
#ifdef _KERNEL
	MCOUNT_DECL(s)			/* MD storage for saved lock/intr state */
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
	/*
	 * Userland: take the BUSY "lock" with an acquire CAS; losing the
	 * race (or profiling being off) means simply skip this sample.
	 */
	if (!atomic_cmpset_acq_int(&p->state, GMON_PROF_ON, GMON_PROF_BUSY))
		return;
#endif
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc. This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	/* (int)delta < 0: the overhead estimate exceeded the measured time. */
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
		    - p->cputime_overhead;
	else if (delta != 0) {
		/* Carry whole cycles out of the fractional residuals. */
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 */
		toindex = ++p->tos[0].link;	/* tos[0].link: allocation cursor */
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
	/* Release the BUSY "lock": re-enable profiling with release order. */
	atomic_store_rel_int(&p->state, GMON_PROF_ON);
#endif
	return;
overflow:
	/* Out of tostructs: mark the run as failed; profiling stays off. */
	atomic_store_rel_int(&p->state, GMON_PROF_ERROR);
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}
26758f0484fSRodney W. Grimes
/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.  Per the header comment above, MCOUNT
 * expands to the machine-dependent entry stub that invokes _mcount().
 */
MCOUNT
273912e6037SBruce Evans
#ifdef GUPROF
/*
 * mexitcount: called on function exit under high-resolution (GUPROF)
 * profiling.  Charges the cycles accumulated since the clock was last
 * read to the function being left (selfpc), compensating for the
 * calibrated profiling overheads in the same way as _mcount() does.
 * Addresses outside the profiled text range are ignored.
 */
void
mexitcount(uintfptr_t selfpc)
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;		/* selfpc as an offset into profiled text */

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		u_int delta;		/* cycles since the clock was last read */

		/*
		 * Solidify the count for the current function.
		 */
		delta = cputime() - p->mexitcount_overhead;
		p->cputime_overhead_resid += p->cputime_overhead_frac;
		p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
		/* (int)delta < 0: overhead estimate exceeded measured time. */
		if ((int)delta < 0)
			*p->mexitcount_count += delta + p->mexitcount_overhead
			    - p->cputime_overhead;
		else if (delta != 0) {
			/* Carry whole cycles out of the fractional residuals. */
			if (p->cputime_overhead_resid >= CALIB_SCALE) {
				p->cputime_overhead_resid -= CALIB_SCALE;
				++*p->cputime_count;
				--delta;
			}
			if (delta != 0) {
				if (p->mexitcount_overhead_resid
				    >= CALIB_SCALE) {
					p->mexitcount_overhead_resid
					    -= CALIB_SCALE;
					++*p->mexitcount_count;
					--delta;
				}
				KCOUNT(p, selfpcdiff) += delta;
			}
			*p->mexitcount_count += p->mexitcount_overhead_sub;
		}
		*p->cputime_count += p->cputime_overhead;
	}
}
#endif /* GUPROF */
317