/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if !defined(lint) && !defined(KERNEL) && defined(LIBC_SCCS)
#if 0
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
#endif
static const char rcsid[] =
	"$Id: mcount.c,v 1.13 1998/07/14 05:09:41 bde Exp $";
#endif

#ifndef __alpha__

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef KERNEL
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr __P((void));
void	btrap __P((void));
void	eintr __P((void));
void	user __P((void));
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represent the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
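 *
 * A sketch of the data structures updated below (the authoritative
 * definitions are in <sys/gmon.h>; the types shown here are only
 * approximate): p->froms[] is a hash table indexed by frompc, scaled
 * down by p->hashfraction, whose entries hold indices into p->tos[].
 * Each p->tos[] entry records one arc, roughly
 *
 *	struct tostruct {
 *		uintfptr_t selfpc;	(callee address)
 *		long count;		(arc traversal count)
 *		u_short link;		(next arc in this hash chain)
 *	};
 *
 * Arcs sharing a froms[] slot form a chain headed at that slot; a new
 * arc is linked at the head, and an arc found further down the chain
 * is moved to the head, so the common lookup is a single probe.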
 */
_MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
	register uintfptr_t frompc, selfpc;
{
#ifdef GUPROF
	u_int delta;
#endif
	register fptrdiff_t frompci;
	register u_short *frompcindex;
	register struct tostruct *top, *prevtop;
	register struct gmonparam *p;
	register long toindex;
#ifdef KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef KERNEL
	MCOUNT_ENTER(s);
#else
	p->state = GMON_PROF_BUSY;
#endif
	frompci = frompc - p->lowpc;

#ifdef KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif /* KERNEL */

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
				    - p->cputime_overhead;
	else if (delta != 0) {
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */

#ifdef KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
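	 *
	 * Note that the range test below assumes the handler entry points
	 * are laid out with btrap() <= bintr() < eintr() in the kernel
	 * text: a selfpc in [btrap, bintr) is charged to btrap(), and one
	 * in [bintr, eintr) is charged to bintr().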
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif /* KERNEL */

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

	frompcindex =
	    &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef KERNEL
	MCOUNT_EXIT(s);
#else
	p->state = GMON_PROF_ON;
#endif
	return;
overflow:
	p->state = GMON_PROF_ERROR;
#ifdef KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

#ifdef GUPROF
void
mexitcount(selfpc)
	uintfptr_t selfpc;
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		u_int delta;

		/*
		 * Solidify the count for the current function.
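		 *
		 * The accounting below mirrors the GUPROF block in
		 * _mcount() above: the accumulated fractional cputime()
		 * and mexitcount() calibration overheads are paid off
		 * first, and any remaining cycles are charged to the
		 * returning function via KCOUNT().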
		 */
		delta = cputime() - p->mexitcount_overhead;
		p->cputime_overhead_resid += p->cputime_overhead_frac;
		p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
		if ((int)delta < 0)
			*p->mexitcount_count += delta + p->mexitcount_overhead
						- p->cputime_overhead;
		else if (delta != 0) {
			if (p->cputime_overhead_resid >= CALIB_SCALE) {
				p->cputime_overhead_resid -= CALIB_SCALE;
				++*p->cputime_count;
				--delta;
			}
			if (delta != 0) {
				if (p->mexitcount_overhead_resid
				    >= CALIB_SCALE) {
					p->mexitcount_overhead_resid
					    -= CALIB_SCALE;
					++*p->mexitcount_count;
					--delta;
				}
				KCOUNT(p, selfpcdiff) += delta;
			}
			*p->mexitcount_count += p->mexitcount_overhead_sub;
		}
		*p->cputime_count += p->cputime_overhead;
	}
}
#endif /* GUPROF */

#endif /* !__alpha__ */