subr_prof.c: 75dfc66c1b2b44609e5a7c3e1d6a751be4922689 → aa3ea612be3659881392251e91912682b038ce78
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 32 unchanged lines hidden ---

41#include <sys/lock.h>
42#include <sys/mutex.h>
43#include <sys/proc.h>
44#include <sys/resourcevar.h>
45#include <sys/sysctl.h>
46
47#include <machine/cpu.h>
48
49#ifdef GPROF
50#include <sys/malloc.h>
51#include <sys/gmon.h>
52#undef MCOUNT
53
54static MALLOC_DEFINE(M_GPROF, "gprof", "kernel profiling buffer");
55
56static void kmstartup(void *);
57SYSINIT(kmem, SI_SUB_KPROF, SI_ORDER_FIRST, kmstartup, NULL);
58
59struct gmonparam _gmonparam = { GMON_PROF_OFF };
60
61#ifdef GUPROF
62void
63nullfunc_loop_profiled()
64{
65 int i;
66
67 for (i = 0; i < CALIB_SCALE; i++)
68 nullfunc_profiled();
69}
70
71#define nullfunc_loop_profiled_end nullfunc_profiled /* XXX */
72
73void
74nullfunc_profiled()
75{
76}
77#endif /* GUPROF */
78
79/*
80 * Update the histograms to support extending the text region arbitrarily.
81 * This is done slightly naively (no sparse regions), so it wastes small
82 * amounts of memory, but it works well enough overall to allow profiling
83 * of KLDs.
84 */
85void
86kmupetext(uintfptr_t nhighpc)
87{
88 struct gmonparam np; /* slightly large */
89 struct gmonparam *p = &_gmonparam;
90 char *cp;
91
92 GIANT_REQUIRED;
93 bcopy(p, &np, sizeof(*p));
94 np.highpc = ROUNDUP(nhighpc, HISTFRACTION * sizeof(HISTCOUNTER));
95 if (np.highpc <= p->highpc)
96 return;
97 np.textsize = np.highpc - p->lowpc;
98 np.kcountsize = np.textsize / HISTFRACTION;
99 np.hashfraction = HASHFRACTION;
100 np.fromssize = np.textsize / HASHFRACTION;
101 np.tolimit = np.textsize * ARCDENSITY / 100;
102 if (np.tolimit < MINARCS)
103 np.tolimit = MINARCS;
104 else if (np.tolimit > MAXARCS)
105 np.tolimit = MAXARCS;
106 np.tossize = np.tolimit * sizeof(struct tostruct);
107 cp = malloc(np.kcountsize + np.fromssize + np.tossize,
108 M_GPROF, M_WAITOK);
109 /*
110 * Check for something else extending highpc while we slept.
111 */
112 if (np.highpc <= p->highpc) {
113 free(cp, M_GPROF);
114 return;
115 }
116 np.tos = (struct tostruct *)cp;
117 cp += np.tossize;
118 np.kcount = (HISTCOUNTER *)cp;
119 cp += np.kcountsize;
120 np.froms = (u_short *)cp;
121#ifdef GUPROF
122 /* Reinitialize pointers to overhead counters. */
123 np.cputime_count = &KCOUNT(&np, PC_TO_I(&np, cputime));
124 np.mcount_count = &KCOUNT(&np, PC_TO_I(&np, mcount));
125 np.mexitcount_count = &KCOUNT(&np, PC_TO_I(&np, mexitcount));
126#endif
127 critical_enter();
128 bcopy(p->tos, np.tos, p->tossize);
129 bzero((char *)np.tos + p->tossize, np.tossize - p->tossize);
130 bcopy(p->kcount, np.kcount, p->kcountsize);
131 bzero((char *)np.kcount + p->kcountsize, np.kcountsize -
132 p->kcountsize);
133 bcopy(p->froms, np.froms, p->fromssize);
134 bzero((char *)np.froms + p->fromssize, np.fromssize - p->fromssize);
135 cp = (char *)p->tos;
136 bcopy(&np, p, sizeof(*p));
137 critical_exit();
138 free(cp, M_GPROF);
139}
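/*
 * Editor's illustrative sketch (not part of subr_prof.c): a hypothetical
 * KLD-load hook that wants a new module's text covered by the profiling
 * histograms would pass the new end of text to kmupetext().  The handler
 * name is invented; struct linker_file and its address/size members come
 * from <sys/linker.h>, and the real call site in the kernel linker may
 * differ.
 */
static void
example_kld_loaded(void *arg __unused, linker_file_t lf)
{

	/* Extend the histograms up to the end of the newly mapped file. */
	kmupetext((uintfptr_t)((uintptr_t)lf->address + lf->size));
}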
140
141static void
142kmstartup(void *dummy)
143{
144 char *cp;
145 struct gmonparam *p = &_gmonparam;
146#ifdef GUPROF
147 int cputime_overhead;
148 int empty_loop_time;
149 int i;
150 int mcount_overhead;
151 int mexitcount_overhead;
152 int nullfunc_loop_overhead;
153 int nullfunc_loop_profiled_time;
154 uintfptr_t tmp_addr;
155#endif
156
157 /*
158 * Round lowpc and highpc to multiples of the density we're using
159 * so the rest of the scaling (here and in gprof) stays in ints.
160 */
161 p->lowpc = ROUNDDOWN((u_long)btext, HISTFRACTION * sizeof(HISTCOUNTER));
162 p->highpc = ROUNDUP((u_long)etext, HISTFRACTION * sizeof(HISTCOUNTER));
163 p->textsize = p->highpc - p->lowpc;
164 printf("Profiling kernel, textsize=%lu [%jx..%jx]\n",
165 p->textsize, (uintmax_t)p->lowpc, (uintmax_t)p->highpc);
166 p->kcountsize = p->textsize / HISTFRACTION;
167 p->hashfraction = HASHFRACTION;
168 p->fromssize = p->textsize / HASHFRACTION;
169 p->tolimit = p->textsize * ARCDENSITY / 100;
170 if (p->tolimit < MINARCS)
171 p->tolimit = MINARCS;
172 else if (p->tolimit > MAXARCS)
173 p->tolimit = MAXARCS;
174 p->tossize = p->tolimit * sizeof(struct tostruct);
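	/*
	 * Editor's worked example (illustrative values only): with a text
	 * size of 8 MB and, say, HISTFRACTION = 2, HASHFRACTION = 2 and
	 * ARCDENSITY = 2 (the real values come from <sys/gmon.h> and the
	 * machine-dependent profile headers), the computations above give
	 * kcountsize = fromssize = 4 MB and tolimit = 8 MB * 2 / 100 =
	 * 167772 arcs, clamped into [MINARCS, MAXARCS] before being
	 * multiplied by sizeof(struct tostruct).
	 */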
175 cp = (char *)malloc(p->kcountsize + p->fromssize + p->tossize,
176 M_GPROF, M_WAITOK | M_ZERO);
177 p->tos = (struct tostruct *)cp;
178 cp += p->tossize;
179 p->kcount = (HISTCOUNTER *)cp;
180 cp += p->kcountsize;
181 p->froms = (u_short *)cp;
182 p->histcounter_type = FUNCTION_ALIGNMENT / HISTFRACTION * NBBY;
183
184#ifdef GUPROF
185 /* Signed counters. */
186 p->histcounter_type = -p->histcounter_type;
187
188 /* Initialize pointers to overhead counters. */
189 p->cputime_count = &KCOUNT(p, PC_TO_I(p, cputime));
190 p->mcount_count = &KCOUNT(p, PC_TO_I(p, mcount));
191 p->mexitcount_count = &KCOUNT(p, PC_TO_I(p, mexitcount));
192
193 /*
194 * Disable interrupts to avoid interference while we calibrate
195 * things.
196 */
197 critical_enter();
198
199 /*
200 * Determine overheads.
201 * XXX this needs to be repeated for each useful timer/counter.
202 */
203 cputime_overhead = 0;
204 startguprof(p);
205 for (i = 0; i < CALIB_SCALE; i++)
206 cputime_overhead += cputime();
207
208 empty_loop();
209 startguprof(p);
210 empty_loop();
211 empty_loop_time = cputime();
212
213 nullfunc_loop_profiled();
214
215 /*
216 * Start profiling. There won't be any normal function calls since
217 * interrupts are disabled, but we will call the profiling routines
218 * directly to determine their overheads.
219 */
220 p->state = GMON_PROF_HIRES;
221
222 startguprof(p);
223 nullfunc_loop_profiled();
224
225 startguprof(p);
226 for (i = 0; i < CALIB_SCALE; i++)
227 MCOUNT_OVERHEAD(sys_profil);
228 mcount_overhead = KCOUNT(p, PC_TO_I(p, sys_profil));
229
230 startguprof(p);
231 for (i = 0; i < CALIB_SCALE; i++)
232 MEXITCOUNT_OVERHEAD();
233 MEXITCOUNT_OVERHEAD_GETLABEL(tmp_addr);
234 mexitcount_overhead = KCOUNT(p, PC_TO_I(p, tmp_addr));
235
236 p->state = GMON_PROF_OFF;
237 stopguprof(p);
238
239 critical_exit();
240
241 nullfunc_loop_profiled_time = 0;
242 for (tmp_addr = (uintfptr_t)nullfunc_loop_profiled;
243 tmp_addr < (uintfptr_t)nullfunc_loop_profiled_end;
244 tmp_addr += HISTFRACTION * sizeof(HISTCOUNTER))
245 nullfunc_loop_profiled_time += KCOUNT(p, PC_TO_I(p, tmp_addr));
246#define CALIB_DOSCALE(count) (((count) + CALIB_SCALE / 3) / CALIB_SCALE)
247#define c2n(count, freq) ((int)((count) * 1000000000LL / freq))
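/*
 * Editor's note on the two helpers above (the numbers below are made up):
 * the calibration loops run CALIB_SCALE iterations, so CALIB_DOSCALE()
 * turns an accumulated total into a rounded per-call cost, and c2n()
 * converts a cost in profiling-clock ticks at `freq' into nanoseconds.
 * For example, if CALIB_SCALE were 1000 and p->profrate 100 MHz, a total
 * of 25000 ticks becomes c2n(25000, 100000000) = 250000 ns, and
 * CALIB_DOSCALE(250000) yields 250 ns per call.
 */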
248 printf("cputime %d, empty_loop %d, nullfunc_loop_profiled %d, mcount %d, mexitcount %d\n",
249 CALIB_DOSCALE(c2n(cputime_overhead, p->profrate)),
250 CALIB_DOSCALE(c2n(empty_loop_time, p->profrate)),
251 CALIB_DOSCALE(c2n(nullfunc_loop_profiled_time, p->profrate)),
252 CALIB_DOSCALE(c2n(mcount_overhead, p->profrate)),
253 CALIB_DOSCALE(c2n(mexitcount_overhead, p->profrate)));
254 cputime_overhead -= empty_loop_time;
255 mcount_overhead -= empty_loop_time;
256 mexitcount_overhead -= empty_loop_time;
257
258 /*-
259 * Profiling overheads are determined by the times between the
260 * following events:
261 * MC1: mcount() is called
262 * MC2: cputime() (called from mcount()) latches the timer
263 * MC3: mcount() completes
264 * ME1: mexitcount() is called
265 * ME2: cputime() (called from mexitcount()) latches the timer
266 * ME3: mexitcount() completes.
267 * The times between the events vary slightly depending on instruction
268 * combination and cache misses, etc. Attempt to determine the
269 * minimum times. These can be subtracted from the profiling times
270 * without much risk of reducing the profiling times below what they
271 * would be when profiling is not configured. Abbreviate:
272 * ab = minimum time between MC1 and MC3
273 * a = minimum time between MC1 and MC2
274 * b = minimum time between MC2 and MC3
275 * cd = minimum time between ME1 and ME3
276 * c = minimum time between ME1 and ME2
277 * d = minimum time between ME2 and ME3.
278 * These satisfy the relations:
279 * ab <= mcount_overhead (just measured)
280 * a + b <= ab
281 * cd <= mexitcount_overhead (just measured)
282 * c + d <= cd
283 * a + d <= nullfunc_loop_profiled_time (just measured)
284 * a >= 0, b >= 0, c >= 0, d >= 0.
285 * Assume that ab and cd are equal to the minimums.
286 */
287 p->cputime_overhead = CALIB_DOSCALE(cputime_overhead);
288 p->mcount_overhead = CALIB_DOSCALE(mcount_overhead - cputime_overhead);
289 p->mexitcount_overhead = CALIB_DOSCALE(mexitcount_overhead
290 - cputime_overhead);
291 nullfunc_loop_overhead = nullfunc_loop_profiled_time - empty_loop_time;
292 p->mexitcount_post_overhead = CALIB_DOSCALE((mcount_overhead
293 - nullfunc_loop_overhead)
294 / 4);
295 p->mexitcount_pre_overhead = p->mexitcount_overhead
296 + p->cputime_overhead
297 - p->mexitcount_post_overhead;
298 p->mcount_pre_overhead = CALIB_DOSCALE(nullfunc_loop_overhead)
299 - p->mexitcount_post_overhead;
300 p->mcount_post_overhead = p->mcount_overhead
301 + p->cputime_overhead
302 - p->mcount_pre_overhead;
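	/*
	 * Editor's note (one reading of the algebra above, not from the
	 * original): with ab and cd taken as the minimums, mcount_pre_overhead
	 * approximates `a' and mcount_post_overhead approximates `b' plus the
	 * cputime() cost, with the analogous split for mexitcount.  The listed
	 * relations do not determine all four quantities, so the division by 4
	 * above is a heuristic split of the remaining slack rather than a
	 * derived value.
	 */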
303 printf(
304"Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d nsec\n",
305 c2n(p->cputime_overhead, p->profrate),
306 c2n(p->mcount_overhead, p->profrate),
307 c2n(p->mcount_pre_overhead, p->profrate),
308 c2n(p->mcount_post_overhead, p->profrate),
309 c2n(p->cputime_overhead, p->profrate),
310 c2n(p->mexitcount_overhead, p->profrate),
311 c2n(p->mexitcount_pre_overhead, p->profrate),
312 c2n(p->mexitcount_post_overhead, p->profrate));
313 printf(
314"Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d cycles\n",
315 p->cputime_overhead, p->mcount_overhead,
316 p->mcount_pre_overhead, p->mcount_post_overhead,
317 p->cputime_overhead, p->mexitcount_overhead,
318 p->mexitcount_pre_overhead, p->mexitcount_post_overhead);
319#endif /* GUPROF */
320}
321
322/*
323 * Return kernel profiling information.
324 */
325static int
326sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
327{
328 int *name = (int *) arg1;
329 u_int namelen = arg2;
330 struct gmonparam *gp = &_gmonparam;
331 int error;
332 int state;
333
334 /* all sysctl names at this level are terminal */
335 if (namelen != 1)
336 return (ENOTDIR); /* overloaded */
337
338 switch (name[0]) {
339 case GPROF_STATE:
340 state = gp->state;
341 error = sysctl_handle_int(oidp, &state, 0, req);
342 if (error)
343 return (error);
344 if (!req->newptr)
345 return (0);
346 if (state == GMON_PROF_OFF) {
347 gp->state = state;
348 PROC_LOCK(&proc0);
349 stopprofclock(&proc0);
350 PROC_UNLOCK(&proc0);
351 stopguprof(gp);
352 } else if (state == GMON_PROF_ON) {
353 gp->state = GMON_PROF_OFF;
354 stopguprof(gp);
355 gp->profrate = profhz;
356 PROC_LOCK(&proc0);
357 startprofclock(&proc0);
358 PROC_UNLOCK(&proc0);
359 gp->state = state;
360#ifdef GUPROF
361 } else if (state == GMON_PROF_HIRES) {
362 gp->state = GMON_PROF_OFF;
363 PROC_LOCK(&proc0);
364 stopprofclock(&proc0);
365 PROC_UNLOCK(&proc0);
366 startguprof(gp);
367 gp->state = state;
368#endif
369 } else if (state != gp->state)
370 return (EINVAL);
371 return (0);
372 case GPROF_COUNT:
373 return (sysctl_handle_opaque(oidp,
374 gp->kcount, gp->kcountsize, req));
375 case GPROF_FROMS:
376 return (sysctl_handle_opaque(oidp,
377 gp->froms, gp->fromssize, req));
378 case GPROF_TOS:
379 return (sysctl_handle_opaque(oidp,
380 gp->tos, gp->tossize, req));
381 case GPROF_GMONPARAM:
382 return (sysctl_handle_opaque(oidp, gp, sizeof *gp, req));
383 default:
384 return (EOPNOTSUPP);
385 }
386 /* NOTREACHED */
387}
388
389static SYSCTL_NODE(_kern, KERN_PROF, prof,
390 CTLFLAG_RW | CTLFLAG_MPSAFE, sysctl_kern_prof,
391 "");
392#endif /* GPROF */
393
394/*
395 * Profiling system call.
396 *
397 * The scale factor is a fixed point number with 16 bits of fraction, so that
398 * 1.0 is represented as 0x10000. A scale factor of 0 turns off profiling.
399 */
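/*
 * Editor's worked example of the 16.16 scale (illustrative): scale 0x10000
 * maps the profiled text onto the sample buffer one-to-one, while scale
 * 0x8000 (0.5) maps a pc that is 256 bytes past `offset' to byte
 * 256 * 0x8000 >> 16 = 128 of the buffer, so two bytes of text share each
 * position.  The exact bucket index also depends on the width of the sample
 * counters, so this only shows the rough shape of the mapping.
 */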
400#ifndef _SYS_SYSPROTO_H_
401struct profil_args {
402 caddr_t samples;

--- 136 unchanged lines hidden ---
49/*
50 * Profiling system call.
51 *
52 * The scale factor is a fixed point number with 16 bits of fraction, so that
53 * 1.0 is represented as 0x10000. A scale factor of 0 turns off profiling.
54 */
55#ifndef _SYS_SYSPROTO_H_
56struct profil_args {
57 caddr_t samples;

--- 136 unchanged lines hidden ---