/*
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>

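/*
 * Entropy record handed to random_harvest() from ithread_schedule()
 * when one of the thread's handlers is flagged IH_ENTROPY; the
 * (proc, vector) pair gives the harvester a little per-interrupt
 * context to mix into the pool.
 */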
struct	int_entropy {
	struct	proc *proc;
	int	vector;
};

void	*vm_ih;
void	*softclock_ih;
struct	ithd *clk_ithd;
struct	ithd *tty_ithd;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static void	ithread_update(struct ithd *);
static void	ithread_loop(void *);
static void	start_softintr(void *);

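/*
 * Map an interrupt type (INTR_TYPE_*) to the base priority of the
 * interrupt thread that will run its handlers.  Exactly one type bit
 * may be set in flags; anything else falls through to the panic in
 * the default case.
 */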
u_char
ithread_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;          /* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:		/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;          /* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("ithread_priority: no interrupt type in flags");
	}

	return (pri);
}

/*
 * Regenerate the name (p_comm) and priority of an interrupt thread.
 */
static void
ithread_update(struct ithd *ithd)
{
	struct intrhand *ih;
	struct thread *td;
	struct proc *p;
	int entropy;

	mtx_assert(&ithd->it_lock, MA_OWNED);
	td = ithd->it_td;
	if (td == NULL)
		return;
	p = td->td_proc;

	strlcpy(p->p_comm, ithd->it_name, sizeof(p->p_comm));

	ih = TAILQ_FIRST(&ithd->it_handlers);
	if (ih == NULL) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = PRI_MAX_ITHD;
		td->td_base_pri = PRI_MAX_ITHD;
		mtx_unlock_spin(&sched_lock);
		ithd->it_flags &= ~IT_ENTROPY;
		return;
	}
	entropy = 0;
	mtx_lock_spin(&sched_lock);
	td->td_priority = ih->ih_pri;
	td->td_base_pri = ih->ih_pri;
	mtx_unlock_spin(&sched_lock);
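	/*
	 * Rebuild the thread's name from the names of its handlers.
	 * Handlers that no longer fit are recorded as a trailing "+",
	 * which is flipped to "*" once p_comm is completely full.
	 */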
	TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
		if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 <
		    sizeof(p->p_comm)) {
			strcat(p->p_comm, " ");
			strcat(p->p_comm, ih->ih_name);
		} else if (strlen(p->p_comm) + 1 == sizeof(p->p_comm)) {
			if (p->p_comm[sizeof(p->p_comm) - 2] == '+')
				p->p_comm[sizeof(p->p_comm) - 2] = '*';
			else
				p->p_comm[sizeof(p->p_comm) - 2] = '+';
		} else
			strcat(p->p_comm, "+");
		if (ih->ih_flags & IH_ENTROPY)
			entropy++;
	}
	if (entropy)
		ithd->it_flags |= IT_ENTROPY;
	else
		ithd->it_flags &= ~IT_ENTROPY;
	CTR2(KTR_INTR, "%s: updated %s", __func__, p->p_comm);
}

int
ithread_create(struct ithd **ithread, int vector, int flags,
    void (*disable)(int), void (*enable)(int), const char *fmt, ...)
{
	struct ithd *ithd;
	struct thread *td;
	struct proc *p;
	int error;
	va_list ap;

	/* The only valid flag during creation is IT_SOFT. */
	if ((flags & ~IT_SOFT) != 0)
		return (EINVAL);

	ithd = malloc(sizeof(struct ithd), M_ITHREAD, M_WAITOK | M_ZERO);
	ithd->it_vector = vector;
	ithd->it_disable = disable;
	ithd->it_enable = enable;
	ithd->it_flags = flags;
	TAILQ_INIT(&ithd->it_handlers);
	mtx_init(&ithd->it_lock, "ithread", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ithd->it_name, sizeof(ithd->it_name), fmt, ap);
	va_end(ap);

	error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
	    0, "%s", ithd->it_name);
	if (error) {
		mtx_destroy(&ithd->it_lock);
		free(ithd, M_ITHREAD);
		return (error);
	}
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
	mtx_lock_spin(&sched_lock);
	td->td_ksegrp->kg_pri_class = PRI_ITHD;
	td->td_priority = PRI_MAX_ITHD;
	TD_SET_IWAIT(td);
	mtx_unlock_spin(&sched_lock);
	ithd->it_td = td;
	td->td_ithd = ithd;
	if (ithread != NULL)
		*ithread = ithd;
	CTR2(KTR_INTR, "%s: created %s", __func__, ithd->it_name);
	return (0);
}
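
/*
 * A sketch of a typical caller from machine-dependent interrupt code
 * (the MD names here are hypothetical, not an API from this file):
 *
 *	error = ithread_create(&ithds[irq], irq, 0,
 *	    md_intr_disable, md_intr_enable, "irq%d:", irq);
 *
 * ithread_loop() invokes the enable hook just before the thread goes
 * back to sleep; use of the disable hook is left to the MD code.
 */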

int
ithread_destroy(struct ithd *ithread)
{
	struct thread *td;

	if (ithread == NULL)
		return (EINVAL);

	td = ithread->it_td;
	mtx_lock(&ithread->it_lock);
	if (!TAILQ_EMPTY(&ithread->it_handlers)) {
		mtx_unlock(&ithread->it_lock);
		return (EINVAL);
	}
	ithread->it_flags |= IT_DEAD;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		setrunqueue(td);
	}
	mtx_unlock_spin(&sched_lock);
	mtx_unlock(&ithread->it_lock);
	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_name);
	return (0);
}

int
ithread_add_handler(struct ithd *ithread, const char *name,
    driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
    void **cookiep)
{
	struct intrhand *ih, *temp_ih;

	if (ithread == NULL || name == NULL || handler == NULL)
		return (EINVAL);
	if ((flags & INTR_FAST) != 0)
		flags |= INTR_EXCL;

	ih = malloc(sizeof(struct intrhand), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_ithread = ithread;
	ih->ih_pri = pri;
	if (flags & INTR_FAST)
		ih->ih_flags = IH_FAST | IH_EXCLUSIVE;
	else if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	mtx_lock(&ithread->it_lock);
	if ((flags & INTR_EXCL) != 0 && !TAILQ_EMPTY(&ithread->it_handlers))
		goto fail;
	if (!TAILQ_EMPTY(&ithread->it_handlers) &&
	    (TAILQ_FIRST(&ithread->it_handlers)->ih_flags & IH_EXCLUSIVE) != 0)
		goto fail;

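	/*
	 * Keep the list sorted by ih_pri so that the highest-priority
	 * handlers (numerically smallest values) run first on each
	 * pass: insert in front of the first handler with a larger
	 * priority value, or at the tail if there is none.
	 */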
	TAILQ_FOREACH(temp_ih, &ithread->it_handlers, ih_next)
	    if (temp_ih->ih_pri > ih->ih_pri)
		    break;
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ithread->it_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	ithread_update(ithread);
	mtx_unlock(&ithread->it_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ithread->it_name);
	return (0);

fail:
	mtx_unlock(&ithread->it_lock);
	free(ih, M_ITHREAD);
	return (EINVAL);
}

int
ithread_remove_handler(void *cookie)
{
	struct intrhand *handler = (struct intrhand *)cookie;
	struct ithd *ithread;
#ifdef INVARIANTS
	struct intrhand *ih;
#endif

	if (handler == NULL)
		return (EINVAL);
	ithread = handler->ih_ithread;
	KASSERT(ithread != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt thread",
		handler->ih_name));
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ithread->it_name);
	mtx_lock(&ithread->it_lock);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ithread->it_lock);
	panic("interrupt handler \"%s\" not found in interrupt thread \"%s\"",
	    handler->ih_name, ithread->it_name);
ok:
#endif
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 */
	mtx_lock_spin(&sched_lock);
	if (!TD_AWAITING_INTR(ithread->it_td)) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ithread->it_need = 1;
	} else
		TAILQ_REMOVE(&ithread->it_handlers, handler, ih_next);
	mtx_unlock_spin(&sched_lock);
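	/*
	 * If the ithread now owns the removal, sleep until it unlinks
	 * the handler and calls wakeup() on it from ithread_loop().
	 */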
	if ((handler->ih_flags & IH_DEAD) != 0)
		msleep(handler, &ithread->it_lock, PUSER, "itrmh", 0);
	ithread_update(ithread);
	mtx_unlock(&ithread->it_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
ithread_schedule(struct ithd *ithread, int do_switch)
{
	struct int_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if ((ithread == NULL) || TAILQ_EMPTY(&ithread->it_handlers))
		return (EINVAL);

	ctd = curthread;
	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && (ithread->it_flags & IT_ENTROPY)) {
		entropy.vector = ithread->it_vector;
		entropy.proc = ctd->td_proc;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	td = ithread->it_td;
	p = td->td_proc;
	KASSERT(p != NULL, ("ithread %s has no process", ithread->it_name));
	CTR4(KTR_INTR, "%s: pid %d: (%s) need = %d",
	    __func__, p->p_pid, p->p_comm, ithread->it_need);

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, grab sched_lock and see if we actually need to
	 * put this thread on the runqueue.  If so and the do_switch flag is
	 * true and it is safe to switch, then switch to the ithread
	 * immediately.  Otherwise, set the needresched flag to guarantee
	 * that this ithread will run before any userland processes.
	 */
	ithread->it_need = 1;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
		TD_CLR_IWAIT(td);
		setrunqueue(td);
		if (do_switch && (ctd->td_critnest == 1)) {
			KASSERT((TD_IS_RUNNING(ctd)),
			    ("ithread_schedule: Bad state for curthread."));
			ctd->td_proc->p_stats->p_ru.ru_nivcsw++;
			if (ctd->td_flags & TDF_IDLETD)
				ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
			mi_switch();
		} else {
			ctd->td_flags |= TDF_NEEDRESCHED;
		}
	} else {
		CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
		    __func__, p->p_pid, ithread->it_need, td->td_state);
	}
	mtx_unlock_spin(&sched_lock);

	return (0);
}

int
swi_add(struct ithd **ithdp, const char *name, driver_intr_t handler,
	    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct ithd *ithd;
	int error;

	if (flags & (INTR_FAST | INTR_ENTROPY))
		return (EINVAL);

	ithd = (ithdp != NULL) ? *ithdp : NULL;

	if (ithd != NULL) {
		if ((ithd->it_flags & IT_SOFT) == 0)
			return (EINVAL);
	} else {
		error = ithread_create(&ithd, pri, IT_SOFT, NULL, NULL,
		    "swi%d:", pri);
		if (error)
			return (error);

		if (ithdp != NULL)
			*ithdp = ithd;
	}
	return (ithread_add_handler(ithd, name, handler, arg,
		    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
}
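
/*
 * A hypothetical registration, for illustration only (the handler and
 * cookie names are not from this file):
 *
 *	swi_add(&tty_ithd, "tty:sio", siopoll, NULL, SWI_TTY, 0,
 *	    &sio_swi_ih);
 *
 * Passing the same ithdp on later calls attaches further handlers to
 * the same software interrupt thread.
 */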

/*
 * Schedule a heavyweight software interrupt process.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intrhand *ih = (struct intrhand *)cookie;
	struct ithd *it = ih->ih_ithread;
	int error;

	atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */

	CTR3(KTR_INTR, "swi_sched pid %d(%s) need=%d",
	    it->it_td->td_proc->p_pid, it->it_td->td_proc->p_comm,
	    it->it_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);
	if (!(flags & SWI_DELAY)) {
		error = ithread_schedule(it, !cold && !dumping);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}
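
/*
 * Note that SWI_DELAY only latches ih_need above: the handler is run
 * whenever the ithread is next scheduled for some other reason, rather
 * than being scheduled immediately.
 */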

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct ithd *ithd;		/* our thread context */
	struct intrhand *ih;		/* and our interrupt handler chain */
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct ithd *)arg;	/* point to myself */
	KASSERT(ithd->it_td == td && td->td_ithd == ithd,
	    ("%s: ithread and proc linkage out of sync", __func__));

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d: (%s) exiting", __func__,
			    p->p_pid, p->p_comm);
			td->td_ithd = NULL;
			mtx_destroy(&ithd->it_lock);
			mtx_lock(&Giant);
			free(ithd, M_ITHREAD);
			kthread_exit(0);
		}

		CTR4(KTR_INTR, "%s: pid %d: (%s) need=%d", __func__,
		    p->p_pid, p->p_comm, ithd->it_need);
		while (ithd->it_need) {
			/*
			 * Service interrupts.  If another interrupt
			 * arrives while we are running, it will set
			 * it_need to denote that we should make
			 * another pass.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
restart:
			TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
				if ((ithd->it_flags & IT_SOFT) && !ih->ih_need)
					continue;
				atomic_store_rel_int(&ih->ih_need, 0);
				CTR6(KTR_INTR,
				    "%s: pid %d ih=%p: %p(%p) flg=%x", __func__,
				    p->p_pid, (void *)ih,
				    (void *)ih->ih_handler, ih->ih_argument,
				    ih->ih_flags);

				if ((ih->ih_flags & IH_DEAD) != 0) {
					mtx_lock(&ithd->it_lock);
					TAILQ_REMOVE(&ithd->it_handlers, ih,
					    ih_next);
					wakeup(ih);
					mtx_unlock(&ithd->it_lock);
					goto restart;
				}
				if ((ih->ih_flags & IH_MPSAFE) == 0)
					mtx_lock(&Giant);
				ih->ih_handler(ih->ih_argument);
				if ((ih->ih_flags & IH_MPSAFE) == 0)
					mtx_unlock(&Giant);
			}
		}

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);
		mtx_lock_spin(&sched_lock);
		if (!ithd->it_need) {
			/*
			 * Should we call this earlier in the loop above?
			 */
			if (ithd->it_enable != NULL)
				ithd->it_enable(ithd->it_vector);
			TD_SET_IWAIT(td); /* we're idle */
			p->p_stats->p_ru.ru_nvcsw++;
			CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
			mi_switch();
			CTR2(KTR_INTR, "%s: pid %d: resumed", __func__,
			    p->p_pid);
		}
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{
	struct proc *p;

	if (swi_add(&clk_ithd, "clock", softclock, NULL, SWI_CLOCK,
		INTR_MPSAFE, &softclock_ih) ||
	    swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating standard software ithreads");

	p = clk_ithd->it_td->td_proc;
	PROC_LOCK(p);
	p->p_flag |= P_NOLOAD;
	PROC_UNLOCK(p);
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	    req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
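
/*
 * A sketch of the consumer side (not code from this file): vmstat(8)
 * and systat(1) fetch both arrays and pair each counter with its
 * name, roughly:
 *
 *	sysctlbyname("hw.intrnames", names, &nameslen, NULL, 0);
 *	sysctlbyname("hw.intrcnt", counts, &countslen, NULL, 0);
 */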
612