xref: /titanic_44/usr/src/uts/common/io/avintr.c (revision 3aedfe0b5d40c671717b8bec3135984b90d27349)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Autovectored Interrupt Configuration and Deconfiguration
30  */
31 
32 #include <sys/param.h>
33 #include <sys/cmn_err.h>
34 #include <sys/trap.h>
35 #include <sys/t_lock.h>
36 #include <sys/avintr.h>
37 #include <sys/kmem.h>
38 #include <sys/machlock.h>
39 #include <sys/systm.h>
40 #include <sys/machsystm.h>
41 #include <sys/sunddi.h>
42 #include <sys/x_call.h>
43 #include <sys/cpuvar.h>
44 #include <sys/atomic.h>
45 #include <sys/smp_impldefs.h>
46 #include <sys/sdt.h>
47 #include <sys/stack.h>
48 #include <sys/ddi_impldefs.h>
49 
50 typedef struct av_softinfo {
51 	cpuset_t	av_pending;	/* pending bitmasks */
52 } av_softinfo_t;
53 
54 static void insert_av(void *intr_id, struct av_head *vectp, avfunc f,
55 	caddr_t arg1, caddr_t arg2, uint64_t *ticksp, int pri_level,
56 	dev_info_t *dip);
57 static void remove_av(void *intr_id, struct av_head *vectp, avfunc f,
58 	int pri_level, int vect);
59 
60 /*
61  * Arrange for a driver to be called when a particular
62  * auto-vectored interrupt occurs.
63  * NOTE: if a device can generate interrupts on more than
64  * one level, or if a driver services devices that interrupt
65  * on more than one level, then the driver should install
66  * itself on each of those levels.
67  */
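/*
 * Illustrative sketch (hypothetical driver and identifiers, not from this
 * file): a driver whose device can interrupt at both IPL 5 and IPL 9 would
 * register its handler once per level, e.g.
 *
 *	(void) add_avintr(id1, 5, xx_intr, "xx", vect1, arg, NULL, NULL, dip);
 *	(void) add_avintr(id2, 9, xx_intr, "xx", vect2, arg, NULL, NULL, dip);
 */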
68 static char badsoft[] =
69 	"add_avintr: bad soft interrupt level %d for driver '%s'\n";
70 static char multilevel[] =
71 	"!IRQ%d is being shared by drivers with different interrupt levels.\n"
72 	"This may result in reduced system performance.";
73 static char multilevel2[] =
74 	"Cannot register interrupt for '%s' device at IPL %d because it\n"
75 	"conflicts with another device using the same vector %d with an IPL\n"
76 	"of %d. Reconfigure the conflicting devices to use different vectors.";
77 
78 #define	MAX_VECT	256
79 struct autovec *nmivect = NULL;
80 struct av_head autovect[MAX_VECT];
81 struct av_head softvect[LOCK_LEVEL + 1];
82 kmutex_t av_lock;
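/*
 * Pre-allocated handle whose ih_pending state is used by siron() and
 * kdi_siron() below to post the level-1 soft interrupt.
 */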
83 ddi_softint_hdl_impl_t softlevel1_hdl =
84 	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL};
85 
86 
87 /*
88  * clear/check the softint pending flag for
89  * the current CPU
90  */
91 void
92 av_clear_softint_pending(av_softinfo_t *infop)
93 {
94 	CPUSET_ATOMIC_DEL(infop->av_pending, CPU->cpu_seqid);
95 }
96 
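/*
 * Return B_TRUE if a softint is pending on any CPU (check_all), or
 * otherwise only if one is pending on the current CPU.
 */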
97 boolean_t
98 av_check_softint_pending(av_softinfo_t *infop, boolean_t check_all)
99 {
100 	if (check_all)
101 		return (!CPUSET_ISNULL(infop->av_pending));
102 	else
103 		return (CPU_IN_SET(infop->av_pending, CPU->cpu_seqid) != 0);
104 }
105 
106 /*
107  * This is the wrapper function generally used to mark a softint
108  * as pending
109  */
110 void
111 av_set_softint_pending(int pri, av_softinfo_t *infop)
112 {
113 	kdi_av_set_softint_pending(pri, infop);
114 }
115 
116 /*
117  * This is kmdb's private entry point to setsoftint, called from kdi_siron.
118  * It first sets our av softint pending bit for the current CPU,
119  * then it sets the CPU softint pending bit for pri.
120  */
121 void
122 kdi_av_set_softint_pending(int pri, av_softinfo_t *infop)
123 {
124 	CPUSET_ATOMIC_ADD(infop->av_pending, CPU->cpu_seqid);
125 
126 	atomic_or_32((uint32_t *)&CPU->cpu_softinfo.st_pending, 1 << pri);
127 }
128 
129 /*
130  * register an nmi interrupt routine. The first arg is used only to order
131  * the various nmi interrupt service routines in the chain; higher levels
132  * are called first.
133  */
134 int
135 add_nmintr(int lvl, avfunc nmintr, char *name, caddr_t arg)
136 {
137 	struct autovec  *mem;
138 	struct autovec *p, *prev = NULL;
139 
140 	if (nmintr == NULL) {
141 		printf("Attempt to add null vect for %s on nmi\n", name);
142 		return (0);
143 
144 	}
145 
146 	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
147 	mem->av_vector = nmintr;
148 	mem->av_intarg1 = arg;
149 	mem->av_intarg2 = NULL;
150 	mem->av_intr_id = NULL;
151 	mem->av_prilevel = lvl;
152 	mem->av_dip = NULL;
153 	mem->av_link = NULL;
154 
155 	mutex_enter(&av_lock);
156 
157 	if (!nmivect) {
158 		nmivect = mem;
159 		mutex_exit(&av_lock);
160 		return (1);
161 	}
162 	/* find where it goes in list */
163 	for (p = nmivect; p != NULL; p = p->av_link) {
164 		if (p->av_vector == nmintr && p->av_intarg1 == arg) {
165 			/*
166 			 * already in list
167 			 * So? Somebody added the same interrupt twice.
168 			 */
169 			cmn_err(CE_WARN, "Driver already registered '%s'",
170 			    name);
171 			kmem_free(mem, sizeof (struct autovec));
172 			mutex_exit(&av_lock);
173 			return (0);
174 		}
175 		if (p->av_prilevel < lvl) {
176 			if (p == nmivect) {   /* it's at head of list */
177 				mem->av_link = p;
178 				nmivect = mem;
179 			} else {
180 				mem->av_link = p;
181 				prev->av_link = mem;
182 			}
183 			mutex_exit(&av_lock);
184 			return (1);
185 		}
186 		prev = p;
187 
188 	}
189 	/* didn't find it, add it to the end */
190 	prev->av_link = mem;
191 	mutex_exit(&av_lock);
192 	return (1);
193 
194 }
195 
196 /*
197  * register a hardware interrupt handler.
198  */
199 int
200 add_avintr(void *intr_id, int lvl, avfunc xxintr, char *name, int vect,
201     caddr_t arg1, caddr_t arg2, uint64_t *ticksp, dev_info_t *dip)
202 {
203 	struct av_head *vecp = (struct av_head *)0;
204 	avfunc f;
205 	int s, vectindex;			/* save old spl value */
206 	ushort_t hi_pri;
207 
208 	if ((f = xxintr) == NULL) {
209 		printf("Attempt to add null vect for %s on vector %d\n",
210 		    name, vect);
211 		return (0);
212 
213 	}
214 	vectindex = vect % MAX_VECT;
215 
216 	vecp = &autovect[vectindex];
217 
218 	/*
219 	 * "hi_pri == 0" implies all entries on list are "unused",
220 	 * which means that it's OK to just insert this one.
221 	 */
222 	hi_pri = vecp->avh_hi_pri;
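	/*
	 * A vector cannot be shared between a handler running above
	 * LOCK_LEVEL and one running below it; reject the request in that
	 * case, and merely warn if the levels differ otherwise.
	 */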
223 	if (vecp->avh_link && (hi_pri != 0)) {
224 		if (((hi_pri > LOCK_LEVEL) && (lvl < LOCK_LEVEL)) ||
225 		    ((hi_pri < LOCK_LEVEL) && (lvl > LOCK_LEVEL))) {
226 			cmn_err(CE_WARN, multilevel2, name, lvl, vect,
227 			    hi_pri);
228 			return (0);
229 		}
230 		if ((vecp->avh_lo_pri != lvl) || (hi_pri != lvl))
231 			cmn_err(CE_NOTE, multilevel, vect);
232 	}
233 
234 	insert_av(intr_id, vecp, f, arg1, arg2, ticksp, lvl, dip);
235 	s = splhi();
236 	/*
237 	 * do whatever machine-specific things are necessary
238 	 * to set the priority level (e.g. set picmasks)
239 	 */
240 	mutex_enter(&av_lock);
241 	(*addspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
242 	mutex_exit(&av_lock);
243 	splx(s);
244 	return (1);
245 
246 }
247 
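/*
 * Update the second handler argument for a soft interrupt that was
 * previously registered at the given level.
 */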
248 void
249 update_avsoftintr_args(void *intr_id, int lvl, caddr_t arg2)
250 {
251 	struct autovec *p;
252 	struct autovec *target = NULL;
253 	struct av_head *vectp = (struct av_head *)&softvect[lvl];
254 
255 	for (p = vectp->avh_link; p && p->av_vector; p = p->av_link) {
256 		if (p->av_intr_id == intr_id) {
257 			target = p;
258 			break;
259 		}
260 	}
261 
262 	if (target == NULL)
263 		return;
264 	target->av_intarg2 = arg2;
265 }
266 
267 /*
268  * Register a software interrupt handler
269  */
270 int
271 add_avsoftintr(void *intr_id, int lvl, avfunc xxintr, char *name,
272     caddr_t arg1, caddr_t arg2)
273 {
274 	int slvl;
275 	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;
276 
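	/*
	 * If this soft level maps to a real interrupt vector, register the
	 * handler as an ordinary autovectored interrupt instead.
	 */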
277 	if ((slvl = slvltovect(lvl)) != -1)
278 		return (add_avintr(intr_id, lvl, xxintr,
279 		    name, slvl, arg1, arg2, NULL, NULL));
280 
281 	if (intr_id == NULL) {
282 		printf("Attempt to add null intr_id for %s on level %d\n",
283 		    name, lvl);
284 		return (0);
285 	}
286 
287 	if (xxintr == NULL) {
288 		printf("Attempt to add null handler for %s on level %d\n",
289 		    name, lvl);
290 		return (0);
291 	}
292 
293 	if (lvl <= 0 || lvl > LOCK_LEVEL) {
294 		printf(badsoft, lvl, name);
295 		return (0);
296 	}
297 
298 	if (hdlp->ih_pending == NULL) {
299 		hdlp->ih_pending =
300 		    kmem_zalloc(sizeof (av_softinfo_t), KM_SLEEP);
301 	}
302 
303 	insert_av(intr_id, &softvect[lvl], xxintr, arg1, arg2, NULL, lvl, NULL);
304 
305 	return (1);
306 }
307 
308 /* insert an interrupt vector into chain */
309 static void
310 insert_av(void *intr_id, struct av_head *vectp, avfunc f, caddr_t arg1,
311     caddr_t arg2, uint64_t *ticksp, int pri_level, dev_info_t *dip)
312 {
313 	/*
314 	 * Protect rewrites of the list
315 	 */
316 	struct autovec *p, *mem;
317 
318 	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
319 	mem->av_vector = f;
320 	mem->av_intarg1 = arg1;
321 	mem->av_intarg2 = arg2;
322 	mem->av_ticksp = ticksp;
323 	mem->av_intr_id = intr_id;
324 	mem->av_prilevel = pri_level;
325 	mem->av_dip = dip;
326 	mem->av_link = NULL;
327 
328 	mutex_enter(&av_lock);
329 
330 	if (vectp->avh_link == NULL) {	/* Nothing on list - put it at head */
331 		vectp->avh_link = mem;
332 		vectp->avh_hi_pri = vectp->avh_lo_pri = (ushort_t)pri_level;
333 
334 		mutex_exit(&av_lock);
335 		return;
336 	}
337 
338 	/* find where it goes in list */
339 	for (p = vectp->avh_link; p != NULL; p = p->av_link) {
340 		if (p->av_vector == NULL) {	/* freed struct available */
341 			kmem_free(mem, sizeof (struct autovec));
342 			p->av_intarg1 = arg1;
343 			p->av_intarg2 = arg2;
344 			p->av_ticksp = ticksp;
345 			p->av_intr_id = intr_id;
346 			p->av_prilevel = pri_level;
347 			p->av_dip = dip;
348 			if (pri_level > (int)vectp->avh_hi_pri) {
349 				vectp->avh_hi_pri = (ushort_t)pri_level;
350 			}
351 			if (pri_level < (int)vectp->avh_lo_pri) {
352 				vectp->avh_lo_pri = (ushort_t)pri_level;
353 			}
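			/*
			 * av_vector is filled in last: the dispatch routines
			 * stop at a NULL av_vector, so the reused entry is
			 * not walked until the fields above are in place.
			 */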
354 			p->av_vector = f;
355 			mutex_exit(&av_lock);
356 			return;
357 		}
358 	}
359 	/* no free slot found; insert new interrupt at beginning of chain */
360 	mem->av_link = vectp->avh_link;
361 	vectp->avh_link = mem;
362 	if (pri_level > (int)vectp->avh_hi_pri) {
363 		vectp->avh_hi_pri = (ushort_t)pri_level;
364 	}
365 	if (pri_level < (int)vectp->avh_lo_pri) {
366 		vectp->avh_lo_pri = (ushort_t)pri_level;
367 	}
368 	mutex_exit(&av_lock);
369 }
370 
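/*
 * Common removal path for soft interrupt handlers; the per-handle
 * pending state is freed only when rem_softinfo is set.
 */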
371 static int
372 av_rem_softintr(void *intr_id, int lvl, avfunc xxintr, boolean_t rem_softinfo)
373 {
374 	struct av_head *vecp = (struct av_head *)0;
375 	int slvl;
376 	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;
377 	av_softinfo_t *infop = (av_softinfo_t *)hdlp->ih_pending;
378 
379 	if (xxintr == NULL)
380 		return (0);
381 
382 	if ((slvl = slvltovect(lvl)) != -1) {
383 		rem_avintr(intr_id, lvl, xxintr, slvl);
384 		return (1);
385 	}
386 
387 	if (lvl <= 0 || lvl > LOCK_LEVEL) {
388 		return (0);
389 	}
390 	vecp = &softvect[lvl];
391 	remove_av(intr_id, vecp, xxintr, lvl, 0);
392 
393 	if (rem_softinfo) {
394 		kmem_free(infop, sizeof (av_softinfo_t));
395 		hdlp->ih_pending = NULL;
396 	}
397 
398 	return (1);
399 }
400 
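/*
 * Re-register a soft interrupt at the priority now recorded in its handle,
 * then remove the registration at old_lvl, preserving ih_pending.
 */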
401 int
402 av_softint_movepri(void *intr_id, int old_lvl)
403 {
404 	int ret;
405 	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;
406 
407 	ret = add_avsoftintr(intr_id, hdlp->ih_pri, hdlp->ih_cb_func,
408 	    DEVI(hdlp->ih_dip)->devi_name, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);
409 
410 	if (ret) {
411 		(void) av_rem_softintr(intr_id, old_lvl, hdlp->ih_cb_func,
412 		    B_FALSE);
413 	}
414 
415 	return (ret);
416 }
417 
418 /*
419  * Remove a driver from the autovector list.
420  */
421 int
422 rem_avsoftintr(void *intr_id, int lvl, avfunc xxintr)
423 {
424 	return (av_rem_softintr(intr_id, lvl, xxintr, B_TRUE));
425 }
426 
427 void
428 rem_avintr(void *intr_id, int lvl, avfunc xxintr, int vect)
429 {
430 	struct av_head *vecp = (struct av_head *)0;
431 	avfunc f;
432 	int s, vectindex;			/* save old spl value */
433 
434 	if ((f = xxintr) == NULL)
435 		return;
436 
437 	vectindex = vect % MAX_VECT;
438 	vecp = &autovect[vectindex];
439 	remove_av(intr_id, vecp, f, lvl, vect);
440 	s = splhi();
441 	mutex_enter(&av_lock);
442 	(*delspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
443 	mutex_exit(&av_lock);
444 	splx(s);
445 }
446 
447 
448 /*
449  * After having made a change to an autovector list, wait until we have
450  * seen each cpu not executing an interrupt at that level--so we know our
451  * change has taken effect completely (no old state in registers, etc).
452  */
453 void
454 wait_till_seen(int ipl)
455 {
456 	int cpu_in_chain, cix;
457 	struct cpu *cpup;
458 	cpuset_t cpus_to_check;
459 
460 	CPUSET_ALL(cpus_to_check);
461 	do {
462 		cpu_in_chain = 0;
463 		for (cix = 0; cix < NCPU; cix++) {
464 			cpup = cpu[cix];
465 			if (cpup != NULL && CPU_IN_SET(cpus_to_check, cix)) {
466 				if (intr_active(cpup, ipl)) {
467 					cpu_in_chain = 1;
468 				} else {
469 					CPUSET_DEL(cpus_to_check, cix);
470 				}
471 			}
472 		}
473 	} while (cpu_in_chain);
474 }
475 
476 /* remove an interrupt vector from the chain */
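/*
 * The autovec entry itself is never unlinked or freed: the target's
 * av_vector is cleared and, when the target is not the last live entry,
 * the last entry's contents are copied into its slot.  wait_till_seen()
 * is used at each step so that no CPU can still be executing stale state
 * at this IPL.
 */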
477 static void
478 remove_av(void *intr_id, struct av_head *vectp, avfunc f, int pri_level,
479 	int vect)
480 {
481 	struct autovec *endp, *p, *target;
482 	int	lo_pri, hi_pri;
483 	int	ipl;
484 	/*
485 	 * Protect rewrites of the list
486 	 */
487 	target = NULL;
488 
489 	mutex_enter(&av_lock);
490 	ipl = pri_level;
491 	lo_pri = MAXIPL;
492 	hi_pri = 0;
493 	for (endp = p = vectp->avh_link; p && p->av_vector; p = p->av_link) {
494 		endp = p;
495 		if ((p->av_vector == f) && (p->av_intr_id == intr_id)) {
496 			/* found the handler */
497 			target = p;
498 			continue;
499 		}
500 		if (p->av_prilevel > hi_pri)
501 			hi_pri = p->av_prilevel;
502 		if (p->av_prilevel < lo_pri)
503 			lo_pri = p->av_prilevel;
504 	}
505 	if (ipl < hi_pri)
506 		ipl = hi_pri;
507 	if (target == NULL) {	/* not found */
508 		printf("Couldn't remove function %p at %d, %d\n",
509 		    (void *)f, vect, pri_level);
510 		mutex_exit(&av_lock);
511 		return;
512 	}
513 
514 	target->av_vector = NULL;
515 	target->av_ticksp = NULL;
516 	wait_till_seen(ipl);
517 	if (endp != target) {	/* vector to be removed is not last in chain */
518 		target->av_vector = endp->av_vector;
519 		target->av_intarg1 = endp->av_intarg1;
520 		target->av_intarg2 = endp->av_intarg2;
521 		target->av_ticksp = endp->av_ticksp;
522 		target->av_intr_id = endp->av_intr_id;
523 		target->av_prilevel = endp->av_prilevel;
524 		target->av_dip = endp->av_dip;
525 		/*
526 		 * We have a hole here where the routine corresponding to
527 		 * endp may not get called. Do a wait_till_seen to take care
528 		 * of this.
529 		 */
530 		wait_till_seen(ipl);
531 		endp->av_vector = NULL;
532 		endp->av_ticksp = NULL;
533 	}
534 
535 	if (lo_pri > hi_pri) {	/* the chain is now empty */
536 		/* Leave the unused entries here for probable future use */
537 		vectp->avh_lo_pri = MAXIPL;
538 		vectp->avh_hi_pri = 0;
539 	} else {
540 		if ((int)vectp->avh_lo_pri < lo_pri)
541 			vectp->avh_lo_pri = (ushort_t)lo_pri;
542 		if ((int)vectp->avh_hi_pri > hi_pri)
543 			vectp->avh_hi_pri = (ushort_t)hi_pri;
544 	}
545 	mutex_exit(&av_lock);
546 	wait_till_seen(ipl);
547 }
548 
549 /*
550  * kmdb uses siron (and thus setsoftint) while the world is stopped in order to
551  * inform its driver component that there's work to be done.  We need to keep
552  * DTrace from instrumenting kmdb's siron and setsoftint.  We duplicate siron,
553  * giving kmdb's version a kdi prefix to keep DTrace at bay.  We also
554  * provide kdi_-prefixed versions of the various setsoftint functions for
555  * kmdb to use, while the main *setsoftint() functionality is
556  * implemented as a wrapper.  This allows tracing, while still providing a
557  * way for kmdb to sneak in unmolested.
558  */
559 void
560 kdi_siron(void)
561 {
562 	(*kdisetsoftint)(1, softlevel1_hdl.ih_pending);
563 }
564 
565 /*
566  * Trigger a soft interrupt.
567  */
568 void
569 siron(void)
570 {
571 	(*setsoftint)(1, softlevel1_hdl.ih_pending);
572 }
573 
574 /*
575  * The handler which is executed on the target CPU.
576  */
577 /*ARGSUSED*/
578 static int
579 siron_poke_intr(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
580 {
581 	siron();
582 	return (0);
583 }
584 
585 /*
586  * May get called from softcall to poke CPUs.
587  */
588 void
589 siron_poke_cpu(cpuset_t poke)
590 {
591 	int cpuid = CPU->cpu_id;
592 
593 	/*
594 	 * If we are poking ourselves, we can simply
595 	 * generate the level-1 softint using siron()
596 	 */
597 	if (CPU_IN_SET(poke, cpuid)) {
598 		siron();
599 		CPUSET_DEL(poke, cpuid);
600 		if (CPUSET_ISNULL(poke))
601 			return;
602 	}
603 
604 	xc_call(0, 0, 0, X_CALL_MEDPRI, poke, (xc_func_t)siron_poke_intr);
605 }
606 
607 /*
608  * Walk the autovector table for this vector, invoking each
609  * interrupt handler as we go.
610  */
611 
612 extern uint64_t intr_get_time(void);
613 
614 void
615 av_dispatch_autovect(uint_t vec)
616 {
617 	struct autovec *av;
618 
619 	ASSERT_STACK_ALIGNED();
620 
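	/*
	 * Keep re-walking the chain as long as more than one handler was
	 * called and at least one of them claimed the interrupt: another
	 * device sharing the vector may still need service, so make another
	 * pass before returning.
	 */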
621 	while ((av = autovect[vec].avh_link) != NULL) {
622 		uint_t numcalled = 0;
623 		uint_t claimed = 0;
624 
625 		for (; av; av = av->av_link) {
626 			uint_t r;
627 			uint_t (*intr)() = av->av_vector;
628 			caddr_t arg1 = av->av_intarg1;
629 			caddr_t arg2 = av->av_intarg2;
630 			dev_info_t *dip = av->av_dip;
631 
632 			if (intr == NULL)
633 				break;
634 
635 			DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
636 			    void *, intr, caddr_t, arg1, caddr_t, arg2);
637 			r = (*intr)(arg1, arg2);
638 			DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
639 			    void *, intr, caddr_t, arg1, uint_t, r);
640 			numcalled++;
641 			claimed |= r;
642 			if (av->av_ticksp && av->av_prilevel <= LOCK_LEVEL)
643 				atomic_add_64(av->av_ticksp, intr_get_time());
644 		}
645 
646 		/*
647 		 * If there's only one interrupt handler in the chain,
648 		 * or if no one claimed the interrupt at all, give up now.
649 		 */
650 		if (numcalled == 1 || claimed == 0)
651 			break;
652 	}
653 }
654 
655 /*
656  * Call every soft interrupt handler we can find at this level once.
657  */
658 void
659 av_dispatch_softvect(uint_t pil)
660 {
661 	struct autovec *av;
662 	ddi_softint_hdl_impl_t	*hdlp;
663 	uint_t (*intr)();
664 	caddr_t arg1;
665 	caddr_t arg2;
666 
667 	ASSERT_STACK_ALIGNED();
668 	ASSERT(pil >= 0 && pil <= PIL_MAX);
669 
670 	for (av = softvect[pil].avh_link; av; av = av->av_link) {
671 		if ((intr = av->av_vector) == NULL)
672 			break;
673 		arg1 = av->av_intarg1;
674 		arg2 = av->av_intarg2;
675 
676 		hdlp = (ddi_softint_hdl_impl_t *)av->av_intr_id;
677 		ASSERT(hdlp);
678 
679 		/*
680 		 * Each cpu has its own pending bit in hdlp->ih_pending;
681 		 * here av_check/clear_softint_pending just checks
682 		 * and clears the pending bit for the current cpu, which
683 		 * has just triggered a softint.
684 		 */
685 		if (av_check_softint_pending(hdlp->ih_pending, B_FALSE)) {
686 			av_clear_softint_pending(hdlp->ih_pending);
687 			(void) (*intr)(arg1, arg2);
688 		}
689 	}
690 }
691 
692 struct regs;
693 
694 /*
695  * Call every NMI handler we know of once.
696  */
697 void
698 av_dispatch_nmivect(struct regs *rp)
699 {
700 	struct autovec *av;
701 
702 	ASSERT_STACK_ALIGNED();
703 
704 	for (av = nmivect; av; av = av->av_link)
705 		(void) (av->av_vector)(av->av_intarg1, rp);
706 }
707