xref: /titanic_41/usr/src/uts/common/io/avintr.c (revision 1fceb383a3f0b59711832b9dc4e8329d7f216604)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Autovectored Interrupt Configuration and Deconfiguration
30  */
31 
32 #include <sys/param.h>
33 #include <sys/cmn_err.h>
34 #include <sys/trap.h>
35 #include <sys/t_lock.h>
36 #include <sys/avintr.h>
37 #include <sys/kmem.h>
38 #include <sys/machlock.h>
39 #include <sys/systm.h>
40 #include <sys/machsystm.h>
41 #include <sys/sunddi.h>
42 #include <sys/x_call.h>
43 #include <sys/cpuvar.h>
44 #include <sys/atomic.h>
45 #include <sys/smp_impldefs.h>
46 #include <sys/sdt.h>
47 #include <sys/stack.h>
48 #include <sys/ddi_impldefs.h>
49 #ifdef __xpv
50 #include <sys/evtchn_impl.h>
51 #endif
52 
/*
 * Per-handler soft interrupt state: one bit per CPU (indexed by
 * cpu_seqid) recording where this softint is currently pending.
 */
typedef struct av_softinfo {
	cpuset_t	av_pending;	/* pending bitmasks */
} av_softinfo_t;

/* Chain insert/remove helpers; both acquire av_lock internally. */
static void insert_av(void *intr_id, struct av_head *vectp, avfunc f,
	caddr_t arg1, caddr_t arg2, uint64_t *ticksp, int pri_level,
	dev_info_t *dip);
static void remove_av(void *intr_id, struct av_head *vectp, avfunc f,
	int pri_level, int vect);
62 
63 /*
64  * Arrange for a driver to be called when a particular
65  * auto-vectored interrupt occurs.
66  * NOTE: if a device can generate interrupts on more than
67  * one level, or if a driver services devices that interrupt
68  * on more than one level, then the driver should install
69  * itself on each of those levels.
70  */
/* Diagnostic message formats used by the registration routines below. */
static char badsoft[] =
	"add_avintr: bad soft interrupt level %d for driver '%s'\n";
static char multilevel[] =
	"!IRQ%d is being shared by drivers with different interrupt levels.\n"
	"This may result in reduced system performance.";
static char multilevel2[] =
	"Cannot register interrupt for '%s' device at IPL %d because it\n"
	"conflicts with another device using the same vector %d with an IPL\n"
	"of %d. Reconfigure the conflicting devices to use different vectors.";

/* Number of hardware interrupt vectors (event channels under Xen). */
#ifdef __xpv
#define	MAX_VECT	NR_IRQS
#else
#define	MAX_VECT	256
#endif

struct autovec *nmivect = NULL;	/* NMI handler chain, higher levels first */
struct av_head autovect[MAX_VECT];	/* per-vector hardware handler chains */
struct av_head softvect[LOCK_LEVEL + 1];	/* per-level softint chains */
kmutex_t av_lock;	/* protects rewrites of the chains above */
/* softint handle used by siron()/kdi_siron() for the level-1 softint */
ddi_softint_hdl_impl_t softlevel1_hdl =
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL};
93 
94 
95 /*
96  * clear/check softint pending flag corresponding for
97  * the current CPU
98  */
void
av_clear_softint_pending(av_softinfo_t *infop)
{
	/* atomically clear the current CPU's pending bit */
	CPUSET_ATOMIC_DEL(infop->av_pending, CPU->cpu_seqid);
}
104 
105 boolean_t
106 av_check_softint_pending(av_softinfo_t *infop, boolean_t check_all)
107 {
108 	if (check_all)
109 		return (!CPUSET_ISNULL(infop->av_pending));
110 	else
111 		return (CPU_IN_SET(infop->av_pending, CPU->cpu_seqid) != 0);
112 }
113 
114 /*
115  * This is the wrapper function which is generally used to set a softint
116  * pending
117  */
void
av_set_softint_pending(int pri, av_softinfo_t *infop)
{
	/* traceable wrapper; the real work is in kdi_av_set_softint_pending */
	kdi_av_set_softint_pending(pri, infop);
}
123 
124 /*
125  * This is kmdb's private entry point to setsoftint called from kdi_siron
126  * It first sets our av softint pending bit for the current CPU,
127  * then it sets the CPU softint pending bit for pri.
128  */
void
kdi_av_set_softint_pending(int pri, av_softinfo_t *infop)
{
	/* mark this handler pending on the current CPU ... */
	CPUSET_ATOMIC_ADD(infop->av_pending, CPU->cpu_seqid);

	/* ... then post the CPU's softint pending bit for this priority */
	atomic_or_32((uint32_t *)&CPU->cpu_softinfo.st_pending, 1 << pri);
}
136 
137 /*
138  * register nmi interrupt routine. The first arg is used only to order
139  * various nmi interrupt service routines in the chain. Higher lvls will
140  * be called first
141  */
int
add_nmintr(int lvl, avfunc nmintr, char *name, caddr_t arg)
{
	struct autovec  *mem;
	struct autovec *p, *prev = NULL;

	if (nmintr == NULL) {
		printf("Attempt to add null vect for %s on nmi\n", name);
		return (0);

	}

	/* allocate the new entry before taking av_lock (KM_SLEEP may block) */
	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
	mem->av_vector = nmintr;
	mem->av_intarg1 = arg;
	mem->av_intarg2 = NULL;
	mem->av_intr_id = NULL;
	mem->av_prilevel = lvl;
	mem->av_dip = NULL;
	mem->av_link = NULL;

	mutex_enter(&av_lock);

	/* empty chain: new entry becomes the head */
	if (!nmivect) {
		nmivect = mem;
		mutex_exit(&av_lock);
		return (1);
	}
	/* find where it goes in list (kept sorted, highest lvl first) */
	for (p = nmivect; p != NULL; p = p->av_link) {
		if (p->av_vector == nmintr && p->av_intarg1 == arg) {
			/*
			 * already in list
			 * So? Somebody added the same interrupt twice.
			 */
			cmn_err(CE_WARN, "Driver already registered '%s'",
			    name);
			kmem_free(mem, sizeof (struct autovec));
			mutex_exit(&av_lock);
			return (0);
		}
		if (p->av_prilevel < lvl) {
			if (p == nmivect) {   /* it's at head of list */
				mem->av_link = p;
				nmivect = mem;
			} else {
				/* insert between prev and p */
				mem->av_link = p;
				prev->av_link = mem;
			}
			mutex_exit(&av_lock);
			return (1);
		}
		prev = p;

	}
	/* didn't find it, add it to the end */
	prev->av_link = mem;
	mutex_exit(&av_lock);
	return (1);

}
203 
204 /*
205  * register a hardware interrupt handler.
206  */
int
add_avintr(void *intr_id, int lvl, avfunc xxintr, char *name, int vect,
    caddr_t arg1, caddr_t arg2, uint64_t *ticksp, dev_info_t *dip)
{
	struct av_head *vecp = (struct av_head *)0;
	avfunc f;
	int s, vectindex;			/* save old spl value */
	ushort_t hi_pri;

	if ((f = xxintr) == NULL) {
		printf("Attempt to add null vect for %s on vector %d\n",
		    name, vect);
		return (0);

	}
	vectindex = vect % MAX_VECT;

	vecp = &autovect[vectindex];

	/*
	 * "hi_pri == 0" implies all entries on list are "unused",
	 * which means that it's OK to just insert this one.
	 */
	hi_pri = vecp->avh_hi_pri;
	if (vecp->avh_link && (hi_pri != 0)) {
		/*
		 * Refuse to mix handlers above and below LOCK_LEVEL on
		 * one vector; sharing across different IPLs on the same
		 * side of LOCK_LEVEL is allowed but warned about.
		 */
		if (((hi_pri > LOCK_LEVEL) && (lvl < LOCK_LEVEL)) ||
		    ((hi_pri < LOCK_LEVEL) && (lvl > LOCK_LEVEL))) {
			cmn_err(CE_WARN, multilevel2, name, lvl, vect,
			    hi_pri);
			return (0);
		}
		if ((vecp->avh_lo_pri != lvl) || (hi_pri != lvl))
			cmn_err(CE_NOTE, multilevel, vect);
	}

	insert_av(intr_id, vecp, f, arg1, arg2, ticksp, lvl, dip);
	s = splhi();
	/*
	 * do what ever machine specific things are necessary
	 * to set priority level (e.g. set picmasks)
	 */
	mutex_enter(&av_lock);
	(*addspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
	mutex_exit(&av_lock);
	splx(s);
	return (1);

}
255 
256 void
257 update_avsoftintr_args(void *intr_id, int lvl, caddr_t arg2)
258 {
259 	struct autovec *p;
260 	struct autovec *target = NULL;
261 	struct av_head *vectp = (struct av_head *)&softvect[lvl];
262 
263 	for (p = vectp->avh_link; p && p->av_vector; p = p->av_link) {
264 		if (p->av_intr_id == intr_id) {
265 			target = p;
266 			break;
267 		}
268 	}
269 
270 	if (target == NULL)
271 		return;
272 	target->av_intarg2 = arg2;
273 }
274 
275 /*
276  * Register a software interrupt handler
277  */
int
add_avsoftintr(void *intr_id, int lvl, avfunc xxintr, char *name,
    caddr_t arg1, caddr_t arg2)
{
	int slvl;
	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;

	/* levels backed by a real hardware vector register as hard intrs */
	if ((slvl = slvltovect(lvl)) != -1)
		return (add_avintr(intr_id, lvl, xxintr,
		    name, slvl, arg1, arg2, NULL, NULL));

	if (intr_id == NULL) {
		printf("Attempt to add null intr_id for %s on level %d\n",
		    name, lvl);
		return (0);
	}

	if (xxintr == NULL) {
		printf("Attempt to add null handler for %s on level %d\n",
		    name, lvl);
		return (0);
	}

	/* soft levels must lie in 1..LOCK_LEVEL (softvect[] bounds) */
	if (lvl <= 0 || lvl > LOCK_LEVEL) {
		printf(badsoft, lvl, name);
		return (0);
	}

	/* first registration for this handle: allocate its pending state */
	if (hdlp->ih_pending == NULL) {
		hdlp->ih_pending =
		    kmem_zalloc(sizeof (av_softinfo_t), KM_SLEEP);
	}

	insert_av(intr_id, &softvect[lvl], xxintr, arg1, arg2, NULL, lvl, NULL);

	return (1);
}
315 
/*
 * Insert an interrupt vector into a chain.  Reuses a freed entry
 * (av_vector == NULL) when one exists; otherwise links a new entry at
 * the head.  Updates the head's cached lo/hi priority bounds.
 */
static void
insert_av(void *intr_id, struct av_head *vectp, avfunc f, caddr_t arg1,
    caddr_t arg2, uint64_t *ticksp, int pri_level, dev_info_t *dip)
{
	/*
	 * Protect rewrites of the list
	 */
	struct autovec *p, *mem;

	/* allocate before taking av_lock (KM_SLEEP may block) */
	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
	mem->av_vector = f;
	mem->av_intarg1 = arg1;
	mem->av_intarg2 = arg2;
	mem->av_ticksp = ticksp;
	mem->av_intr_id = intr_id;
	mem->av_prilevel = pri_level;
	mem->av_dip = dip;
	mem->av_link = NULL;

	mutex_enter(&av_lock);

	if (vectp->avh_link == NULL) {	/* Nothing on list - put it at head */
		vectp->avh_link = mem;
		vectp->avh_hi_pri = vectp->avh_lo_pri = (ushort_t)pri_level;

		mutex_exit(&av_lock);
		return;
	}

	/* find where it goes in list */
	for (p = vectp->avh_link; p != NULL; p = p->av_link) {
		if (p->av_vector == NULL) {	/* freed struct available */
			p->av_intarg1 = arg1;
			p->av_intarg2 = arg2;
			p->av_ticksp = ticksp;
			p->av_intr_id = intr_id;
			p->av_prilevel = pri_level;
			p->av_dip = dip;
			if (pri_level > (int)vectp->avh_hi_pri) {
				vectp->avh_hi_pri = (ushort_t)pri_level;
			}
			if (pri_level < (int)vectp->avh_lo_pri) {
				vectp->avh_lo_pri = (ushort_t)pri_level;
			}
			/*
			 * To prevent calling service routine before args
			 * and ticksp are ready fill in vector last.
			 */
			p->av_vector = f;
			mutex_exit(&av_lock);
			/* the preallocated entry wasn't needed after all */
			kmem_free(mem, sizeof (struct autovec));
			return;
		}
	}
	/* insert new intpt at beginning of chain */
	mem->av_link = vectp->avh_link;
	vectp->avh_link = mem;
	if (pri_level > (int)vectp->avh_hi_pri) {
		vectp->avh_hi_pri = (ushort_t)pri_level;
	}
	if (pri_level < (int)vectp->avh_lo_pri) {
		vectp->avh_lo_pri = (ushort_t)pri_level;
	}
	mutex_exit(&av_lock);
}
382 
383 static int
384 av_rem_softintr(void *intr_id, int lvl, avfunc xxintr, boolean_t rem_softinfo)
385 {
386 	struct av_head *vecp = (struct av_head *)0;
387 	int slvl;
388 	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;
389 	av_softinfo_t *infop = (av_softinfo_t *)hdlp->ih_pending;
390 
391 	if (xxintr == NULL)
392 		return (0);
393 
394 	if ((slvl = slvltovect(lvl)) != -1) {
395 		rem_avintr(intr_id, lvl, xxintr, slvl);
396 		return (1);
397 	}
398 
399 	if (lvl <= 0 && lvl >= LOCK_LEVEL) {
400 		return (0);
401 	}
402 	vecp = &softvect[lvl];
403 	remove_av(intr_id, vecp, xxintr, lvl, 0);
404 
405 	if (rem_softinfo) {
406 		kmem_free(infop, sizeof (av_softinfo_t));
407 		hdlp->ih_pending = NULL;
408 	}
409 
410 	return (1);
411 }
412 
/*
 * Move a soft interrupt registration to the handle's current priority
 * (hdlp->ih_pri): register at the new level first, then remove the
 * old-level registration.  Pending state (ih_pending) is preserved by
 * passing B_FALSE to av_rem_softintr().  Returns the add result.
 */
int
av_softint_movepri(void *intr_id, int old_lvl)
{
	int ret;
	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;

	ret = add_avsoftintr(intr_id, hdlp->ih_pri, hdlp->ih_cb_func,
	    DEVI(hdlp->ih_dip)->devi_name, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	/* only drop the old registration if the new one succeeded */
	if (ret) {
		(void) av_rem_softintr(intr_id, old_lvl, hdlp->ih_cb_func,
		    B_FALSE);
	}

	return (ret);
}
429 
430 /*
431  * Remove a driver from the autovector list.
432  */
433 int
434 rem_avsoftintr(void *intr_id, int lvl, avfunc xxintr)
435 {
436 	return (av_rem_softintr(intr_id, lvl, xxintr, B_TRUE));
437 }
438 
/*
 * Remove a hardware interrupt handler from the autovect chain for
 * vect, then let the machine-specific delspl hook readjust priority
 * masks under av_lock at splhi.
 */
void
rem_avintr(void *intr_id, int lvl, avfunc xxintr, int vect)
{
	struct av_head *vecp = (struct av_head *)0;
	avfunc f;
	int s, vectindex;			/* save old spl value */

	if ((f = xxintr) == NULL)
		return;

	vectindex = vect % MAX_VECT;
	vecp = &autovect[vectindex];
	remove_av(intr_id, vecp, f, lvl, vect);
	s = splhi();
	mutex_enter(&av_lock);
	(*delspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
	mutex_exit(&av_lock);
	splx(s);
}
458 
459 
460 /*
461  * After having made a change to an autovector list, wait until we have
462  * seen each cpu not executing an interrupt at that level--so we know our
463  * change has taken effect completely (no old state in registers, etc).
464  */
465 void
466 wait_till_seen(int ipl)
467 {
468 	int cpu_in_chain, cix;
469 	struct cpu *cpup;
470 	cpuset_t cpus_to_check;
471 
472 	CPUSET_ALL(cpus_to_check);
473 	do {
474 		cpu_in_chain = 0;
475 		for (cix = 0; cix < NCPU; cix++) {
476 			cpup = cpu[cix];
477 			if (cpup != NULL && CPU_IN_SET(cpus_to_check, cix)) {
478 				if (intr_active(cpup, ipl)) {
479 					cpu_in_chain = 1;
480 				} else {
481 					CPUSET_DEL(cpus_to_check, cix);
482 				}
483 			}
484 		}
485 	} while (cpu_in_chain);
486 }
487 
/* harmless sink for late av_ticksp updates after a handler is removed */
static uint64_t dummy_tick;

/*
 * Remove an interrupt vector from the chain.  The entry is not freed;
 * its av_vector is NULLed so the slot can be reused by insert_av().
 * Recomputes the head's lo/hi priority bounds from the survivors and
 * waits until no CPU is still executing at the affected ipl.
 */
static void
remove_av(void *intr_id, struct av_head *vectp, avfunc f, int pri_level,
	int vect)
{
	struct autovec *p, *target;
	int	lo_pri, hi_pri;
	int	ipl;
	/*
	 * Protect rewrites of the list
	 */
	target = NULL;

	mutex_enter(&av_lock);
	ipl = pri_level;
	lo_pri = MAXIPL;
	hi_pri = 0;
	/* find the handler and the remaining pri bounds in one pass */
	for (p = vectp->avh_link; p; p = p->av_link) {
		if ((p->av_vector == f) && (p->av_intr_id == intr_id)) {
			/* found the handler */
			target = p;
			continue;
		}
		if (p->av_vector != NULL) {
			if (p->av_prilevel > hi_pri)
				hi_pri = p->av_prilevel;
			if (p->av_prilevel < lo_pri)
				lo_pri = p->av_prilevel;
		}
	}
	/* wait at the highest ipl the chain still uses, if higher */
	if (ipl < hi_pri)
		ipl = hi_pri;
	if (target == NULL) {	/* not found */
		printf("Couldn't remove function %p at %d, %d\n",
		    (void *)f, vect, pri_level);
		mutex_exit(&av_lock);
		return;
	}

	/*
	 * This drops the handler from the chain, it can no longer be called.
	 * However, there is no guarantee that the handler is not currently
	 * still executing.
	 */
	target->av_vector = NULL;
	/*
	 * There is a race where we could be just about to pick up the ticksp
	 * pointer to increment it after returning from the service routine
	 * in av_dispatch_autovect.  Rather than NULL it out let's just point
	 * it off to something safe so that any final tick update attempt
	 * won't fault.
	 */
	target->av_ticksp = &dummy_tick;
	wait_till_seen(ipl);

	if (lo_pri > hi_pri) {	/* the chain is now empty */
		/* Leave the unused entries here for probable future use */
		vectp->avh_lo_pri = MAXIPL;
		vectp->avh_hi_pri = 0;
	} else {
		if ((int)vectp->avh_lo_pri < lo_pri)
			vectp->avh_lo_pri = (ushort_t)lo_pri;
		if ((int)vectp->avh_hi_pri > hi_pri)
			vectp->avh_hi_pri = (ushort_t)hi_pri;
	}
	mutex_exit(&av_lock);
	/* second wait: ensure the bound updates are also globally visible */
	wait_till_seen(ipl);
}
558 
559 /*
560  * kmdb uses siron (and thus setsoftint) while the world is stopped in order to
561  * inform its driver component that there's work to be done.  We need to keep
562  * DTrace from instrumenting kmdb's siron and setsoftint.  We duplicate siron,
563  * giving kmdb's version a kdi prefix to keep DTrace at bay.   We also
564  * provide a version of the various setsoftint functions available for kmdb to
565  * use using a kdi_ prefix while the main *setsoftint() functionality is
566  * implemented as a wrapper.  This allows tracing, while still providing a
567  * way for kmdb to sneak in unmolested.
568  */
void
kdi_siron(void)
{
	/* kmdb-private level-1 softint trigger; bypasses DTrace-traced path */
	(*kdisetsoftint)(1, softlevel1_hdl.ih_pending);
}
574 
575 /*
576  * Trigger a soft interrupt.
577  */
578 void
579 siron(void)
580 {
581 	(*setsoftint)(1, softlevel1_hdl.ih_pending);
582 }
583 
584 /*
585  * The handler which is executed on the target CPU.
586  */
/*ARGSUSED*/
static int
siron_poke_intr(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	/* cross-call target: raise the level-1 softint on this CPU */
	siron();
	return (0);
}
594 
595 /*
596  * May get called from softcall to poke CPUs.
597  */
598 void
599 siron_poke_cpu(cpuset_t poke)
600 {
601 	int cpuid = CPU->cpu_id;
602 
603 	/*
604 	 * If we are poking to ourself then we can simply
605 	 * generate level1 using siron()
606 	 */
607 	if (CPU_IN_SET(poke, cpuid)) {
608 		siron();
609 		CPUSET_DEL(poke, cpuid);
610 		if (CPUSET_ISNULL(poke))
611 			return;
612 	}
613 
614 	xc_call(0, 0, 0, X_CALL_MEDPRI, poke, (xc_func_t)siron_poke_intr);
615 }
616 
617 /*
618  * Walk the autovector table for this vector, invoking each
619  * interrupt handler as we go.
620  */
621 
622 extern uint64_t intr_get_time(void);
623 
/*
 * Invoke every active handler on the autovect chain for vec.  The
 * outer loop re-reads the chain head and replays the whole chain as
 * long as the vector is shared (more than one handler called) and at
 * least one handler claimed the interrupt, since a shared, claimed
 * IRQ may have been re-asserted by another device meanwhile.
 */
void
av_dispatch_autovect(uint_t vec)
{
	struct autovec *av;

	ASSERT_STACK_ALIGNED();

	while ((av = autovect[vec].avh_link) != NULL) {
		uint_t numcalled = 0;
		uint_t claimed = 0;

		for (; av; av = av->av_link) {
			uint_t r;
			uint_t (*intr)() = av->av_vector;
			caddr_t arg1 = av->av_intarg1;
			caddr_t arg2 = av->av_intarg2;
			dev_info_t *dip = av->av_dip;

			/*
			 * We must walk the entire chain.  Removed handlers
			 * may be anywhere in the chain.
			 */
			if (intr == NULL)
				continue;

			DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
			    void *, intr, caddr_t, arg1, caddr_t, arg2);
			r = (*intr)(arg1, arg2);
			DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
			    void *, intr, caddr_t, arg1, uint_t, r);
			numcalled++;
			claimed |= r;
			/* accumulate time spent in low-level handlers */
			if (av->av_ticksp && av->av_prilevel <= LOCK_LEVEL)
				atomic_add_64(av->av_ticksp, intr_get_time());
		}

		/*
		 * If there's only one interrupt handler in the chain,
		 * or if no-one claimed the interrupt at all give up now.
		 */
		if (numcalled == 1 || claimed == 0)
			break;
	}
}
668 
669 /*
670  * Call every soft interrupt handler we can find at this level once.
671  */
672 void
673 av_dispatch_softvect(uint_t pil)
674 {
675 	struct autovec *av;
676 	ddi_softint_hdl_impl_t	*hdlp;
677 	uint_t (*intr)();
678 	caddr_t arg1;
679 	caddr_t arg2;
680 
681 	ASSERT_STACK_ALIGNED();
682 	ASSERT(pil >= 0 && pil <= PIL_MAX);
683 
684 	for (av = softvect[pil].avh_link; av; av = av->av_link) {
685 		/*
686 		 * We must walk the entire chain.  Removed handlers
687 		 * may be anywhere in the chain.
688 		 */
689 		if ((intr = av->av_vector) == NULL)
690 			continue;
691 		arg1 = av->av_intarg1;
692 		arg2 = av->av_intarg2;
693 
694 		hdlp = (ddi_softint_hdl_impl_t *)av->av_intr_id;
695 		ASSERT(hdlp);
696 
697 		/*
698 		 * Each cpu has its own pending bit in hdlp->ih_pending,
699 		 * here av_check/clear_softint_pending is just checking
700 		 * and clearing the pending bit for the current cpu, who
701 		 * has just triggered a softint.
702 		 */
703 		if (av_check_softint_pending(hdlp->ih_pending, B_FALSE)) {
704 			av_clear_softint_pending(hdlp->ih_pending);
705 			(void) (*intr)(arg1, arg2);
706 		}
707 	}
708 }
709 
710 struct regs;
711 
712 /*
713  * Call every NMI handler we know of once.
714  */
/*
 * Call every NMI handler on the nmivect chain once, passing each its
 * registered argument plus the trap-time register state.
 */
void
av_dispatch_nmivect(struct regs *rp)
{
	struct autovec *av;

	ASSERT_STACK_ALIGNED();

	for (av = nmivect; av; av = av->av_link)
		(void) (av->av_vector)(av->av_intarg1, rp);
}
725