/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Autovectored Interrupt Configuration and Deconfiguration
 */

#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/trap.h>
#include <sys/t_lock.h>
#include <sys/avintr.h>
#include <sys/kmem.h>
#include <sys/machlock.h>
#include <sys/systm.h>
#include <sys/machsystm.h>
#include <sys/sunddi.h>
#include <sys/x_call.h>
#include <sys/cpuvar.h>
#include <sys/atomic.h>
#include <sys/smp_impldefs.h>
#include <sys/sdt.h>
#include <sys/stack.h>
#include <sys/ddi_impldefs.h>
#ifdef __xpv
#include <sys/evtchn_impl.h>
#endif

typedef struct av_softinfo {
	cpuset_t	av_pending;	/* pending bitmasks */
} av_softinfo_t;

static void insert_av(void *intr_id, struct av_head *vectp, avfunc f,
	caddr_t arg1, caddr_t arg2, uint64_t *ticksp, int pri_level,
	dev_info_t *dip);
static void remove_av(void *intr_id, struct av_head *vectp, avfunc f,
	int pri_level, int vect);

/*
 * Arrange for a driver to be called when a particular
 * auto-vectored interrupt occurs.
 * NOTE: if a device can generate interrupts on more than
 * one level, or if a driver services devices that interrupt
 * on more than one level, then the driver should install
 * itself on each of those levels.
 */
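/*
 * A minimal sketch (hypothetical driver code, not part of this file) of the
 * note above: a driver whose devices interrupt at two different levels
 * registers the same handler once per level.  The handler name, levels,
 * vectors and softstate argument are illustrative only.
 */
#if 0
static uint_t
xx_intr(caddr_t arg1, caddr_t arg2)
{
	/* service the device; report whether the interrupt was ours */
	return (DDI_INTR_CLAIMED);
}

static void
xx_attach_intrs(dev_info_t *dip, caddr_t softstate)
{
	/* one add_avintr() call per (level, vector) pair the device uses */
	(void) add_avintr(NULL, 5, xx_intr, "xx", 9, softstate, NULL,
	    NULL, dip);
	(void) add_avintr(NULL, 9, xx_intr, "xx", 12, softstate, NULL,
	    NULL, dip);
}
#endif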
static char badsoft[] =
	"add_avintr: bad soft interrupt level %d for driver '%s'\n";
static char multilevel[] =
	"!IRQ%d is being shared by drivers with different interrupt levels.\n"
	"This may result in reduced system performance.";
static char multilevel2[] =
	"Cannot register interrupt for '%s' device at IPL %d because it\n"
	"conflicts with another device using the same vector %d with an IPL\n"
	"of %d. Reconfigure the conflicting devices to use different vectors.";

#ifdef __xpv
#define	MAX_VECT	NR_IRQS
#else
#define	MAX_VECT	256
#endif

struct autovec *nmivect = NULL;
struct av_head autovect[MAX_VECT];
struct av_head softvect[LOCK_LEVEL + 1];
kmutex_t av_lock;
/*
 * These software interrupt handlers are dedicated to the ddi timer.
 * Interrupt levels up to 10 are supported, but high-level interrupts
 * must not be used here.
 */
ddi_softint_hdl_impl_t softlevel_hdl[DDI_IPL_10] = {
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL}, /* level 1 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL}, /* level 2 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL}, /* level 3 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL}, /* level 4 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL}, /* level 5 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL}, /* level 6 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL}, /* level 7 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL}, /* level 8 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL}, /* level 9 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL}, /* level 10 */
};
ddi_softint_hdl_impl_t softlevel1_hdl =
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL};

/*
 * Clear/check the softint pending flag corresponding to
 * the current CPU.
 */
void
av_clear_softint_pending(av_softinfo_t *infop)
{
	CPUSET_ATOMIC_DEL(infop->av_pending, CPU->cpu_seqid);
}

boolean_t
av_check_softint_pending(av_softinfo_t *infop, boolean_t check_all)
{
	if (check_all)
		return (!CPUSET_ISNULL(infop->av_pending));
	else
		return (CPU_IN_SET(infop->av_pending, CPU->cpu_seqid) != 0);
}

/*
 * This is the wrapper function which is generally used to set a softint
 * pending
 */
void
av_set_softint_pending(int pri, av_softinfo_t *infop)
{
	kdi_av_set_softint_pending(pri, infop);
}

/*
 * This is kmdb's private entry point to setsoftint, called from kdi_siron.
 * It first sets our av softint pending bit for the current CPU,
 * then it sets the CPU softint pending bit for pri.
 */
void
kdi_av_set_softint_pending(int pri, av_softinfo_t *infop)
{
	CPUSET_ATOMIC_ADD(infop->av_pending, CPU->cpu_seqid);

	atomic_or_32((uint32_t *)&CPU->cpu_softinfo.st_pending, 1 << pri);
}

/*
 * Register an NMI interrupt routine. The first argument is used only to
 * order the various NMI interrupt service routines in the chain; higher
 * levels are called first.
 */
int
add_nmintr(int lvl, avfunc nmintr, char *name, caddr_t arg)
{
	struct autovec  *mem;
	struct autovec *p, *prev = NULL;

	if (nmintr == NULL) {
		printf("Attempt to add null vect for %s on nmi\n", name);
		return (0);

	}

	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
	mem->av_vector = nmintr;
	mem->av_intarg1 = arg;
	mem->av_intarg2 = NULL;
	mem->av_intr_id = NULL;
	mem->av_prilevel = lvl;
	mem->av_dip = NULL;
	mem->av_link = NULL;

	mutex_enter(&av_lock);

	if (!nmivect) {
		nmivect = mem;
		mutex_exit(&av_lock);
		return (1);
	}
	/* find where it goes in list */
	for (p = nmivect; p != NULL; p = p->av_link) {
		if (p->av_vector == nmintr && p->av_intarg1 == arg) {
			/*
			 * already in list
			 * So? Somebody added the same interrupt twice.
			 */
			cmn_err(CE_WARN, "Driver already registered '%s'",
			    name);
			kmem_free(mem, sizeof (struct autovec));
			mutex_exit(&av_lock);
			return (0);
		}
		if (p->av_prilevel < lvl) {
			if (p == nmivect) {   /* it's at head of list */
				mem->av_link = p;
				nmivect = mem;
			} else {
				mem->av_link = p;
				prev->av_link = mem;
			}
			mutex_exit(&av_lock);
			return (1);
		}
		prev = p;

	}
	/* didn't find it, add it to the end */
	prev->av_link = mem;
	mutex_exit(&av_lock);
	return (1);

}
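/*
 * A minimal sketch (hypothetical, not part of this file) of an NMI
 * registration.  The lvl argument only orders handlers within the NMI
 * chain; the handler itself is invoked from av_dispatch_nmivect() with
 * its arg and the interrupted context's register set.
 */
#if 0
static uint_t
xx_nmi(caddr_t arg, caddr_t rp)
{
	/* examine machine state; rp is really a struct regs pointer */
	return (DDI_INTR_CLAIMED);
}

static void
xx_install_nmi(void)
{
	/* a higher lvl places this handler earlier in the chain */
	(void) add_nmintr(10, xx_nmi, "xx_nmi", NULL);
}
#endif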

/*
 * Register a hardware interrupt handler.
 *
 * The autovect data structure supports only 256 interrupts globally.
 * In order to support 256 * #LocalAPIC interrupts, a new PSM module,
 * apix, was introduced. It defines PSM-private data structures for the
 * interrupt handlers. The PSM module initializes addintr to a PSM-private
 * function so that it can override add_avintr() and operate on its own
 * data structures.
 */
int
add_avintr(void *intr_id, int lvl, avfunc xxintr, char *name, int vect,
    caddr_t arg1, caddr_t arg2, uint64_t *ticksp, dev_info_t *dip)
{
	struct av_head *vecp = (struct av_head *)0;
	avfunc f;
	int s, vectindex;			/* save old spl value */
	ushort_t hi_pri;

	if (addintr) {
		return ((*addintr)(intr_id, lvl, xxintr, name, vect,
		    arg1, arg2, ticksp, dip));
	}

	if ((f = xxintr) == NULL) {
		printf("Attempt to add null vect for %s on vector %d\n",
		    name, vect);
		return (0);

	}
	vectindex = vect % MAX_VECT;

	vecp = &autovect[vectindex];

	/*
	 * "hi_pri == 0" implies all entries on list are "unused",
	 * which means that it's OK to just insert this one.
	 */
	hi_pri = vecp->avh_hi_pri;
	if (vecp->avh_link && (hi_pri != 0)) {
		if (((hi_pri > LOCK_LEVEL) && (lvl < LOCK_LEVEL)) ||
		    ((hi_pri < LOCK_LEVEL) && (lvl > LOCK_LEVEL))) {
			cmn_err(CE_WARN, multilevel2, name, lvl, vect,
			    hi_pri);
			return (0);
		}
		if ((vecp->avh_lo_pri != lvl) || (hi_pri != lvl))
			cmn_err(CE_NOTE, multilevel, vect);
	}

	insert_av(intr_id, vecp, f, arg1, arg2, ticksp, lvl, dip);
	s = splhi();
	/*
	 * do whatever machine-specific things are necessary
	 * to set priority level (e.g. set picmasks)
	 */
	mutex_enter(&av_lock);
	(*addspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
	mutex_exit(&av_lock);
	splx(s);
	return (1);

}
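/*
 * A minimal sketch (assumed names, not the actual apix code) of how a PSM
 * module takes over interrupt registration: during its own initialization
 * it points the addintr hook at a PSM-private routine, so the add_avintr()
 * call above simply forwards to it.
 */
#if 0
static int
xx_psm_add_avintr(void *intr_id, int lvl, avfunc f, char *name, int vect,
    caddr_t arg1, caddr_t arg2, uint64_t *ticksp, dev_info_t *dip)
{
	/* record the handler in the PSM's own per-vector tables */
	return (1);
}

static void
xx_psm_softinit(void)
{
	addintr = xx_psm_add_avintr;
}
#endif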

void
update_avsoftintr_args(void *intr_id, int lvl, caddr_t arg2)
{
	struct autovec *p;
	struct autovec *target = NULL;
	struct av_head *vectp = (struct av_head *)&softvect[lvl];

	for (p = vectp->avh_link; p && p->av_vector; p = p->av_link) {
		if (p->av_intr_id == intr_id) {
			target = p;
			break;
		}
	}

	if (target == NULL)
		return;
	target->av_intarg2 = arg2;
}

/*
 * Register a software interrupt handler
 */
int
add_avsoftintr(void *intr_id, int lvl, avfunc xxintr, char *name,
    caddr_t arg1, caddr_t arg2)
{
	int slvl;
	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;

	if ((slvl = slvltovect(lvl)) != -1)
		return (add_avintr(intr_id, lvl, xxintr,
		    name, slvl, arg1, arg2, NULL, NULL));

	if (intr_id == NULL) {
		printf("Attempt to add null intr_id for %s on level %d\n",
		    name, lvl);
		return (0);
	}

	if (xxintr == NULL) {
		printf("Attempt to add null handler for %s on level %d\n",
		    name, lvl);
		return (0);
	}

	if (lvl <= 0 || lvl > LOCK_LEVEL) {
		printf(badsoft, lvl, name);
		return (0);
	}

	if (hdlp->ih_pending == NULL) {
		hdlp->ih_pending =
		    kmem_zalloc(sizeof (av_softinfo_t), KM_SLEEP);
	}

	insert_av(intr_id, &softvect[lvl], xxintr, arg1, arg2, NULL, lvl, NULL);

	return (1);
}
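/*
 * A minimal sketch (hypothetical, assuming a caller-owned handle) of the
 * soft interrupt mechanism in this file: the ddi_softint_hdl_impl_t handle
 * supplies the per-CPU pending state (allocated above on first use), and
 * the registered handler later runs from av_dispatch_softvect() once
 * (*setsoftint)() has marked it pending, as siron()/sir_on() below do.
 */
#if 0
static ddi_softint_hdl_impl_t xx_soft_hdl;

static uint_t
xx_soft(caddr_t arg1, caddr_t arg2)
{
	/* runs at IPL 2 from av_dispatch_softvect() */
	return (DDI_INTR_CLAIMED);
}

static void
xx_soft_setup(void)
{
	(void) add_avsoftintr((void *)&xx_soft_hdl, 2, xx_soft,
	    "xx_soft", NULL, NULL);

	/* mark the softint pending for this CPU and trigger dispatch */
	(*setsoftint)(2, xx_soft_hdl.ih_pending);
}
#endif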

/*
 * Insert an interrupt vector into the chain, ordered by priority from
 * high to low.
 */
static void
insert_av(void *intr_id, struct av_head *vectp, avfunc f, caddr_t arg1,
    caddr_t arg2, uint64_t *ticksp, int pri_level, dev_info_t *dip)
{
	/*
	 * Protect rewrites of the list
	 */
	struct autovec *p, *prep, *mem;

	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
	mem->av_vector = f;
	mem->av_intarg1 = arg1;
	mem->av_intarg2 = arg2;
	mem->av_ticksp = ticksp;
	mem->av_intr_id = intr_id;
	mem->av_prilevel = pri_level;
	mem->av_dip = dip;
	mem->av_link = NULL;

	mutex_enter(&av_lock);

	if (vectp->avh_link == NULL) {	/* Nothing on list - put it at head */
		vectp->avh_link = mem;
		vectp->avh_hi_pri = vectp->avh_lo_pri = (ushort_t)pri_level;

		mutex_exit(&av_lock);
		return;
	}

	/* find where it goes in list */
	prep = NULL;
	for (p = vectp->avh_link; p != NULL; p = p->av_link) {
		if (p->av_vector && p->av_prilevel <= pri_level)
			break;
		prep = p;
	}
	if (prep != NULL) {
		if (prep->av_vector == NULL) {	/* freed struct available */
			p = prep;
			p->av_intarg1 = arg1;
			p->av_intarg2 = arg2;
			p->av_ticksp = ticksp;
			p->av_intr_id = intr_id;
			p->av_prilevel = pri_level;
			p->av_dip = dip;
			if (pri_level > (int)vectp->avh_hi_pri) {
				vectp->avh_hi_pri = (ushort_t)pri_level;
			}
			if (pri_level < (int)vectp->avh_lo_pri) {
				vectp->avh_lo_pri = (ushort_t)pri_level;
			}
			/*
			 * To prevent calling the service routine before args
			 * and ticksp are ready, fill in the vector last.
			 */
			p->av_vector = f;
			mutex_exit(&av_lock);
			kmem_free(mem, sizeof (struct autovec));
			return;
		}

		mem->av_link = prep->av_link;
		prep->av_link = mem;
	} else {
		/* insert new interrupt at beginning of chain */
		mem->av_link = vectp->avh_link;
		vectp->avh_link = mem;
	}
	if (pri_level > (int)vectp->avh_hi_pri) {
		vectp->avh_hi_pri = (ushort_t)pri_level;
	}
	if (pri_level < (int)vectp->avh_lo_pri) {
		vectp->avh_lo_pri = (ushort_t)pri_level;
	}
	mutex_exit(&av_lock);
}

static int
av_rem_softintr(void *intr_id, int lvl, avfunc xxintr, boolean_t rem_softinfo)
{
	struct av_head *vecp = (struct av_head *)0;
	int slvl;
	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;
	av_softinfo_t *infop = (av_softinfo_t *)hdlp->ih_pending;

	if (xxintr == NULL)
		return (0);

	if ((slvl = slvltovect(lvl)) != -1) {
		rem_avintr(intr_id, lvl, xxintr, slvl);
		return (1);
	}

	if (lvl <= 0 || lvl > LOCK_LEVEL) {
		return (0);
	}
	vecp = &softvect[lvl];
	remove_av(intr_id, vecp, xxintr, lvl, 0);

	if (rem_softinfo) {
		kmem_free(infop, sizeof (av_softinfo_t));
		hdlp->ih_pending = NULL;
	}

	return (1);
}

int
av_softint_movepri(void *intr_id, int old_lvl)
{
	int ret;
	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;

	ret = add_avsoftintr(intr_id, hdlp->ih_pri, hdlp->ih_cb_func,
	    DEVI(hdlp->ih_dip)->devi_name, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	if (ret) {
		(void) av_rem_softintr(intr_id, old_lvl, hdlp->ih_cb_func,
		    B_FALSE);
	}

	return (ret);
}

/*
 * Remove a driver from the autovector list.
 */
int
rem_avsoftintr(void *intr_id, int lvl, avfunc xxintr)
{
	return (av_rem_softintr(intr_id, lvl, xxintr, B_TRUE));
}

/*
 * Remove the specified interrupt handler.
 *
 * A PSM module can initialize remintr to a PSM-private function so that
 * it can override rem_avintr() and operate on its own data structures.
 */
void
rem_avintr(void *intr_id, int lvl, avfunc xxintr, int vect)
{
	struct av_head *vecp = (struct av_head *)0;
	avfunc f;
	int s, vectindex;			/* save old spl value */

	if (remintr) {
		(*remintr)(intr_id, lvl, xxintr, vect);
		return;
	}

	if ((f = xxintr) == NULL)
		return;

	vectindex = vect % MAX_VECT;
	vecp = &autovect[vectindex];
	remove_av(intr_id, vecp, f, lvl, vect);
	s = splhi();
	mutex_enter(&av_lock);
	(*delspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
	mutex_exit(&av_lock);
	splx(s);
}
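/*
 * A minimal sketch (hypothetical, matching the add_avintr() example near the
 * top of this file): detach-time teardown must pass the same level, handler
 * and vector that were used at registration time.
 */
#if 0
static void
xx_detach_intrs(void)
{
	rem_avintr(NULL, 5, xx_intr, 9);
	rem_avintr(NULL, 9, xx_intr, 12);
}
#endif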


/*
 * After having made a change to an autovector list, wait until we have
 * seen each cpu not executing an interrupt at that level--so we know our
 * change has taken effect completely (no old state in registers, etc).
 */
void
wait_till_seen(int ipl)
{
	int cpu_in_chain, cix;
	struct cpu *cpup;
	cpuset_t cpus_to_check;

	CPUSET_ALL(cpus_to_check);
	do {
		cpu_in_chain = 0;
		for (cix = 0; cix < NCPU; cix++) {
			cpup = cpu[cix];
			if (cpup != NULL && CPU_IN_SET(cpus_to_check, cix)) {
				if (INTR_ACTIVE(cpup, ipl)) {
					cpu_in_chain = 1;
				} else {
					CPUSET_DEL(cpus_to_check, cix);
				}
			}
		}
	} while (cpu_in_chain);
}

static uint64_t dummy_tick;

/* remove an interrupt vector from the chain */
static void
remove_av(void *intr_id, struct av_head *vectp, avfunc f, int pri_level,
    int vect)
{
	struct autovec *p, *target;
	int	lo_pri, hi_pri;
	int	ipl;
	/*
	 * Protect rewrites of the list
	 */
	target = NULL;

	mutex_enter(&av_lock);
	ipl = pri_level;
	lo_pri = MAXIPL;
	hi_pri = 0;
	for (p = vectp->avh_link; p; p = p->av_link) {
		if ((p->av_vector == f) && (p->av_intr_id == intr_id)) {
			/* found the handler */
			target = p;
			continue;
		}
		if (p->av_vector != NULL) {
			if (p->av_prilevel > hi_pri)
				hi_pri = p->av_prilevel;
			if (p->av_prilevel < lo_pri)
				lo_pri = p->av_prilevel;
		}
	}
	if (ipl < hi_pri)
		ipl = hi_pri;
	if (target == NULL) {	/* not found */
		printf("Couldn't remove function %p at %d, %d\n",
		    (void *)f, vect, pri_level);
		mutex_exit(&av_lock);
		return;
	}

	/*
	 * This drops the handler from the chain; it can no longer be called.
	 * However, there is no guarantee that the handler is not currently
	 * still executing.
	 */
	target->av_vector = NULL;
	/*
	 * There is a race where we could be just about to pick up the ticksp
	 * pointer to increment it after returning from the service routine
	 * in av_dispatch_autovect.  Rather than NULL it out let's just point
	 * it off to something safe so that any final tick update attempt
	 * won't fault.
	 */
	target->av_ticksp = &dummy_tick;
	wait_till_seen(ipl);

	if (lo_pri > hi_pri) {	/* the chain is now empty */
		/* Leave the unused entries here for probable future use */
		vectp->avh_lo_pri = MAXIPL;
		vectp->avh_hi_pri = 0;
	} else {
		if ((int)vectp->avh_lo_pri < lo_pri)
			vectp->avh_lo_pri = (ushort_t)lo_pri;
		if ((int)vectp->avh_hi_pri > hi_pri)
			vectp->avh_hi_pri = (ushort_t)hi_pri;
	}
	mutex_exit(&av_lock);
	wait_till_seen(ipl);
}

/*
 * kmdb uses siron (and thus setsoftint) while the world is stopped in order to
 * inform its driver component that there's work to be done.  We need to keep
 * DTrace from instrumenting kmdb's siron and setsoftint.  We duplicate siron,
 * giving kmdb's version a kdi prefix to keep DTrace at bay.  We also provide
 * kdi_-prefixed versions of the various setsoftint functions for kmdb's use,
 * while the main *setsoftint() functionality is implemented as a wrapper.
 * This allows tracing, while still providing a way for kmdb to sneak in
 * unmolested.
 */
void
kdi_siron(void)
{
	(*kdisetsoftint)(1, softlevel1_hdl.ih_pending);
}

/*
 * Trigger a soft interrupt.
 */
void
siron(void)
{
	/* Level 1 software interrupt */
	(*setsoftint)(1, softlevel1_hdl.ih_pending);
}

/*
 * Trigger software interrupts dedicated to ddi timer.
 */
void
sir_on(int level)
{
	ASSERT(level >= DDI_IPL_1 && level <= DDI_IPL_10);
	(*setsoftint)(level, softlevel_hdl[level-1].ih_pending);
}

/*
 * The handler which is executed on the target CPU.
 */
/*ARGSUSED*/
static int
siron_poke_intr(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	siron();
	return (0);
}

/*
 * May get called from softcall to poke CPUs.
 */
void
siron_poke_cpu(cpuset_t poke)
{
	int cpuid = CPU->cpu_id;

	/*
	 * If we are poking ourselves, we can simply
	 * generate a level 1 softint using siron().
	 */
	if (CPU_IN_SET(poke, cpuid)) {
		siron();
		CPUSET_DEL(poke, cpuid);
		if (CPUSET_ISNULL(poke))
			return;
	}

	xc_call(0, 0, 0, CPUSET2BV(poke), (xc_func_t)siron_poke_intr);
}

/*
 * Walk the autovector table for this vector, invoking each
 * interrupt handler as we go.
 */

extern uint64_t intr_get_time(void);

void
av_dispatch_autovect(uint_t vec)
{
	struct autovec *av;

	ASSERT_STACK_ALIGNED();

	while ((av = autovect[vec].avh_link) != NULL) {
		uint_t numcalled = 0;
		uint_t claimed = 0;

		for (; av; av = av->av_link) {
			uint_t r;
			uint_t (*intr)() = av->av_vector;
			caddr_t arg1 = av->av_intarg1;
			caddr_t arg2 = av->av_intarg2;
			dev_info_t *dip = av->av_dip;

			/*
			 * We must walk the entire chain.  Removed handlers
			 * may be anywhere in the chain.
			 */
			if (intr == NULL)
				continue;

			DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
			    void *, intr, caddr_t, arg1, caddr_t, arg2);
			r = (*intr)(arg1, arg2);
			DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
			    void *, intr, caddr_t, arg1, uint_t, r);
			numcalled++;
			claimed |= r;
			if (av->av_ticksp && av->av_prilevel <= LOCK_LEVEL)
				atomic_add_64(av->av_ticksp, intr_get_time());
		}

		/*
		 * If there's only one interrupt handler in the chain,
		 * or if no one claimed the interrupt at all, give up now.
		 */
		if (numcalled == 1 || claimed == 0)
			break;
	}
}

/*
 * Call every soft interrupt handler we can find at this level once.
 */
void
av_dispatch_softvect(uint_t pil)
{
	struct autovec *av;
	ddi_softint_hdl_impl_t	*hdlp;
	uint_t (*intr)();
	caddr_t arg1;
	caddr_t arg2;

	ASSERT_STACK_ALIGNED();
	ASSERT3U(pil, <=, PIL_MAX);

	for (av = softvect[pil].avh_link; av; av = av->av_link) {
		/*
		 * We must walk the entire chain.  Removed handlers
		 * may be anywhere in the chain.
		 */
		if ((intr = av->av_vector) == NULL)
			continue;
		arg1 = av->av_intarg1;
		arg2 = av->av_intarg2;

		hdlp = (ddi_softint_hdl_impl_t *)av->av_intr_id;
		ASSERT(hdlp);

		/*
		 * Each cpu has its own pending bit in hdlp->ih_pending;
		 * here av_check/clear_softint_pending just checks and
		 * clears the pending bit for the current cpu, which has
		 * just triggered a softint.
		 */
		if (av_check_softint_pending(hdlp->ih_pending, B_FALSE)) {
			av_clear_softint_pending(hdlp->ih_pending);
			(void) (*intr)(arg1, arg2);
		}
	}
}

struct regs;

/*
 * Call every NMI handler we know of once.
 */
void
av_dispatch_nmivect(struct regs *rp)
{
	struct autovec *av;

	ASSERT_STACK_ALIGNED();

	for (av = nmivect; av; av = av->av_link)
		(void) (av->av_vector)(av->av_intarg1, rp);
}
787