xref: /freebsd/sys/kern/kern_intr.c (revision 77a0943ded95b9e6438f7db70c4a28e4d93946d4)
/*
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */


#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/ipl.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>

#include <net/netisr.h>		/* prototype for legacy_setsoftnet */

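/*
 * Handles for the standard software interrupt handlers, plus the
 * clock and tty interrupt thread pointers.
 */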
struct intrhand *net_ih;
struct intrhand *vm_ih;
struct intrhand *softclock_ih;
struct ithd	*clk_ithd;
struct ithd	*tty_ithd;

static void start_softintr(void *);
static void swi_net(void *);

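/*
 * Map an interrupt type (INTR_TYPE_* plus an optional INTR_FAST) to the
 * scheduling priority used for its interrupt thread.
 */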
int
ithread_priority(flags)
	int flags;
{
	int pri;

	flags &= ~INTR_MPSAFE;
	switch (flags) {
	case INTR_TYPE_TTY:             /* keyboard or parallel port */
		pri = PI_TTYLOW;
		break;
	case (INTR_TYPE_TTY | INTR_FAST): /* sio */
		pri = PI_TTYHIGH;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;          /* XXX or PI_CAM? */
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;          /* don't care */
		break;
	/* We didn't specify an interrupt level. */
	default:
		panic("ithread_priority: no interrupt type in flags");
	}

	return pri;
}

void sithd_loop(void *);

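/*
 * Register a software interrupt handler.  If the interrupt thread named
 * by ithdp does not exist yet (or if ithdp is NULL), create a kernel
 * thread running sithd_loop() at the given priority; then append the
 * new handler to that thread's handler chain.
 */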
struct intrhand *
sinthand_add(const char *name, struct ithd **ithdp, driver_intr_t handler,
	    void *arg, int pri, int flags)
{
	struct proc *p;
	struct ithd *ithd;
	struct intrhand *ih;
	struct intrhand *this_ih;

	ithd = (ithdp != NULL) ? *ithdp : NULL;


	if (ithd == NULL) {
		int error;
		ithd = malloc(sizeof (struct ithd), M_DEVBUF, M_WAITOK | M_ZERO);
		error = kthread_create(sithd_loop, NULL, &p,
			RFSTOPPED | RFHIGHPID, "swi%d: %s", pri, name);
		if (error)
			panic("sinthand_add: Can't create interrupt thread");
		ithd->it_proc = p;
		p->p_ithd = ithd;
		p->p_rtprio.type = RTP_PRIO_ITHREAD;
		p->p_rtprio.prio = pri + PI_SOFT;	/* soft interrupt */
		p->p_stat = SWAIT;			/* we're idle */
		/* XXX - some hacks are _really_ gross */
		if (pri == SWI_CLOCK)
			p->p_flag |= P_NOLOAD;
		if (ithdp != NULL)
			*ithdp = ithd;
	}
	this_ih = malloc(sizeof (struct intrhand), M_DEVBUF, M_WAITOK | M_ZERO);
	this_ih->ih_handler = handler;
	this_ih->ih_argument = arg;
	this_ih->ih_flags = flags;
	this_ih->ih_ithd = ithd;
	this_ih->ih_name = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
	if ((ih = ithd->it_ih)) {
		while (ih->ih_next != NULL)
			ih = ih->ih_next;
		ih->ih_next = this_ih;
	} else
		ithd->it_ih = this_ih;
	strcpy(this_ih->ih_name, name);
	return (this_ih);
}


/*
 * Schedule a heavyweight software interrupt process.
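 *
 * With the SWI_DELAY flag only the handler is marked pending (ih_need);
 * the thread is not made runnable until a later call without SWI_DELAY.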
 */
void
sched_swi(struct intrhand *ih, int flag)
{
	struct ithd *it = ih->ih_ithd;	/* and the process that does it */
	struct proc *p = it->it_proc;

	atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */

	CTR3(KTR_INTR, "sched_swi pid %d(%s) need=%d",
		p->p_pid, p->p_comm, it->it_need);

	/*
	 * Set it_need so that if the thread is already running but close
	 * to done, it will do another go-round.  Then get the sched lock
	 * and see if the thread is on whichkqs yet.  If not, put it on
	 * there.  In any case, kick everyone so that if the new thread
	 * is higher priority than their current thread, it gets run now.
	 */
	ih->ih_need = 1;
	if (!(flag & SWI_DELAY)) {
		it->it_need = 1;
		mtx_enter(&sched_lock, MTX_SPIN);
		if (p->p_stat == SWAIT) { /* not on run queue */
			CTR1(KTR_INTR, "sched_swi: setrunqueue %d", p->p_pid);
/*			membar_lock(); */
			p->p_stat = SRUN;
			setrunqueue(p);
			aston();
		}
		else {
			CTR3(KTR_INTR, "sched_swi %d: it_need %d, state %d",
				p->p_pid, it->it_need, p->p_stat );
		}
		mtx_exit(&sched_lock, MTX_SPIN);
		need_resched();
	}
}

/*
 * This is the main code for soft interrupt threads.
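 * Each thread sleeps in SWAIT until sched_swi() makes it runnable, then
 * calls every handler on its chain whose ih_need flag is set.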
 */
void
sithd_loop(void *dummy)
{
	struct ithd *it;		/* our thread context */
	struct intrhand *ih;		/* and our interrupt handler chain */

	struct proc *p = curproc;
	it = p->p_ithd;			/* point to myself */

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		CTR3(KTR_INTR, "sithd_loop pid %d(%s) need=%d",
		     p->p_pid, p->p_comm, it->it_need);
		while (it->it_need) {
			/*
			 * Service interrupts.  If another interrupt
			 * arrives while we are running, it will set
			 * it_need to denote that we should make
			 * another pass.
			 */
			it->it_need = 0;
			for (ih = it->it_ih; ih != NULL; ih = ih->ih_next) {
				if (!ih->ih_need)
					continue;
				ih->ih_need = 0;
				CTR5(KTR_INTR,
				    "sithd_loop pid %d ih=%p: %p(%p) flg=%x",
				    p->p_pid, (void *)ih,
				    (void *)ih->ih_handler, ih->ih_argument,
				    ih->ih_flags);

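				/* Run handlers not marked INTR_MPSAFE under the Giant lock. */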
				if ((ih->ih_flags & INTR_MPSAFE) == 0)
					mtx_enter(&Giant, MTX_DEF);
				ih->ih_handler(ih->ih_argument);
				if ((ih->ih_flags & INTR_MPSAFE) == 0)
					mtx_exit(&Giant, MTX_DEF);
			}
		}

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		mtx_assert(&Giant, MA_NOTOWNED);
		mtx_enter(&sched_lock, MTX_SPIN);
		if (!it->it_need) {
			p->p_stat = SWAIT; /* we're idle */
			CTR1(KTR_INTR, "sithd_loop pid %d: done", p->p_pid);
			mi_switch();
			CTR1(KTR_INTR, "sithd_loop pid %d: resumed", p->p_pid);
		}
		mtx_exit(&sched_lock, MTX_SPIN);
	}
}

SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(dummy)
	void *dummy;
{
	net_ih = sinthand_add("net", NULL, swi_net, NULL, SWI_NET, 0);
	softclock_ih =
	    sinthand_add("clock", &clk_ithd, softclock, NULL, SWI_CLOCK, INTR_MPSAFE);
	vm_ih = sinthand_add("vm", NULL, swi_vm, NULL, SWI_VM, 0);
}

void
legacy_setsoftnet()
{
	sched_swi(net_ih, SWI_NOSWITCH);
}

/*
 * XXX: This should really be in the network code somewhere and installed
 * via a SI_SUB_SOFTINTR, SI_ORDER_MIDDLE sysinit.
 */
void	(*netisrs[32]) __P((void));
u_int	netisr;

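/*
 * Dispatch pending protocol handlers: each bit set in the netisr mask
 * selects the corresponding entry in netisrs[] to run.
 */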
static void
swi_net(void *dummy)
{
	u_int bits;
	int i;

	bits = atomic_readandclear_int(&netisr);
	while ((i = ffs(bits)) != 0) {
		i--;
		netisrs[i]();
		bits &= ~(1 << i);
	}
}

/*
 * Dummy spl calls.  The only reason for these is to not break
 * all the code which expects to call them.
 */
void spl0 (void) {}
void splx (intrmask_t x) {}
intrmask_t  splq(intrmask_t mask) { return 0; }
intrmask_t  splbio(void) { return 0; }
intrmask_t  splcam(void) { return 0; }
intrmask_t  splclock(void) { return 0; }
intrmask_t  splhigh(void) { return 0; }
intrmask_t  splimp(void) { return 0; }
intrmask_t  splnet(void) { return 0; }
intrmask_t  splsoftcam(void) { return 0; }
intrmask_t  splsoftcambio(void) { return 0; }
intrmask_t  splsoftcamnet(void) { return 0; }
intrmask_t  splsoftclock(void) { return 0; }
intrmask_t  splsofttty(void) { return 0; }
intrmask_t  splsoftvm(void) { return 0; }
intrmask_t  splsofttq(void) { return 0; }
intrmask_t  splstatclock(void) { return 0; }
intrmask_t  spltty(void) { return 0; }
intrmask_t  splvm(void) { return 0; }
315