xref: /freebsd/sys/net/netisr.c (revision 81d1ffee089aab2652954909acbe6aadd8a1a72c)
/*-
 * Copyright (c) 2001,2002,2003 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/stdarg.h>

#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/netisr.h>

volatile unsigned int	netisr;	/* scheduling bits for network */

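/*
 * One slot per software-interrupt protocol: a handler and an optional
 * input queue.  A set bit in `netisr' above marks the corresponding slot
 * as having work pending for swi_net().
 */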
struct netisr {
	netisr_t	*ni_handler;
	struct ifqueue	*ni_queue;
} netisrs[32];

static struct mtx netisr_mtx;
static void *net_ih;

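/*
 * Schedule the network software interrupt handler (swi_net) to run; the
 * "legacy" name reflects the old setsoftnet() entry point.
 */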
void
legacy_setsoftnet(void)
{
	swi_sched(net_ih, 0);
}

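/*
 * Register a handler, and optionally an input queue, for software
 * interrupt number `num'.  A protocol typically does this from its
 * initialization routine; an illustrative (not verbatim) call for IP
 * would look like:
 *
 *	netisr_register(NETISR_IP, ip_input, &ipintrq);
 */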
void
netisr_register(int num, netisr_t *handler, struct ifqueue *inq)
{

	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	netisrs[num].ni_handler = handler;
	netisrs[num].ni_queue = inq;
}

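/*
 * Remove the handler for software interrupt `num' and drain any packets
 * still waiting on its queue.
 */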
void
netisr_unregister(int num)
{
	struct netisr *ni;
	int s;

	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	ni = &netisrs[num];
	ni->ni_handler = NULL;
	if (ni->ni_queue != NULL) {
		s = splimp();
		IF_DRAIN(ni->ni_queue);
		splx(s);
	}
}

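/*
 * Dispatch statistics, exported read-only via the net.isr sysctl tree
 * defined below.
 */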
struct isrstat {
	int	isrs_count;			/* dispatch count */
	int	isrs_directed;			/* ...successfully dispatched */
	int	isrs_deferred;			/* ...queued instead */
	int	isrs_bypassed;			/* queued packets bypassed */
	int	isrs_queued;			/* intentionally queued */
	int	isrs_swi_count;			/* swi_net handlers called */
};
static struct isrstat isrstat;

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr counters");

static int	netisr_enable = 0;
SYSCTL_INT(_net_isr, OID_AUTO, enable, CTLFLAG_RW,
    &netisr_enable, 0, "enable direct dispatch");

SYSCTL_INT(_net_isr, OID_AUTO, count, CTLFLAG_RD,
    &isrstat.isrs_count, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, directed, CTLFLAG_RD,
    &isrstat.isrs_directed, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, deferred, CTLFLAG_RD,
    &isrstat.isrs_deferred, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, bypassed, CTLFLAG_RD,
    &isrstat.isrs_bypassed, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, queued, CTLFLAG_RD,
    &isrstat.isrs_queued, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, swi_count, CTLFLAG_RD,
    &isrstat.isrs_swi_count, 0, "");

/*
 * Call the netisr handler directly instead of queueing the packet, if
 * possible.
 *
 * Ideally, the permissibility of calling the routine would be determined
 * by checking whether splnet() was asserted at the time the device
 * interrupt occurred; if so, someone is already in the network stack and
 * the packet should be queued.
 *
 * However, bus_setup_intr uses INTR_TYPE_NET, which sets splnet before
 * calling the interrupt handler, so the previous mask is unavailable.
 * Instead, approximate this by trying to take the netisr mutex without
 * blocking: if direct dispatch is enabled and the lock is free, run the
 * handler here; otherwise queue the packet for swi_net().
 */
void
netisr_dispatch(int num, struct mbuf *m)
{
	struct netisr *ni;

	isrstat.isrs_count++;
	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	ni = &netisrs[num];
	KASSERT(ni->ni_queue != NULL, ("no queue for isr %d", num));
	if (netisr_enable && mtx_trylock(&netisr_mtx)) {
		isrstat.isrs_directed++;
		/*
		 * One slight problem here is that packets might bypass
		 * each other in the stack if an earlier one happened
		 * to get stuck in the queue.
		 *
		 * We could either:
		 *	a. drain the queue before handling this packet,
		 *	b. fall back to queueing the packet, or
		 *	c. sweep the issue under the rug and ignore it.
		 *
		 * Currently we do (c), and keep a rough event counter.
		 */
		if (_IF_QLEN(ni->ni_queue) > 0)
			isrstat.isrs_bypassed++;
		ni->ni_handler(m);
		mtx_unlock(&netisr_mtx);
	} else {
		isrstat.isrs_deferred++;
		if (IF_HANDOFF(ni->ni_queue, m, NULL))
			schednetisr(num);
	}
}

/*
 * Same as above, but always queue.
 * This is used either in places where we are not confident that
 * direct dispatch is possible, or where queueing is required.
 */
int
netisr_queue(int num, struct mbuf *m)
{
	struct netisr *ni;

	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	ni = &netisrs[num];
	KASSERT(ni->ni_queue != NULL, ("no queue for isr %d", num));
	isrstat.isrs_queued++;
	if (!IF_HANDOFF(ni->ni_queue, m, NULL))
		return (0);
	schednetisr(num);
	return (1);
}

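/*
 * Network software-interrupt handler: atomically fetch and clear the
 * pending bits in `netisr' and, for each bit set, call the registered
 * handler, draining its input queue if it has one.  When DEVICE_POLLING
 * is configured, loop until no further bits are pending.
 */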
static void
swi_net(void *dummy)
{
	struct netisr *ni;
	struct mbuf *m;
	u_int bits;
	int i;
#ifdef DEVICE_POLLING
	const int polling = 1;
#else
	const int polling = 0;
#endif

	mtx_lock(&netisr_mtx);
	do {
		bits = atomic_readandclear_int(&netisr);
		if (bits == 0)
			break;
		while ((i = ffs(bits)) != 0) {
			isrstat.isrs_swi_count++;
			i--;
			bits &= ~(1 << i);
			ni = &netisrs[i];
			if (ni->ni_handler == NULL) {
				printf("swi_net: unregistered isr %d.\n", i);
				continue;
			}
			if (ni->ni_queue == NULL)
				ni->ni_handler(NULL);
			else
				for (;;) {
					IF_DEQUEUE(ni->ni_queue, m);
					if (m == NULL)
						break;
					ni->ni_handler(m);
				}
		}
	} while (polling);
	mtx_unlock(&netisr_mtx);
}

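/*
 * Initialize the netisr mutex and hook swi_net() up as the SWI_NET
 * software interrupt handler at boot.
 */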
static void
start_netisr(void *dummy)
{

	mtx_init(&netisr_mtx, "netisr lock", NULL, MTX_DEF);
	if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, 0, &net_ih))
		panic("start_netisr");
}
SYSINIT(start_netisr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_netisr, NULL)