xref: /freebsd/sys/net/netisr.c (revision 729362425c09cf6b362366aabc6fb547eee8035a)
/*-
 * Copyright (c) 2001,2002,2003 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/stdarg.h>

#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/netisr.h>

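/*
 * Each protocol registers a handler and, usually, an input queue in the
 * netisrs[] table below.  The "netisr" word holds one scheduling bit per
 * registered protocol: schednetisr() (see <net/netisr.h>) sets the
 * protocol's bit and arranges for the swi_net() software interrupt to
 * run, which then services every protocol whose bit is set.
 */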
volatile unsigned int	netisr;	/* scheduling bits for network */

struct netisr {
	netisr_t	*ni_handler;
	struct ifqueue	*ni_queue;
} netisrs[32];

static struct mtx netisr_mtx;
static void *net_ih;

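/*
 * Schedule the network software interrupt.  This is the hook reached from
 * the schednetisr()/setsoftnet() path once a protocol's scheduling bit
 * has been set.
 */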
void
legacy_setsoftnet(void)
{
	swi_sched(net_ih, 0);
}

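/*
 * Register a protocol's handler and (optionally) its input queue for a
 * given netisr number.  A protocol typically registers at initialization
 * time with something along the lines of (names illustrative):
 *
 *	netisr_register(NETISR_IP, ip_input, &ipintrq);
 *
 * Protocols that pass a NULL queue cannot have packets queued to them;
 * netisr_dispatch()/netisr_queue() drop such packets, and swi_net() just
 * calls the handler with a NULL mbuf when the bit is scheduled.
 */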
void
netisr_register(int num, netisr_t *handler, struct ifqueue *inq)
{

	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	netisrs[num].ni_handler = handler;
	netisrs[num].ni_queue = inq;
}

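/*
 * Remove a protocol's handler from the table and discard anything still
 * sitting in its input queue.
 */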
void
netisr_unregister(int num)
{
	struct netisr *ni;
	int s;

	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	ni = &netisrs[num];
	ni->ni_handler = NULL;
	if (ni->ni_queue != NULL) {
		s = splimp();
		IF_DRAIN(ni->ni_queue);
		splx(s);
	}
}

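/*
 * Dispatch and queueing statistics, exported read-only through the
 * net.isr sysctl tree below.
 */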
struct isrstat {
	int	isrs_count;			/* dispatch count */
	int	isrs_directed;			/* ...successfully dispatched */
	int	isrs_deferred;			/* ...queued instead */
	int	isrs_bypassed;			/* bypassed queued packets */
	int	isrs_queued;			/* intentionally queued */
	int	isrs_swi_count;			/* swi_net handlers called */
};
static struct isrstat isrstat;

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr counters");

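/*
 * Direct dispatch is disabled by default; set net.isr.enable to a nonzero
 * value to let netisr_dispatch() call a handler directly whenever the
 * netisr lock can be acquired without blocking.
 */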
static int	netisr_enable = 0;
SYSCTL_INT(_net_isr, OID_AUTO, enable, CTLFLAG_RW,
    &netisr_enable, 0, "enable direct dispatch");

SYSCTL_INT(_net_isr, OID_AUTO, count, CTLFLAG_RD,
    &isrstat.isrs_count, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, directed, CTLFLAG_RD,
    &isrstat.isrs_directed, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, deferred, CTLFLAG_RD,
    &isrstat.isrs_deferred, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, bypassed, CTLFLAG_RD,
    &isrstat.isrs_bypassed, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, queued, CTLFLAG_RD,
    &isrstat.isrs_queued, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, swi_count, CTLFLAG_RD,
    &isrstat.isrs_swi_count, 0, "");

/*
 * Call the netisr directly instead of queueing the packet, if possible.
 *
 * Ideally, the permissibility of calling the routine would be determined
 * by checking if splnet() was asserted at the time the device interrupt
 * occurred; if so, this indicates that someone is in the network stack.
 *
 * However, bus_setup_intr uses INTR_TYPE_NET, which sets splnet before
 * calling the interrupt handler, so the previous mask is unavailable.
 * Approximate this by checking intr_nesting_level instead; if any SWI
 * handlers are running, the packet is queued.
 */
void
netisr_dispatch(int num, struct mbuf *m)
{
	struct netisr *ni;

	isrstat.isrs_count++;
	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	ni = &netisrs[num];
	if (ni->ni_queue == NULL) {
		m_freem(m);
		return;
	}
	if (netisr_enable && mtx_trylock(&netisr_mtx)) {
		isrstat.isrs_directed++;
		/*
		 * One slight problem here is that packets might bypass
		 * each other in the stack, if an earlier one happened
		 * to get stuck in the queue.
		 *
		 * We could:
		 *	a. drain the queue before handling this packet,
		 *	b. fall back to queueing the packet,
		 *	c. sweep the issue under the rug and ignore it.
		 *
		 * Currently, we do c), and keep a rough event counter.
		 */
		if (_IF_QLEN(ni->ni_queue) > 0)
			isrstat.isrs_bypassed++;
		ni->ni_handler(m);
		mtx_unlock(&netisr_mtx);
	} else {
		isrstat.isrs_deferred++;
		if (IF_HANDOFF(ni->ni_queue, m, NULL))
			schednetisr(num);
	}
}

/*
 * Same as above, but always queue.
 * This is used either in places where we are not confident that direct
 * dispatch is possible, or where queueing is required.
 */
int
netisr_queue(int num, struct mbuf *m)
{
	struct netisr *ni;

	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	ni = &netisrs[num];
	if (ni->ni_queue == NULL) {
		m_freem(m);
		return (1);
	}
	isrstat.isrs_queued++;
	if (!IF_HANDOFF(ni->ni_queue, m, NULL))
		return (0);
	schednetisr(num);
	return (1);
}

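/*
 * Network software interrupt handler: atomically fetch and clear the
 * pending scheduling bits, then run each scheduled protocol's handler,
 * draining its input queue if it has one.  With DEVICE_POLLING the loop
 * repeats until no bits remain set; otherwise a single pass is made.
 */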
static void
swi_net(void *dummy)
{
	struct netisr *ni;
	struct mbuf *m;
	u_int bits;
	int i;
#ifdef DEVICE_POLLING
	const int polling = 1;
#else
	const int polling = 0;
#endif

	mtx_lock(&netisr_mtx);
	do {
		bits = atomic_readandclear_int(&netisr);
		if (bits == 0)
			break;
		while ((i = ffs(bits)) != 0) {
			isrstat.isrs_swi_count++;
			i--;
			bits &= ~(1 << i);
			ni = &netisrs[i];
			if (ni->ni_handler == NULL) {
				printf("swi_net: unregistered isr %d.\n", i);
				continue;
			}
			if (ni->ni_queue == NULL)
				ni->ni_handler(NULL);
			else
				for (;;) {
					IF_DEQUEUE(ni->ni_queue, m);
					if (m == NULL)
						break;
					ni->ni_handler(m);
				}
		}
	} while (polling);
	mtx_unlock(&netisr_mtx);
}

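/*
 * Initialize the netisr lock and attach swi_net() as the SWI_NET software
 * interrupt handler; run at SI_SUB_SOFTINTR time via the SYSINIT below.
 */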
static void
start_netisr(void *dummy)
{

	mtx_init(&netisr_mtx, "netisr lock", NULL, MTX_DEF);
	if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, 0, &net_ih))
		panic("start_netisr");
}
SYSINIT(start_netisr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_netisr, NULL)