/*-
 * Copyright (c) 2001,2002,2003 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/stdarg.h>

#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/netisr.h>

volatile unsigned int	netisr;		/* scheduling bits for network */

struct netisr {
	netisr_t	*ni_handler;
	struct ifqueue	*ni_queue;
	int		ni_flags;
} netisrs[32];

static void *net_ih;

void
legacy_setsoftnet(void)
{
	swi_sched(net_ih, 0);
}

void
netisr_register(int num, netisr_t *handler, struct ifqueue *inq, int flags)
{

	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	netisrs[num].ni_handler = handler;
	netisrs[num].ni_queue = inq;
	netisrs[num].ni_flags = flags;
}

void
netisr_unregister(int num)
{
	struct netisr *ni;

	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	ni = &netisrs[num];
	ni->ni_handler = NULL;
	if (ni->ni_queue != NULL)
		IF_DRAIN(ni->ni_queue);
	ni->ni_queue = NULL;
}

struct isrstat {
	int	isrs_count;		/* dispatch count */
	int	isrs_directed;		/* ...directly dispatched */
	int	isrs_deferred;		/* ...queued instead */
	int	isrs_queued;		/* intentionally queued */
	int	isrs_drop;		/* dropped because no handler */
	int	isrs_swi_count;		/* swi_net handlers called */
};
static struct isrstat isrstat;

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr counters");

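/*
 * net.isr.direct controls how netisr_dispatch() delivers packets: when
 * enabled (the default) and the target netisr is marked NETISR_MPSAFE,
 * the handler is called directly in the caller's context; otherwise the
 * packet is queued and processed later by the SWI_NET software interrupt.
 */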
static int	netisr_direct = 1;
SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RW,
    &netisr_direct, 0, "enable direct dispatch");
TUNABLE_INT("net.isr.direct", &netisr_direct);

SYSCTL_INT(_net_isr, OID_AUTO, count, CTLFLAG_RD,
    &isrstat.isrs_count, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, directed, CTLFLAG_RD,
    &isrstat.isrs_directed, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, deferred, CTLFLAG_RD,
    &isrstat.isrs_deferred, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, queued, CTLFLAG_RD,
    &isrstat.isrs_queued, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, drop, CTLFLAG_RD,
    &isrstat.isrs_drop, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, swi_count, CTLFLAG_RD,
    &isrstat.isrs_swi_count, 0, "");

/*
 * Process all packets currently present in a netisr queue.  Used to
 * drain an existing set of packets waiting for processing when we
 * begin direct dispatch, to avoid processing packets out of order.
 */
static void
netisr_processqueue(struct netisr *ni)
{
	struct mbuf *m;

	for (;;) {
		IF_DEQUEUE(ni->ni_queue, m);
		if (m == NULL)
			break;
		ni->ni_handler(m);
	}
}

/*
 * Call the netisr directly instead of queueing the packet, if possible.
 */
void
netisr_dispatch(int num, struct mbuf *m)
{
	struct netisr *ni;

	isrstat.isrs_count++;		/* XXX redundant */
	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	ni = &netisrs[num];
	if (ni->ni_queue == NULL) {
		isrstat.isrs_drop++;
		m_freem(m);
		return;
	}
	/*
	 * Do direct dispatch only for MPSAFE netisrs (and only when
	 * enabled).  Note that when a netisr is marked MPSAFE we permit
	 * multiple concurrent instances to run.  We guarantee only the
	 * order in which packets are processed for each "dispatch point"
	 * in the system (i.e. call to netisr_dispatch or netisr_queue).
	 * This ensures ordering of packets from an interface but does not
	 * guarantee ordering between multiple places in the system
	 * (e.g. IP dispatched from interfaces vs. IP queued from IPSec).
	 */
	if (netisr_direct && (ni->ni_flags & NETISR_MPSAFE)) {
		isrstat.isrs_directed++;
		/*
		 * NB: We used to drain the queue before handling the
		 * packet but now do not.  Doing so here would not
		 * preserve ordering, so instead we fall back to
		 * guaranteeing order only from dispatch points in the
		 * system (see above).
		 */
		ni->ni_handler(m);
	} else {
		isrstat.isrs_deferred++;
		if (IF_HANDOFF(ni->ni_queue, m, NULL))
			schednetisr(num);
	}
}

/*
 * Same as above, but always queue.
 * This is used either in places where we are not confident that direct
 * dispatch is possible, or where queueing is required.
 * It returns (0) on success and an errno value on failure.  On failure
 * the mbuf has been freed.
 */
int
netisr_queue(int num, struct mbuf *m)
{
	struct netisr *ni;

	KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
	    ("bad isr %d", num));
	ni = &netisrs[num];
	if (ni->ni_queue == NULL) {
		isrstat.isrs_drop++;
		m_freem(m);
		return (ENXIO);
	}
	isrstat.isrs_queued++;
	if (!IF_HANDOFF(ni->ni_queue, m, NULL))
		return (ENOBUFS);	/* IF_HANDOFF has freed the mbuf */
	schednetisr(num);
	return (0);
}

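/*
 * Software interrupt handler for the network stack.  Snapshot and clear
 * the scheduling bits in `netisr', then run the handler for each bit that
 * was set, draining its queue if it has one.  Handlers that are not
 * marked NETISR_MPSAFE run under Giant.  When DEVICE_POLLING is
 * configured the loop repeats until no new bits have been scheduled.
 */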
static void
swi_net(void *dummy)
{
	struct netisr *ni;
	u_int bits;
	int i;
#ifdef DEVICE_POLLING
	const int polling = 1;
#else
	const int polling = 0;
#endif

	do {
		bits = atomic_readandclear_int(&netisr);
		if (bits == 0)
			break;
		while ((i = ffs(bits)) != 0) {
			isrstat.isrs_swi_count++;
			i--;
			bits &= ~(1 << i);
			ni = &netisrs[i];
			if (ni->ni_handler == NULL) {
				printf("swi_net: unregistered isr %d.\n", i);
				continue;
			}
			if ((ni->ni_flags & NETISR_MPSAFE) == 0) {
				mtx_lock(&Giant);
				if (ni->ni_queue == NULL)
					ni->ni_handler(NULL);
				else
					netisr_processqueue(ni);
				mtx_unlock(&Giant);
			} else {
				if (ni->ni_queue == NULL)
					ni->ni_handler(NULL);
				else
					netisr_processqueue(ni);
			}
		}
	} while (polling);
}

static void
start_netisr(void *dummy)
{

	if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, INTR_MPSAFE, &net_ih))
		panic("start_netisr");
}
SYSINIT(start_netisr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_netisr, NULL)
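/*
 * Usage sketch (illustrative only; the real registration call lives with
 * each protocol, e.g. IP's in ip_input.c): a protocol registers its
 * handler and input queue once at initialization time, roughly
 *
 *	netisr_register(NETISR_IP, ip_input, &ipintrq, NETISR_MPSAFE);
 *
 * after which packets are handed to it with netisr_dispatch(NETISR_IP, m)
 * for possible direct dispatch, or with netisr_queue(NETISR_IP, m) when
 * deferred processing in the software interrupt thread is required.
 */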