/*-
 * Copyright (c) 2001,2002,2003 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_net.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/stdarg.h>

#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/netisr.h>

/*
 * debug_mpsafenet controls network subsystem-wide use of the Giant lock,
 * from system calls down to interrupt handlers.  It can be changed only via
 * a tunable at boot, not at run-time, due to the complexity of unwinding.
 * The compiled default is set via a kernel option; right now, the default
 * unless otherwise specified is to run the network stack without Giant.
 */
#ifdef NET_WITH_GIANT
int     debug_mpsafenet = 0;
#else
int     debug_mpsafenet = 1;
#endif
int     debug_mpsafenet_toolatetotwiddle = 0;

TUNABLE_INT("debug.mpsafenet", &debug_mpsafenet);
SYSCTL_INT(_debug, OID_AUTO, mpsafenet, CTLFLAG_RD, &debug_mpsafenet, 0,
    "Enable/disable MPSAFE network support");
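
/*
 * For reference only (a sketch, not code from this file): the compiled-in
 * default above is typically overridden either at boot via the loader
 * tunable declared here, or at kernel build time via the option named in
 * the #ifdef, e.g.:
 *
 *      # /boot/loader.conf -- force the Giant-locked network stack
 *      debug.mpsafenet="0"
 *
 *      # kernel configuration file -- change the compiled-in default
 *      options NET_WITH_GIANT
 */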

volatile unsigned int   netisr; /* scheduling bits for network */

struct netisr {
        netisr_t        *ni_handler;
        struct ifqueue  *ni_queue;
        int             ni_flags;
} netisrs[32];

static void *net_ih;

/*
 * Not all network code is currently capable of running MPSAFE; however,
 * most of it is.  Since those sections that are not are generally optional
 * components not shipped with default kernels, we provide a basic way to
 * determine whether MPSAFE operation is permitted: based on a default of
 * yes, we permit non-MPSAFE components to use a registration call to
 * identify that they require Giant.  If the system is still early in the
 * boot process, then we change the debug_mpsafenet setting to choose a
 * non-MPSAFE execution mode (degraded).  If it's too late for that (since
 * the setting cannot be changed at run time), we generate a console warning
 * that the configuration may be unsafe.
 */
static int mpsafe_warn_count;

/*
 * Function call implementing registration of a non-MPSAFE network component.
 */
void
net_warn_not_mpsafe(const char *component)
{

        /*
         * If we're running with Giant over the network stack, there is no
         * problem.
         */
        if (!debug_mpsafenet)
                return;

        /*
         * If it's not too late to change the MPSAFE setting for the network
         * stack, do so now.  This effectively suppresses warnings by
         * components registering later.
         */
        if (!debug_mpsafenet_toolatetotwiddle) {
                debug_mpsafenet = 0;
                printf("WARNING: debug.mpsafenet forced to 0 as %s requires "
                    "Giant\n", component);
                return;
        }

        /*
         * We must run without Giant, so generate a console warning with some
         * information on what to do about it.  The system may be operating
         * unsafely, however.
         */
        printf("WARNING: Network stack Giant-free, but %s requires Giant.\n",
            component);
        if (mpsafe_warn_count == 0)
                printf("    Consider adding 'options NET_WITH_GIANT' or "
                    "setting debug.mpsafenet=0\n");
        mpsafe_warn_count++;
}
1431d8cd39eSRobert Watson */ 1441d8cd39eSRobert Watson static void 1451d8cd39eSRobert Watson net_mpsafe_toolate(void *arg) 1461d8cd39eSRobert Watson { 1471d8cd39eSRobert Watson 1481d8cd39eSRobert Watson debug_mpsafenet_toolatetotwiddle = 1; 1491d8cd39eSRobert Watson 1501d8cd39eSRobert Watson if (!debug_mpsafenet) 1511d8cd39eSRobert Watson printf("WARNING: MPSAFE network stack disabled, expect " 1521d8cd39eSRobert Watson "reduced performance.\n"); 1531d8cd39eSRobert Watson } 1541d8cd39eSRobert Watson 1551d8cd39eSRobert Watson SYSINIT(net_mpsafe_toolate, SI_SUB_SETTINGS, SI_ORDER_ANY, net_mpsafe_toolate, 1561d8cd39eSRobert Watson NULL); 1571d8cd39eSRobert Watson 158e3b6e33cSJake Burkholder void 159e3b6e33cSJake Burkholder legacy_setsoftnet(void) 160e3b6e33cSJake Burkholder { 161e3b6e33cSJake Burkholder swi_sched(net_ih, 0); 162e3b6e33cSJake Burkholder } 163e3b6e33cSJake Burkholder 1641cafed39SJonathan Lemon void 1657902224cSSam Leffler netisr_register(int num, netisr_t *handler, struct ifqueue *inq, int flags) 166e3b6e33cSJake Burkholder { 167e3b6e33cSJake Burkholder 1681cafed39SJonathan Lemon KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))), 1691cafed39SJonathan Lemon ("bad isr %d", num)); 1701cafed39SJonathan Lemon netisrs[num].ni_handler = handler; 1711cafed39SJonathan Lemon netisrs[num].ni_queue = inq; 1727902224cSSam Leffler if ((flags & NETISR_MPSAFE) && !debug_mpsafenet) 1737902224cSSam Leffler flags &= ~NETISR_MPSAFE; 1747902224cSSam Leffler netisrs[num].ni_flags = flags; 175e3b6e33cSJake Burkholder } 176e3b6e33cSJake Burkholder 1771cafed39SJonathan Lemon void 1781cafed39SJonathan Lemon netisr_unregister(int num) 179e3b6e33cSJake Burkholder { 1801cafed39SJonathan Lemon struct netisr *ni; 181e3b6e33cSJake Burkholder 1821cafed39SJonathan Lemon KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))), 1831cafed39SJonathan Lemon ("bad isr %d", num)); 1841cafed39SJonathan Lemon ni = &netisrs[num]; 1851cafed39SJonathan Lemon ni->ni_handler = NULL; 1867902224cSSam Leffler if (ni->ni_queue != NULL) 1871cafed39SJonathan Lemon IF_DRAIN(ni->ni_queue); 1881cafed39SJonathan Lemon } 1891cafed39SJonathan Lemon 1901cafed39SJonathan Lemon struct isrstat { 1911cafed39SJonathan Lemon int isrs_count; /* dispatch count */ 1927902224cSSam Leffler int isrs_directed; /* ...directly dispatched */ 1931cafed39SJonathan Lemon int isrs_deferred; /* ...queued instead */ 1941cafed39SJonathan Lemon int isrs_queued; /* intentionally queueued */ 1957902224cSSam Leffler int isrs_drop; /* dropped 'cuz no handler */ 1961cafed39SJonathan Lemon int isrs_swi_count; /* swi_net handlers called */ 1971cafed39SJonathan Lemon }; 1981cafed39SJonathan Lemon static struct isrstat isrstat; 1991cafed39SJonathan Lemon 2001cafed39SJonathan Lemon SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr counters"); 2011cafed39SJonathan Lemon 2023164565dSRobert Watson static int netisr_enable = 0; 2031cafed39SJonathan Lemon SYSCTL_INT(_net_isr, OID_AUTO, enable, CTLFLAG_RW, 2041cafed39SJonathan Lemon &netisr_enable, 0, "enable direct dispatch"); 205e590eca2SRobert Watson TUNABLE_INT("net.isr.enable", &netisr_enable); 2061cafed39SJonathan Lemon 2071cafed39SJonathan Lemon SYSCTL_INT(_net_isr, OID_AUTO, count, CTLFLAG_RD, 2081cafed39SJonathan Lemon &isrstat.isrs_count, 0, ""); 2091cafed39SJonathan Lemon SYSCTL_INT(_net_isr, OID_AUTO, directed, CTLFLAG_RD, 2101cafed39SJonathan Lemon &isrstat.isrs_directed, 0, ""); 2111cafed39SJonathan Lemon SYSCTL_INT(_net_isr, OID_AUTO, deferred, CTLFLAG_RD, 2121cafed39SJonathan Lemon 

struct isrstat {
        int     isrs_count;             /* dispatch count */
        int     isrs_directed;          /* ...directly dispatched */
        int     isrs_deferred;          /* ...queued instead */
        int     isrs_queued;            /* intentionally queued */
        int     isrs_drop;              /* dropped because no handler */
        int     isrs_swi_count;         /* swi_net handlers called */
};
static struct isrstat isrstat;

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr counters");

static int      netisr_enable = 0;
SYSCTL_INT(_net_isr, OID_AUTO, enable, CTLFLAG_RW,
    &netisr_enable, 0, "enable direct dispatch");
TUNABLE_INT("net.isr.enable", &netisr_enable);

SYSCTL_INT(_net_isr, OID_AUTO, count, CTLFLAG_RD,
    &isrstat.isrs_count, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, directed, CTLFLAG_RD,
    &isrstat.isrs_directed, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, deferred, CTLFLAG_RD,
    &isrstat.isrs_deferred, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, queued, CTLFLAG_RD,
    &isrstat.isrs_queued, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, drop, CTLFLAG_RD,
    &isrstat.isrs_drop, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, swi_count, CTLFLAG_RD,
    &isrstat.isrs_swi_count, 0, "");

/*
 * Process all packets currently present in a netisr queue.  Used to
 * drain an existing set of packets waiting for processing when we
 * begin direct dispatch, to avoid processing packets out of order.
 */
static void
netisr_processqueue(struct netisr *ni)
{
        struct mbuf *m;

        for (;;) {
                IF_DEQUEUE(ni->ni_queue, m);
                if (m == NULL)
                        break;
                ni->ni_handler(m);
        }
}

/*
 * Call the netisr directly instead of queueing the packet, if possible.
 */
void
netisr_dispatch(int num, struct mbuf *m)
{
        struct netisr *ni;

        isrstat.isrs_count++;           /* XXX redundant */
        KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
            ("bad isr %d", num));
        ni = &netisrs[num];
        if (ni->ni_queue == NULL) {
                isrstat.isrs_drop++;
                m_freem(m);
                return;
        }
        /*
         * Do direct dispatch only for MPSAFE netisrs (and only when
         * enabled).  Note that when a netisr is marked MPSAFE we permit
         * multiple concurrent instances to run.  We guarantee only the
         * order in which packets are processed for each "dispatch point"
         * in the system (i.e. call to netisr_dispatch or netisr_queue).
         * This ensures ordering of packets from an interface but does not
         * guarantee ordering between multiple places in the system (e.g.
         * IP dispatched from interfaces vs. IP queued from IPSec).
         */
        if (netisr_enable && (ni->ni_flags & NETISR_MPSAFE)) {
                isrstat.isrs_directed++;
                /*
                 * NB: We used to drain the queue before handling the
                 * packet but now do not.  Doing so here would not preserve
                 * ordering, so instead we fall back to guaranteeing order
                 * only from dispatch points in the system (see above).
                 */
                ni->ni_handler(m);
        } else {
                isrstat.isrs_deferred++;
                if (IF_HANDOFF(ni->ni_queue, m, NULL))
                        schednetisr(num);
        }
}

/*
 * Same as above, but always queue.
 * This is either used in places where we are not confident that
 * direct dispatch is possible, or where queueing is required.
 * It returns (0) on success and an errno value on failure.  On
 * failure the mbuf has been freed.
 */
int
netisr_queue(int num, struct mbuf *m)
{
        struct netisr *ni;

        KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
            ("bad isr %d", num));
        ni = &netisrs[num];
        if (ni->ni_queue == NULL) {
                isrstat.isrs_drop++;
                m_freem(m);
                return (ENXIO);
        }
        isrstat.isrs_queued++;
        if (!IF_HANDOFF(ni->ni_queue, m, NULL))
                return (ENOBUFS);       /* IF_HANDOFF has freed the mbuf */
        schednetisr(num);
        return (0);
}
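
/*
 * Illustrative caller pattern (a sketch, not code from this file): because
 * netisr_queue() frees the mbuf on every failure path, callers may check
 * the return value but must not free the mbuf again, e.g.:
 *
 *      if (netisr_queue(NETISR_IP, m) != 0)
 *              return;                 (mbuf already freed by netisr_queue)
 */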

static void
swi_net(void *dummy)
{
        struct netisr *ni;
        u_int bits;
        int i;
#ifdef DEVICE_POLLING
        const int polling = 1;
#else
        const int polling = 0;
#endif

        do {
                bits = atomic_readandclear_int(&netisr);
                if (bits == 0)
                        break;
                while ((i = ffs(bits)) != 0) {
                        isrstat.isrs_swi_count++;
                        i--;
                        bits &= ~(1 << i);
                        ni = &netisrs[i];
                        if (ni->ni_handler == NULL) {
                                printf("swi_net: unregistered isr %d.\n", i);
                                continue;
                        }
                        if ((ni->ni_flags & NETISR_MPSAFE) == 0) {
                                mtx_lock(&Giant);
                                if (ni->ni_queue == NULL)
                                        ni->ni_handler(NULL);
                                else
                                        netisr_processqueue(ni);
                                mtx_unlock(&Giant);
                        } else {
                                if (ni->ni_queue == NULL)
                                        ni->ni_handler(NULL);
                                else
                                        netisr_processqueue(ni);
                        }
                }
        } while (polling);
}

static void
start_netisr(void *dummy)
{

        if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, INTR_MPSAFE, &net_ih))
                panic("start_netisr");
}
SYSINIT(start_netisr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_netisr, NULL)