11cafed39SJonathan Lemon /*- 2d4b5cae4SRobert Watson * Copyright (c) 2007-2009 Robert N. M. Watson 3*f2d2d694SRobert Watson * Copyright (c) 2010-2011 Juniper Networks, Inc. 4e3b6e33cSJake Burkholder * All rights reserved. 5e3b6e33cSJake Burkholder * 62d22f334SRobert Watson * This software was developed by Robert N. M. Watson under contract 72d22f334SRobert Watson * to Juniper Networks, Inc. 82d22f334SRobert Watson * 9e3b6e33cSJake Burkholder * Redistribution and use in source and binary forms, with or without 10e3b6e33cSJake Burkholder * modification, are permitted provided that the following conditions 11e3b6e33cSJake Burkholder * are met: 12e3b6e33cSJake Burkholder * 1. Redistributions of source code must retain the above copyright 131cafed39SJonathan Lemon * notice, this list of conditions and the following disclaimer. 14e3b6e33cSJake Burkholder * 2. Redistributions in binary form must reproduce the above copyright 15e3b6e33cSJake Burkholder * notice, this list of conditions and the following disclaimer in the 16e3b6e33cSJake Burkholder * documentation and/or other materials provided with the distribution. 17e3b6e33cSJake Burkholder * 181cafed39SJonathan Lemon * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 191cafed39SJonathan Lemon * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 201cafed39SJonathan Lemon * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 211cafed39SJonathan Lemon * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 221cafed39SJonathan Lemon * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 231cafed39SJonathan Lemon * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 241cafed39SJonathan Lemon * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 251cafed39SJonathan Lemon * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 261cafed39SJonathan Lemon * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 271cafed39SJonathan Lemon * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 281cafed39SJonathan Lemon * SUCH DAMAGE. 29e3b6e33cSJake Burkholder */ 30e3b6e33cSJake Burkholder 31d4b5cae4SRobert Watson #include <sys/cdefs.h> 32d4b5cae4SRobert Watson __FBSDID("$FreeBSD$"); 33d4b5cae4SRobert Watson 34d4b5cae4SRobert Watson /* 35d4b5cae4SRobert Watson * netisr is a packet dispatch service, allowing synchronous (directly 36d4b5cae4SRobert Watson * dispatched) and asynchronous (deferred dispatch) processing of packets by 37d4b5cae4SRobert Watson * registered protocol handlers. Callers pass a protocol identifier and 38d4b5cae4SRobert Watson * packet to netisr, along with a direct dispatch hint, and work will either 390a32e29fSRobert Watson * be immediately processed by the registered handler, or passed to a 400a32e29fSRobert Watson * software interrupt (SWI) thread for deferred dispatch. Callers will 410a32e29fSRobert Watson * generally select one or the other based on: 42d4b5cae4SRobert Watson * 430a32e29fSRobert Watson * - Whether directly dispatching a netisr handler lead to code reentrance or 44d4b5cae4SRobert Watson * lock recursion, such as entering the socket code from the socket code. 450a32e29fSRobert Watson * - Whether directly dispatching a netisr handler lead to recursive 46d4b5cae4SRobert Watson * processing, such as when decapsulating several wrapped layers of tunnel 47d4b5cae4SRobert Watson * information (IPSEC within IPSEC within ...). 48d4b5cae4SRobert Watson * 49d4b5cae4SRobert Watson * Maintaining ordering for protocol streams is a critical design concern. 50d4b5cae4SRobert Watson * Enforcing ordering limits the opportunity for concurrency, but maintains 51d4b5cae4SRobert Watson * the strong ordering requirements found in some protocols, such as TCP. Of 52d4b5cae4SRobert Watson * related concern is CPU affinity--it is desirable to process all data 53d4b5cae4SRobert Watson * associated with a particular stream on the same CPU over time in order to 54d4b5cae4SRobert Watson * avoid acquiring locks associated with the connection on different CPUs, 55d4b5cae4SRobert Watson * keep connection data in one cache, and to generally encourage associated 56d4b5cae4SRobert Watson * user threads to live on the same CPU as the stream. It's also desirable 57d4b5cae4SRobert Watson * to avoid lock migration and contention where locks are associated with 58d4b5cae4SRobert Watson * more than one flow. 59d4b5cae4SRobert Watson * 60d4b5cae4SRobert Watson * netisr supports several policy variations, represented by the 610a32e29fSRobert Watson * NETISR_POLICY_* constants, allowing protocols to play various roles in 62d4b5cae4SRobert Watson * identifying flows, assigning work to CPUs, etc. These are described in 630a32e29fSRobert Watson * netisr.h. 64d4b5cae4SRobert Watson */ 65d4b5cae4SRobert Watson 66d4b5cae4SRobert Watson #include "opt_ddb.h" 67f0796cd2SGleb Smirnoff #include "opt_device_polling.h" 681d8cd39eSRobert Watson 69e3b6e33cSJake Burkholder #include <sys/param.h> 70e3b6e33cSJake Burkholder #include <sys/bus.h> 71e3b6e33cSJake Burkholder #include <sys/kernel.h> 721cafed39SJonathan Lemon #include <sys/kthread.h> 73d4b5cae4SRobert Watson #include <sys/interrupt.h> 741cafed39SJonathan Lemon #include <sys/lock.h> 751cafed39SJonathan Lemon #include <sys/mbuf.h> 76d4b5cae4SRobert Watson #include <sys/mutex.h> 7753402767SRobert Watson #include <sys/pcpu.h> 78d4b5cae4SRobert Watson #include <sys/proc.h> 79d4b5cae4SRobert Watson #include <sys/rmlock.h> 80d4b5cae4SRobert Watson #include <sys/sched.h> 81d4b5cae4SRobert Watson #include <sys/smp.h> 821cafed39SJonathan Lemon #include <sys/socket.h> 83d4b5cae4SRobert Watson #include <sys/sysctl.h> 84d4b5cae4SRobert Watson #include <sys/systm.h> 85d4b5cae4SRobert Watson 86d4b5cae4SRobert Watson #ifdef DDB 87d4b5cae4SRobert Watson #include <ddb/ddb.h> 88d4b5cae4SRobert Watson #endif 891cafed39SJonathan Lemon 90938448cdSRobert Watson #define _WANT_NETISR_INTERNAL /* Enable definitions from netisr_internal.h */ 911cafed39SJonathan Lemon #include <net/if.h> 921cafed39SJonathan Lemon #include <net/if_var.h> 93e3b6e33cSJake Burkholder #include <net/netisr.h> 94938448cdSRobert Watson #include <net/netisr_internal.h> 95530c0060SRobert Watson #include <net/vnet.h> 96e3b6e33cSJake Burkholder 97d4b5cae4SRobert Watson /*- 98d4b5cae4SRobert Watson * Synchronize use and modification of the registered netisr data structures; 99d4b5cae4SRobert Watson * acquire a read lock while modifying the set of registered protocols to 100d4b5cae4SRobert Watson * prevent partially registered or unregistered protocols from being run. 101d4b5cae4SRobert Watson * 102d4b5cae4SRobert Watson * The following data structures and fields are protected by this lock: 103d4b5cae4SRobert Watson * 104938448cdSRobert Watson * - The netisr_proto array, including all fields of struct netisr_proto. 105d4b5cae4SRobert Watson * - The nws array, including all fields of struct netisr_worker. 106d4b5cae4SRobert Watson * - The nws_array array. 107d4b5cae4SRobert Watson * 108d4b5cae4SRobert Watson * Note: the NETISR_LOCKING define controls whether read locks are acquired 109d4b5cae4SRobert Watson * in packet processing paths requiring netisr registration stability. This 1100a32e29fSRobert Watson * is disabled by default as it can lead to measurable performance 111d4b5cae4SRobert Watson * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and 112d4b5cae4SRobert Watson * because netisr registration and unregistration is extremely rare at 113d4b5cae4SRobert Watson * runtime. If it becomes more common, this decision should be revisited. 114d4b5cae4SRobert Watson * 115d4b5cae4SRobert Watson * XXXRW: rmlocks don't support assertions. 116d4b5cae4SRobert Watson */ 117d4b5cae4SRobert Watson static struct rmlock netisr_rmlock; 118d4b5cae4SRobert Watson #define NETISR_LOCK_INIT() rm_init_flags(&netisr_rmlock, "netisr", \ 119d4b5cae4SRobert Watson RM_NOWITNESS) 120d4b5cae4SRobert Watson #define NETISR_LOCK_ASSERT() 121d4b5cae4SRobert Watson #define NETISR_RLOCK(tracker) rm_rlock(&netisr_rmlock, (tracker)) 122d4b5cae4SRobert Watson #define NETISR_RUNLOCK(tracker) rm_runlock(&netisr_rmlock, (tracker)) 123d4b5cae4SRobert Watson #define NETISR_WLOCK() rm_wlock(&netisr_rmlock) 124d4b5cae4SRobert Watson #define NETISR_WUNLOCK() rm_wunlock(&netisr_rmlock) 125d4b5cae4SRobert Watson /* #define NETISR_LOCKING */ 126e3b6e33cSJake Burkholder 127d4b5cae4SRobert Watson SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr"); 1281cafed39SJonathan Lemon 129d4b5cae4SRobert Watson /*- 130*f2d2d694SRobert Watson * Three global direct dispatch policies are supported: 131d4b5cae4SRobert Watson * 132*f2d2d694SRobert Watson * NETISR_DISPATCH_QUEUED: All work is deferred for a netisr, regardless of 133*f2d2d694SRobert Watson * context (may be overriden by protocols). 134d4b5cae4SRobert Watson * 135*f2d2d694SRobert Watson * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch, 136*f2d2d694SRobert Watson * and we're running on the CPU the work would be performed on, then direct 137*f2d2d694SRobert Watson * dispatch it if it wouldn't violate ordering constraints on the workstream. 138d4b5cae4SRobert Watson * 139*f2d2d694SRobert Watson * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch, 140*f2d2d694SRobert Watson * always direct dispatch. (The default.) 141d4b5cae4SRobert Watson * 142d4b5cae4SRobert Watson * Notice that changing the global policy could lead to short periods of 143d4b5cae4SRobert Watson * misordered processing, but this is considered acceptable as compared to 144*f2d2d694SRobert Watson * the complexity of enforcing ordering during policy changes. Protocols can 145*f2d2d694SRobert Watson * override the global policy (when they're not doing that, they select 146*f2d2d694SRobert Watson * NETISR_DISPATCH_DEFAULT). 147d4b5cae4SRobert Watson */ 148*f2d2d694SRobert Watson #define NETISR_DISPATCH_POLICY_DEFAULT NETISR_DISPATCH_DIRECT 149*f2d2d694SRobert Watson #define NETISR_DISPATCH_POLICY_MAXSTR 20 /* Used for temporary buffers. */ 150*f2d2d694SRobert Watson static u_int netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT; 151*f2d2d694SRobert Watson static int sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS); 152*f2d2d694SRobert Watson SYSCTL_PROC(_net_isr, OID_AUTO, dispatch, CTLTYPE_STRING | CTLFLAG_RW | 153*f2d2d694SRobert Watson CTLFLAG_TUN, 0, 0, sysctl_netisr_dispatch_policy, "A", 154*f2d2d694SRobert Watson "netisr dispatch policy"); 155e3b6e33cSJake Burkholder 156*f2d2d694SRobert Watson /* 157*f2d2d694SRobert Watson * These sysctls were used in previous versions to control and export 158*f2d2d694SRobert Watson * dispatch policy state. Now, we provide read-only export via them so that 159*f2d2d694SRobert Watson * older netstat binaries work. At some point they can be garbage collected. 160*f2d2d694SRobert Watson */ 161*f2d2d694SRobert Watson static int netisr_direct_force; 162*f2d2d694SRobert Watson SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RD, 163*f2d2d694SRobert Watson &netisr_direct_force, 0, "compat: force direct dispatch"); 164*f2d2d694SRobert Watson 165*f2d2d694SRobert Watson static int netisr_direct; 166*f2d2d694SRobert Watson SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RD, &netisr_direct, 0, 167*f2d2d694SRobert Watson "compat: enable direct dispatch"); 1681cafed39SJonathan Lemon 1691cafed39SJonathan Lemon /* 170d4b5cae4SRobert Watson * Allow the administrator to limit the number of threads (CPUs) to use for 171d4b5cae4SRobert Watson * netisr. We don't check netisr_maxthreads before creating the thread for 172d4b5cae4SRobert Watson * CPU 0, so in practice we ignore values <= 1. This must be set at boot. 173d4b5cae4SRobert Watson * We will create at most one thread per CPU. 1745fd04e38SRobert Watson */ 1759e6e01ebSRobert Watson static int netisr_maxthreads = -1; /* Max number of threads. */ 176d4b5cae4SRobert Watson TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads); 17778494902SPawel Jakub Dawidek SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN, 178d4b5cae4SRobert Watson &netisr_maxthreads, 0, 179d4b5cae4SRobert Watson "Use at most this many CPUs for netisr processing"); 1805fd04e38SRobert Watson 181d4b5cae4SRobert Watson static int netisr_bindthreads = 0; /* Bind threads to CPUs. */ 182d4b5cae4SRobert Watson TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads); 18378494902SPawel Jakub Dawidek SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN, 184d4b5cae4SRobert Watson &netisr_bindthreads, 0, "Bind netisr threads to CPUs."); 185d4b5cae4SRobert Watson 186d4b5cae4SRobert Watson /* 1870a32e29fSRobert Watson * Limit per-workstream mbuf queue limits s to at most net.isr.maxqlimit, 1880a32e29fSRobert Watson * both for initial configuration and later modification using 1890a32e29fSRobert Watson * netisr_setqlimit(). 190d4b5cae4SRobert Watson */ 191d4b5cae4SRobert Watson #define NETISR_DEFAULT_MAXQLIMIT 10240 192d4b5cae4SRobert Watson static u_int netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT; 193d4b5cae4SRobert Watson TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit); 194f88910cdSMatthew D Fleming SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN, 195d4b5cae4SRobert Watson &netisr_maxqlimit, 0, 196d4b5cae4SRobert Watson "Maximum netisr per-protocol, per-CPU queue depth."); 197d4b5cae4SRobert Watson 198d4b5cae4SRobert Watson /* 1990a32e29fSRobert Watson * The default per-workstream mbuf queue limit for protocols that don't 2000a32e29fSRobert Watson * initialize the nh_qlimit field of their struct netisr_handler. If this is 2010a32e29fSRobert Watson * set above netisr_maxqlimit, we truncate it to the maximum during boot. 202d4b5cae4SRobert Watson */ 203d4b5cae4SRobert Watson #define NETISR_DEFAULT_DEFAULTQLIMIT 256 204d4b5cae4SRobert Watson static u_int netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT; 205d4b5cae4SRobert Watson TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit); 206f88910cdSMatthew D Fleming SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN, 207d4b5cae4SRobert Watson &netisr_defaultqlimit, 0, 208d4b5cae4SRobert Watson "Default netisr per-protocol, per-CPU queue limit if not set by protocol"); 209d4b5cae4SRobert Watson 210d4b5cae4SRobert Watson /* 211938448cdSRobert Watson * Store and export the compile-time constant NETISR_MAXPROT limit on the 212938448cdSRobert Watson * number of protocols that can register with netisr at a time. This is 213938448cdSRobert Watson * required for crashdump analysis, as it sizes netisr_proto[]. 214d4b5cae4SRobert Watson */ 215938448cdSRobert Watson static u_int netisr_maxprot = NETISR_MAXPROT; 216f88910cdSMatthew D Fleming SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD, 217938448cdSRobert Watson &netisr_maxprot, 0, 218938448cdSRobert Watson "Compile-time limit on the number of protocols supported by netisr."); 219d4b5cae4SRobert Watson 220d4b5cae4SRobert Watson /* 221938448cdSRobert Watson * The netisr_proto array describes all registered protocols, indexed by 222938448cdSRobert Watson * protocol number. See netisr_internal.h for more details. 223d4b5cae4SRobert Watson */ 224938448cdSRobert Watson static struct netisr_proto netisr_proto[NETISR_MAXPROT]; 225d4b5cae4SRobert Watson 226d4b5cae4SRobert Watson /* 227938448cdSRobert Watson * Per-CPU workstream data. See netisr_internal.h for more details. 228d4b5cae4SRobert Watson */ 22953402767SRobert Watson DPCPU_DEFINE(struct netisr_workstream, nws); 230d4b5cae4SRobert Watson 231d4b5cae4SRobert Watson /* 232d4b5cae4SRobert Watson * Map contiguous values between 0 and nws_count into CPU IDs appropriate for 23353402767SRobert Watson * accessing workstreams. This allows constructions of the form 23453402767SRobert Watson * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws). 235d4b5cae4SRobert Watson */ 236d4b5cae4SRobert Watson static u_int nws_array[MAXCPU]; 237d4b5cae4SRobert Watson 238d4b5cae4SRobert Watson /* 239d4b5cae4SRobert Watson * Number of registered workstreams. Will be at most the number of running 240d4b5cae4SRobert Watson * CPUs once fully started. 241d4b5cae4SRobert Watson */ 242d4b5cae4SRobert Watson static u_int nws_count; 243f88910cdSMatthew D Fleming SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD, 244d4b5cae4SRobert Watson &nws_count, 0, "Number of extant netisr threads."); 245d4b5cae4SRobert Watson 246d4b5cae4SRobert Watson /* 247d4b5cae4SRobert Watson * Synchronization for each workstream: a mutex protects all mutable fields 248d4b5cae4SRobert Watson * in each stream, including per-protocol state (mbuf queues). The SWI is 249d4b5cae4SRobert Watson * woken up if asynchronous dispatch is required. 250d4b5cae4SRobert Watson */ 251d4b5cae4SRobert Watson #define NWS_LOCK(s) mtx_lock(&(s)->nws_mtx) 252d4b5cae4SRobert Watson #define NWS_LOCK_ASSERT(s) mtx_assert(&(s)->nws_mtx, MA_OWNED) 253d4b5cae4SRobert Watson #define NWS_UNLOCK(s) mtx_unlock(&(s)->nws_mtx) 254d4b5cae4SRobert Watson #define NWS_SIGNAL(s) swi_sched((s)->nws_swi_cookie, 0) 255d4b5cae4SRobert Watson 256d4b5cae4SRobert Watson /* 257d4b5cae4SRobert Watson * Utility routines for protocols that implement their own mapping of flows 258d4b5cae4SRobert Watson * to CPUs. 259d4b5cae4SRobert Watson */ 260d4b5cae4SRobert Watson u_int 261d4b5cae4SRobert Watson netisr_get_cpucount(void) 262d4b5cae4SRobert Watson { 263d4b5cae4SRobert Watson 264d4b5cae4SRobert Watson return (nws_count); 2655fd04e38SRobert Watson } 266d4b5cae4SRobert Watson 267d4b5cae4SRobert Watson u_int 268d4b5cae4SRobert Watson netisr_get_cpuid(u_int cpunumber) 269d4b5cae4SRobert Watson { 270d4b5cae4SRobert Watson 271d4b5cae4SRobert Watson KASSERT(cpunumber < nws_count, ("%s: %u > %u", __func__, cpunumber, 272d4b5cae4SRobert Watson nws_count)); 273d4b5cae4SRobert Watson 274d4b5cae4SRobert Watson return (nws_array[cpunumber]); 2755fd04e38SRobert Watson } 2765fd04e38SRobert Watson 2775fd04e38SRobert Watson /* 2780a32e29fSRobert Watson * The default implementation of flow -> CPU ID mapping. 279d4b5cae4SRobert Watson * 280d4b5cae4SRobert Watson * Non-static so that protocols can use it to map their own work to specific 281d4b5cae4SRobert Watson * CPUs in a manner consistent to netisr for affinity purposes. 282d4b5cae4SRobert Watson */ 283d4b5cae4SRobert Watson u_int 284d4b5cae4SRobert Watson netisr_default_flow2cpu(u_int flowid) 285d4b5cae4SRobert Watson { 286d4b5cae4SRobert Watson 287d4b5cae4SRobert Watson return (nws_array[flowid % nws_count]); 288d4b5cae4SRobert Watson } 289d4b5cae4SRobert Watson 290d4b5cae4SRobert Watson /* 291*f2d2d694SRobert Watson * Dispatch tunable and sysctl configuration. 292*f2d2d694SRobert Watson */ 293*f2d2d694SRobert Watson struct netisr_dispatch_table_entry { 294*f2d2d694SRobert Watson u_int ndte_policy; 295*f2d2d694SRobert Watson const char *ndte_policy_str; 296*f2d2d694SRobert Watson }; 297*f2d2d694SRobert Watson static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = { 298*f2d2d694SRobert Watson { NETISR_DISPATCH_DEFAULT, "default" }, 299*f2d2d694SRobert Watson { NETISR_DISPATCH_DEFERRED, "deferred" }, 300*f2d2d694SRobert Watson { NETISR_DISPATCH_HYBRID, "hybrid" }, 301*f2d2d694SRobert Watson { NETISR_DISPATCH_DIRECT, "direct" }, 302*f2d2d694SRobert Watson }; 303*f2d2d694SRobert Watson static const u_int netisr_dispatch_table_len = 304*f2d2d694SRobert Watson (sizeof(netisr_dispatch_table) / sizeof(netisr_dispatch_table[0])); 305*f2d2d694SRobert Watson 306*f2d2d694SRobert Watson static void 307*f2d2d694SRobert Watson netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer, 308*f2d2d694SRobert Watson u_int buflen) 309*f2d2d694SRobert Watson { 310*f2d2d694SRobert Watson const struct netisr_dispatch_table_entry *ndtep; 311*f2d2d694SRobert Watson const char *str; 312*f2d2d694SRobert Watson u_int i; 313*f2d2d694SRobert Watson 314*f2d2d694SRobert Watson str = "unknown"; 315*f2d2d694SRobert Watson for (i = 0; i < netisr_dispatch_table_len; i++) { 316*f2d2d694SRobert Watson ndtep = &netisr_dispatch_table[i]; 317*f2d2d694SRobert Watson if (ndtep->ndte_policy == dispatch_policy) { 318*f2d2d694SRobert Watson str = ndtep->ndte_policy_str; 319*f2d2d694SRobert Watson break; 320*f2d2d694SRobert Watson } 321*f2d2d694SRobert Watson } 322*f2d2d694SRobert Watson snprintf(buffer, buflen, "%s", str); 323*f2d2d694SRobert Watson } 324*f2d2d694SRobert Watson 325*f2d2d694SRobert Watson static int 326*f2d2d694SRobert Watson netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp) 327*f2d2d694SRobert Watson { 328*f2d2d694SRobert Watson const struct netisr_dispatch_table_entry *ndtep; 329*f2d2d694SRobert Watson u_int i; 330*f2d2d694SRobert Watson 331*f2d2d694SRobert Watson for (i = 0; i < netisr_dispatch_table_len; i++) { 332*f2d2d694SRobert Watson ndtep = &netisr_dispatch_table[i]; 333*f2d2d694SRobert Watson if (strcmp(ndtep->ndte_policy_str, str) == 0) { 334*f2d2d694SRobert Watson *dispatch_policyp = ndtep->ndte_policy; 335*f2d2d694SRobert Watson return (0); 336*f2d2d694SRobert Watson } 337*f2d2d694SRobert Watson } 338*f2d2d694SRobert Watson return (EINVAL); 339*f2d2d694SRobert Watson } 340*f2d2d694SRobert Watson 341*f2d2d694SRobert Watson static void 342*f2d2d694SRobert Watson netisr_dispatch_policy_compat(void) 343*f2d2d694SRobert Watson { 344*f2d2d694SRobert Watson 345*f2d2d694SRobert Watson switch (netisr_dispatch_policy) { 346*f2d2d694SRobert Watson case NETISR_DISPATCH_DEFERRED: 347*f2d2d694SRobert Watson netisr_direct_force = 0; 348*f2d2d694SRobert Watson netisr_direct = 0; 349*f2d2d694SRobert Watson break; 350*f2d2d694SRobert Watson 351*f2d2d694SRobert Watson case NETISR_DISPATCH_HYBRID: 352*f2d2d694SRobert Watson netisr_direct_force = 0; 353*f2d2d694SRobert Watson netisr_direct = 1; 354*f2d2d694SRobert Watson break; 355*f2d2d694SRobert Watson 356*f2d2d694SRobert Watson case NETISR_DISPATCH_DIRECT: 357*f2d2d694SRobert Watson netisr_direct_force = 1; 358*f2d2d694SRobert Watson netisr_direct = 1; 359*f2d2d694SRobert Watson break; 360*f2d2d694SRobert Watson 361*f2d2d694SRobert Watson default: 362*f2d2d694SRobert Watson panic("%s: unknown policy %u", __func__, 363*f2d2d694SRobert Watson netisr_dispatch_policy); 364*f2d2d694SRobert Watson } 365*f2d2d694SRobert Watson } 366*f2d2d694SRobert Watson 367*f2d2d694SRobert Watson static int 368*f2d2d694SRobert Watson sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS) 369*f2d2d694SRobert Watson { 370*f2d2d694SRobert Watson char tmp[NETISR_DISPATCH_POLICY_MAXSTR]; 371*f2d2d694SRobert Watson u_int dispatch_policy; 372*f2d2d694SRobert Watson int error; 373*f2d2d694SRobert Watson 374*f2d2d694SRobert Watson netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp, 375*f2d2d694SRobert Watson sizeof(tmp)); 376*f2d2d694SRobert Watson error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req); 377*f2d2d694SRobert Watson if (error == 0 && req->newptr != NULL) { 378*f2d2d694SRobert Watson error = netisr_dispatch_policy_from_str(tmp, 379*f2d2d694SRobert Watson &dispatch_policy); 380*f2d2d694SRobert Watson if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT) 381*f2d2d694SRobert Watson error = EINVAL; 382*f2d2d694SRobert Watson if (error == 0) { 383*f2d2d694SRobert Watson netisr_dispatch_policy = dispatch_policy; 384*f2d2d694SRobert Watson netisr_dispatch_policy_compat(); 385*f2d2d694SRobert Watson } 386*f2d2d694SRobert Watson } 387*f2d2d694SRobert Watson return (error); 388*f2d2d694SRobert Watson } 389*f2d2d694SRobert Watson 390*f2d2d694SRobert Watson /* 391d4b5cae4SRobert Watson * Register a new netisr handler, which requires initializing per-protocol 392d4b5cae4SRobert Watson * fields for each workstream. All netisr work is briefly suspended while 393d4b5cae4SRobert Watson * the protocol is installed. 3941cafed39SJonathan Lemon */ 3951cafed39SJonathan Lemon void 396d4b5cae4SRobert Watson netisr_register(const struct netisr_handler *nhp) 3971cafed39SJonathan Lemon { 398d4b5cae4SRobert Watson struct netisr_work *npwp; 399d4b5cae4SRobert Watson const char *name; 400d4b5cae4SRobert Watson u_int i, proto; 4011cafed39SJonathan Lemon 402d4b5cae4SRobert Watson proto = nhp->nh_proto; 403d4b5cae4SRobert Watson name = nhp->nh_name; 40459dd72d0SRobert Watson 4057902224cSSam Leffler /* 406d4b5cae4SRobert Watson * Test that the requested registration is valid. 4077902224cSSam Leffler */ 408d4b5cae4SRobert Watson KASSERT(nhp->nh_name != NULL, 409d4b5cae4SRobert Watson ("%s: nh_name NULL for %u", __func__, proto)); 410d4b5cae4SRobert Watson KASSERT(nhp->nh_handler != NULL, 411d4b5cae4SRobert Watson ("%s: nh_handler NULL for %s", __func__, name)); 412d4b5cae4SRobert Watson KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE || 413d4b5cae4SRobert Watson nhp->nh_policy == NETISR_POLICY_FLOW || 414d4b5cae4SRobert Watson nhp->nh_policy == NETISR_POLICY_CPU, 415d4b5cae4SRobert Watson ("%s: unsupported nh_policy %u for %s", __func__, 416d4b5cae4SRobert Watson nhp->nh_policy, name)); 417d4b5cae4SRobert Watson KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW || 418d4b5cae4SRobert Watson nhp->nh_m2flow == NULL, 419d4b5cae4SRobert Watson ("%s: nh_policy != FLOW but m2flow defined for %s", __func__, 420d4b5cae4SRobert Watson name)); 421d4b5cae4SRobert Watson KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL, 422d4b5cae4SRobert Watson ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__, 423d4b5cae4SRobert Watson name)); 424d4b5cae4SRobert Watson KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL, 425d4b5cae4SRobert Watson ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__, 426d4b5cae4SRobert Watson name)); 427*f2d2d694SRobert Watson KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT || 428*f2d2d694SRobert Watson nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED || 429*f2d2d694SRobert Watson nhp->nh_dispatch == NETISR_DISPATCH_HYBRID || 430*f2d2d694SRobert Watson nhp->nh_dispatch == NETISR_DISPATCH_DIRECT, 431*f2d2d694SRobert Watson ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch)); 432*f2d2d694SRobert Watson 433d4b5cae4SRobert Watson KASSERT(proto < NETISR_MAXPROT, 434d4b5cae4SRobert Watson ("%s(%u, %s): protocol too big", __func__, proto, name)); 435d4b5cae4SRobert Watson 436d4b5cae4SRobert Watson /* 437d4b5cae4SRobert Watson * Test that no existing registration exists for this protocol. 438d4b5cae4SRobert Watson */ 439d4b5cae4SRobert Watson NETISR_WLOCK(); 440938448cdSRobert Watson KASSERT(netisr_proto[proto].np_name == NULL, 441d4b5cae4SRobert Watson ("%s(%u, %s): name present", __func__, proto, name)); 442938448cdSRobert Watson KASSERT(netisr_proto[proto].np_handler == NULL, 443d4b5cae4SRobert Watson ("%s(%u, %s): handler present", __func__, proto, name)); 444d4b5cae4SRobert Watson 445938448cdSRobert Watson netisr_proto[proto].np_name = name; 446938448cdSRobert Watson netisr_proto[proto].np_handler = nhp->nh_handler; 447938448cdSRobert Watson netisr_proto[proto].np_m2flow = nhp->nh_m2flow; 448938448cdSRobert Watson netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid; 449938448cdSRobert Watson netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu; 450d4b5cae4SRobert Watson if (nhp->nh_qlimit == 0) 451938448cdSRobert Watson netisr_proto[proto].np_qlimit = netisr_defaultqlimit; 452d4b5cae4SRobert Watson else if (nhp->nh_qlimit > netisr_maxqlimit) { 453d4b5cae4SRobert Watson printf("%s: %s requested queue limit %u capped to " 454d4b5cae4SRobert Watson "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit, 455d4b5cae4SRobert Watson netisr_maxqlimit); 456938448cdSRobert Watson netisr_proto[proto].np_qlimit = netisr_maxqlimit; 457d4b5cae4SRobert Watson } else 458938448cdSRobert Watson netisr_proto[proto].np_qlimit = nhp->nh_qlimit; 459938448cdSRobert Watson netisr_proto[proto].np_policy = nhp->nh_policy; 460*f2d2d694SRobert Watson netisr_proto[proto].np_dispatch = nhp->nh_dispatch; 4613aa6d94eSJohn Baldwin CPU_FOREACH(i) { 46253402767SRobert Watson npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 463d4b5cae4SRobert Watson bzero(npwp, sizeof(*npwp)); 464938448cdSRobert Watson npwp->nw_qlimit = netisr_proto[proto].np_qlimit; 4651cafed39SJonathan Lemon } 466d4b5cae4SRobert Watson NETISR_WUNLOCK(); 4671cafed39SJonathan Lemon } 4681cafed39SJonathan Lemon 4691cafed39SJonathan Lemon /* 470d4b5cae4SRobert Watson * Clear drop counters across all workstreams for a protocol. 471d4b5cae4SRobert Watson */ 472d4b5cae4SRobert Watson void 473d4b5cae4SRobert Watson netisr_clearqdrops(const struct netisr_handler *nhp) 474d4b5cae4SRobert Watson { 475d4b5cae4SRobert Watson struct netisr_work *npwp; 476d4b5cae4SRobert Watson #ifdef INVARIANTS 477d4b5cae4SRobert Watson const char *name; 478d4b5cae4SRobert Watson #endif 479d4b5cae4SRobert Watson u_int i, proto; 480d4b5cae4SRobert Watson 481d4b5cae4SRobert Watson proto = nhp->nh_proto; 482d4b5cae4SRobert Watson #ifdef INVARIANTS 483d4b5cae4SRobert Watson name = nhp->nh_name; 484d4b5cae4SRobert Watson #endif 485d4b5cae4SRobert Watson KASSERT(proto < NETISR_MAXPROT, 486d4b5cae4SRobert Watson ("%s(%u): protocol too big for %s", __func__, proto, name)); 487d4b5cae4SRobert Watson 488d4b5cae4SRobert Watson NETISR_WLOCK(); 489938448cdSRobert Watson KASSERT(netisr_proto[proto].np_handler != NULL, 490d4b5cae4SRobert Watson ("%s(%u): protocol not registered for %s", __func__, proto, 491d4b5cae4SRobert Watson name)); 492d4b5cae4SRobert Watson 4933aa6d94eSJohn Baldwin CPU_FOREACH(i) { 49453402767SRobert Watson npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 495d4b5cae4SRobert Watson npwp->nw_qdrops = 0; 496d4b5cae4SRobert Watson } 497d4b5cae4SRobert Watson NETISR_WUNLOCK(); 498d4b5cae4SRobert Watson } 499d4b5cae4SRobert Watson 500d4b5cae4SRobert Watson /* 5010a32e29fSRobert Watson * Query current drop counters across all workstreams for a protocol. 502d4b5cae4SRobert Watson */ 503d4b5cae4SRobert Watson void 504d4b5cae4SRobert Watson netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp) 505d4b5cae4SRobert Watson { 506d4b5cae4SRobert Watson struct netisr_work *npwp; 507d4b5cae4SRobert Watson struct rm_priotracker tracker; 508d4b5cae4SRobert Watson #ifdef INVARIANTS 509d4b5cae4SRobert Watson const char *name; 510d4b5cae4SRobert Watson #endif 511d4b5cae4SRobert Watson u_int i, proto; 512d4b5cae4SRobert Watson 513d4b5cae4SRobert Watson *qdropp = 0; 514d4b5cae4SRobert Watson proto = nhp->nh_proto; 515d4b5cae4SRobert Watson #ifdef INVARIANTS 516d4b5cae4SRobert Watson name = nhp->nh_name; 517d4b5cae4SRobert Watson #endif 518d4b5cae4SRobert Watson KASSERT(proto < NETISR_MAXPROT, 519d4b5cae4SRobert Watson ("%s(%u): protocol too big for %s", __func__, proto, name)); 520d4b5cae4SRobert Watson 521d4b5cae4SRobert Watson NETISR_RLOCK(&tracker); 522938448cdSRobert Watson KASSERT(netisr_proto[proto].np_handler != NULL, 523d4b5cae4SRobert Watson ("%s(%u): protocol not registered for %s", __func__, proto, 524d4b5cae4SRobert Watson name)); 525d4b5cae4SRobert Watson 5263aa6d94eSJohn Baldwin CPU_FOREACH(i) { 52753402767SRobert Watson npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 528d4b5cae4SRobert Watson *qdropp += npwp->nw_qdrops; 529d4b5cae4SRobert Watson } 530d4b5cae4SRobert Watson NETISR_RUNLOCK(&tracker); 531d4b5cae4SRobert Watson } 532d4b5cae4SRobert Watson 533d4b5cae4SRobert Watson /* 5340a32e29fSRobert Watson * Query current per-workstream queue limit for a protocol. 535d4b5cae4SRobert Watson */ 536d4b5cae4SRobert Watson void 537d4b5cae4SRobert Watson netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp) 538d4b5cae4SRobert Watson { 539d4b5cae4SRobert Watson struct rm_priotracker tracker; 540d4b5cae4SRobert Watson #ifdef INVARIANTS 541d4b5cae4SRobert Watson const char *name; 542d4b5cae4SRobert Watson #endif 543d4b5cae4SRobert Watson u_int proto; 544d4b5cae4SRobert Watson 545d4b5cae4SRobert Watson proto = nhp->nh_proto; 546d4b5cae4SRobert Watson #ifdef INVARIANTS 547d4b5cae4SRobert Watson name = nhp->nh_name; 548d4b5cae4SRobert Watson #endif 549d4b5cae4SRobert Watson KASSERT(proto < NETISR_MAXPROT, 550d4b5cae4SRobert Watson ("%s(%u): protocol too big for %s", __func__, proto, name)); 551d4b5cae4SRobert Watson 552d4b5cae4SRobert Watson NETISR_RLOCK(&tracker); 553938448cdSRobert Watson KASSERT(netisr_proto[proto].np_handler != NULL, 554d4b5cae4SRobert Watson ("%s(%u): protocol not registered for %s", __func__, proto, 555d4b5cae4SRobert Watson name)); 556938448cdSRobert Watson *qlimitp = netisr_proto[proto].np_qlimit; 557d4b5cae4SRobert Watson NETISR_RUNLOCK(&tracker); 558d4b5cae4SRobert Watson } 559d4b5cae4SRobert Watson 560d4b5cae4SRobert Watson /* 561d4b5cae4SRobert Watson * Update the queue limit across per-workstream queues for a protocol. We 562d4b5cae4SRobert Watson * simply change the limits, and don't drain overflowed packets as they will 563d4b5cae4SRobert Watson * (hopefully) take care of themselves shortly. 5641cafed39SJonathan Lemon */ 5651cafed39SJonathan Lemon int 566d4b5cae4SRobert Watson netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit) 5671cafed39SJonathan Lemon { 568d4b5cae4SRobert Watson struct netisr_work *npwp; 569d4b5cae4SRobert Watson #ifdef INVARIANTS 570d4b5cae4SRobert Watson const char *name; 571d4b5cae4SRobert Watson #endif 572d4b5cae4SRobert Watson u_int i, proto; 5731cafed39SJonathan Lemon 574d4b5cae4SRobert Watson if (qlimit > netisr_maxqlimit) 575d4b5cae4SRobert Watson return (EINVAL); 576d4b5cae4SRobert Watson 577d4b5cae4SRobert Watson proto = nhp->nh_proto; 578d4b5cae4SRobert Watson #ifdef INVARIANTS 579d4b5cae4SRobert Watson name = nhp->nh_name; 580d4b5cae4SRobert Watson #endif 581d4b5cae4SRobert Watson KASSERT(proto < NETISR_MAXPROT, 582d4b5cae4SRobert Watson ("%s(%u): protocol too big for %s", __func__, proto, name)); 583d4b5cae4SRobert Watson 584d4b5cae4SRobert Watson NETISR_WLOCK(); 585938448cdSRobert Watson KASSERT(netisr_proto[proto].np_handler != NULL, 586d4b5cae4SRobert Watson ("%s(%u): protocol not registered for %s", __func__, proto, 587d4b5cae4SRobert Watson name)); 588d4b5cae4SRobert Watson 589938448cdSRobert Watson netisr_proto[proto].np_qlimit = qlimit; 5903aa6d94eSJohn Baldwin CPU_FOREACH(i) { 59153402767SRobert Watson npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 592d4b5cae4SRobert Watson npwp->nw_qlimit = qlimit; 593fb68148fSJonathan Lemon } 594d4b5cae4SRobert Watson NETISR_WUNLOCK(); 5953161f583SAndre Oppermann return (0); 596e3b6e33cSJake Burkholder } 597e3b6e33cSJake Burkholder 598d4b5cae4SRobert Watson /* 599d4b5cae4SRobert Watson * Drain all packets currently held in a particular protocol work queue. 600d4b5cae4SRobert Watson */ 601e3b6e33cSJake Burkholder static void 602d4b5cae4SRobert Watson netisr_drain_proto(struct netisr_work *npwp) 603e3b6e33cSJake Burkholder { 604d4b5cae4SRobert Watson struct mbuf *m; 605d4b5cae4SRobert Watson 606d4b5cae4SRobert Watson /* 607d4b5cae4SRobert Watson * We would assert the lock on the workstream but it's not passed in. 608d4b5cae4SRobert Watson */ 609d4b5cae4SRobert Watson while ((m = npwp->nw_head) != NULL) { 610d4b5cae4SRobert Watson npwp->nw_head = m->m_nextpkt; 611d4b5cae4SRobert Watson m->m_nextpkt = NULL; 612d4b5cae4SRobert Watson if (npwp->nw_head == NULL) 613d4b5cae4SRobert Watson npwp->nw_tail = NULL; 614d4b5cae4SRobert Watson npwp->nw_len--; 615d4b5cae4SRobert Watson m_freem(m); 616d4b5cae4SRobert Watson } 617d4b5cae4SRobert Watson KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__)); 618d4b5cae4SRobert Watson KASSERT(npwp->nw_len == 0, ("%s: len", __func__)); 619d4b5cae4SRobert Watson } 620d4b5cae4SRobert Watson 621d4b5cae4SRobert Watson /* 622d4b5cae4SRobert Watson * Remove the registration of a network protocol, which requires clearing 623d4b5cae4SRobert Watson * per-protocol fields across all workstreams, including freeing all mbufs in 624d4b5cae4SRobert Watson * the queues at time of unregister. All work in netisr is briefly suspended 625d4b5cae4SRobert Watson * while this takes place. 626d4b5cae4SRobert Watson */ 627d4b5cae4SRobert Watson void 628d4b5cae4SRobert Watson netisr_unregister(const struct netisr_handler *nhp) 629d4b5cae4SRobert Watson { 630d4b5cae4SRobert Watson struct netisr_work *npwp; 631d4b5cae4SRobert Watson #ifdef INVARIANTS 632d4b5cae4SRobert Watson const char *name; 633d4b5cae4SRobert Watson #endif 634d4b5cae4SRobert Watson u_int i, proto; 635d4b5cae4SRobert Watson 636d4b5cae4SRobert Watson proto = nhp->nh_proto; 637d4b5cae4SRobert Watson #ifdef INVARIANTS 638d4b5cae4SRobert Watson name = nhp->nh_name; 639d4b5cae4SRobert Watson #endif 640d4b5cae4SRobert Watson KASSERT(proto < NETISR_MAXPROT, 641d4b5cae4SRobert Watson ("%s(%u): protocol too big for %s", __func__, proto, name)); 642d4b5cae4SRobert Watson 643d4b5cae4SRobert Watson NETISR_WLOCK(); 644938448cdSRobert Watson KASSERT(netisr_proto[proto].np_handler != NULL, 645d4b5cae4SRobert Watson ("%s(%u): protocol not registered for %s", __func__, proto, 646d4b5cae4SRobert Watson name)); 647d4b5cae4SRobert Watson 648938448cdSRobert Watson netisr_proto[proto].np_name = NULL; 649938448cdSRobert Watson netisr_proto[proto].np_handler = NULL; 650938448cdSRobert Watson netisr_proto[proto].np_m2flow = NULL; 651938448cdSRobert Watson netisr_proto[proto].np_m2cpuid = NULL; 652938448cdSRobert Watson netisr_proto[proto].np_qlimit = 0; 653938448cdSRobert Watson netisr_proto[proto].np_policy = 0; 6543aa6d94eSJohn Baldwin CPU_FOREACH(i) { 65553402767SRobert Watson npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 656d4b5cae4SRobert Watson netisr_drain_proto(npwp); 657d4b5cae4SRobert Watson bzero(npwp, sizeof(*npwp)); 658d4b5cae4SRobert Watson } 659d4b5cae4SRobert Watson NETISR_WUNLOCK(); 660d4b5cae4SRobert Watson } 661d4b5cae4SRobert Watson 662d4b5cae4SRobert Watson /* 663*f2d2d694SRobert Watson * Compose the global and per-protocol policies on dispatch, and return the 664*f2d2d694SRobert Watson * dispatch policy to use. 665*f2d2d694SRobert Watson */ 666*f2d2d694SRobert Watson static u_int 667*f2d2d694SRobert Watson netisr_get_dispatch(struct netisr_proto *npp) 668*f2d2d694SRobert Watson { 669*f2d2d694SRobert Watson 670*f2d2d694SRobert Watson /* 671*f2d2d694SRobert Watson * Protocol-specific configuration overrides the global default. 672*f2d2d694SRobert Watson */ 673*f2d2d694SRobert Watson if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT) 674*f2d2d694SRobert Watson return (npp->np_dispatch); 675*f2d2d694SRobert Watson return (netisr_dispatch_policy); 676*f2d2d694SRobert Watson } 677*f2d2d694SRobert Watson 678*f2d2d694SRobert Watson /* 679d4b5cae4SRobert Watson * Look up the workstream given a packet and source identifier. Do this by 680d4b5cae4SRobert Watson * checking the protocol's policy, and optionally call out to the protocol 681d4b5cae4SRobert Watson * for assistance if required. 682d4b5cae4SRobert Watson */ 683d4b5cae4SRobert Watson static struct mbuf * 684*f2d2d694SRobert Watson netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy, 685*f2d2d694SRobert Watson uintptr_t source, struct mbuf *m, u_int *cpuidp) 686d4b5cae4SRobert Watson { 687d4b5cae4SRobert Watson struct ifnet *ifp; 688*f2d2d694SRobert Watson u_int policy; 689d4b5cae4SRobert Watson 690d4b5cae4SRobert Watson NETISR_LOCK_ASSERT(); 691d4b5cae4SRobert Watson 692d4b5cae4SRobert Watson /* 693d4b5cae4SRobert Watson * In the event we have only one worker, shortcut and deliver to it 694d4b5cae4SRobert Watson * without further ado. 695d4b5cae4SRobert Watson */ 696d4b5cae4SRobert Watson if (nws_count == 1) { 697d4b5cae4SRobert Watson *cpuidp = nws_array[0]; 698d4b5cae4SRobert Watson return (m); 699d4b5cae4SRobert Watson } 700d4b5cae4SRobert Watson 701d4b5cae4SRobert Watson /* 702d4b5cae4SRobert Watson * What happens next depends on the policy selected by the protocol. 703d4b5cae4SRobert Watson * If we want to support per-interface policies, we should do that 704d4b5cae4SRobert Watson * here first. 705d4b5cae4SRobert Watson */ 706*f2d2d694SRobert Watson policy = npp->np_policy; 707*f2d2d694SRobert Watson if (policy == NETISR_POLICY_CPU) { 708*f2d2d694SRobert Watson m = npp->np_m2cpuid(m, source, cpuidp); 709*f2d2d694SRobert Watson if (m == NULL) 710*f2d2d694SRobert Watson return (NULL); 711d4b5cae4SRobert Watson 712*f2d2d694SRobert Watson /* 713*f2d2d694SRobert Watson * It's possible for a protocol not to have a good idea about 714*f2d2d694SRobert Watson * where to process a packet, in which case we fall back on 715*f2d2d694SRobert Watson * the netisr code to decide. In the hybrid case, return the 716*f2d2d694SRobert Watson * current CPU ID, which will force an immediate direct 717*f2d2d694SRobert Watson * dispatch. In the queued case, fall back on the SOURCE 718*f2d2d694SRobert Watson * policy. 719*f2d2d694SRobert Watson */ 720*f2d2d694SRobert Watson if (*cpuidp != NETISR_CPUID_NONE) 721*f2d2d694SRobert Watson return (m); 722*f2d2d694SRobert Watson if (dispatch_policy == NETISR_DISPATCH_HYBRID) { 723*f2d2d694SRobert Watson *cpuidp = curcpu; 724*f2d2d694SRobert Watson return (m); 725*f2d2d694SRobert Watson } 726*f2d2d694SRobert Watson policy = NETISR_POLICY_SOURCE; 727*f2d2d694SRobert Watson } 728*f2d2d694SRobert Watson 729*f2d2d694SRobert Watson if (policy == NETISR_POLICY_FLOW) { 730d4b5cae4SRobert Watson if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) { 731d4b5cae4SRobert Watson m = npp->np_m2flow(m, source); 732d4b5cae4SRobert Watson if (m == NULL) 733d4b5cae4SRobert Watson return (NULL); 734d4b5cae4SRobert Watson } 735d4b5cae4SRobert Watson if (m->m_flags & M_FLOWID) { 736d4b5cae4SRobert Watson *cpuidp = 737d4b5cae4SRobert Watson netisr_default_flow2cpu(m->m_pkthdr.flowid); 738d4b5cae4SRobert Watson return (m); 739d4b5cae4SRobert Watson } 740*f2d2d694SRobert Watson policy = NETISR_POLICY_SOURCE; 741*f2d2d694SRobert Watson } 742d4b5cae4SRobert Watson 743*f2d2d694SRobert Watson KASSERT(policy == NETISR_POLICY_SOURCE, 744*f2d2d694SRobert Watson ("%s: invalid policy %u for %s", __func__, npp->np_policy, 745*f2d2d694SRobert Watson npp->np_name)); 746*f2d2d694SRobert Watson 747d4b5cae4SRobert Watson ifp = m->m_pkthdr.rcvif; 748d4b5cae4SRobert Watson if (ifp != NULL) 749*f2d2d694SRobert Watson *cpuidp = nws_array[(ifp->if_index + source) % nws_count]; 750d4b5cae4SRobert Watson else 751d4b5cae4SRobert Watson *cpuidp = nws_array[source % nws_count]; 752d4b5cae4SRobert Watson return (m); 753d4b5cae4SRobert Watson } 754d4b5cae4SRobert Watson 755d4b5cae4SRobert Watson /* 756d4b5cae4SRobert Watson * Process packets associated with a workstream and protocol. For reasons of 757d4b5cae4SRobert Watson * fairness, we process up to one complete netisr queue at a time, moving the 758d4b5cae4SRobert Watson * queue to a stack-local queue for processing, but do not loop refreshing 759d4b5cae4SRobert Watson * from the global queue. The caller is responsible for deciding whether to 760d4b5cae4SRobert Watson * loop, and for setting the NWS_RUNNING flag. The passed workstream will be 761d4b5cae4SRobert Watson * locked on entry and relocked before return, but will be released while 762d4b5cae4SRobert Watson * processing. The number of packets processed is returned. 763d4b5cae4SRobert Watson */ 764d4b5cae4SRobert Watson static u_int 765d4b5cae4SRobert Watson netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto) 766d4b5cae4SRobert Watson { 767d4b5cae4SRobert Watson struct netisr_work local_npw, *npwp; 768d4b5cae4SRobert Watson u_int handled; 769d4b5cae4SRobert Watson struct mbuf *m; 770d4b5cae4SRobert Watson 771d4b5cae4SRobert Watson NETISR_LOCK_ASSERT(); 772d4b5cae4SRobert Watson NWS_LOCK_ASSERT(nwsp); 773d4b5cae4SRobert Watson 774d4b5cae4SRobert Watson KASSERT(nwsp->nws_flags & NWS_RUNNING, 775d4b5cae4SRobert Watson ("%s(%u): not running", __func__, proto)); 776d4b5cae4SRobert Watson KASSERT(proto >= 0 && proto < NETISR_MAXPROT, 777d4b5cae4SRobert Watson ("%s(%u): invalid proto\n", __func__, proto)); 778d4b5cae4SRobert Watson 779d4b5cae4SRobert Watson npwp = &nwsp->nws_work[proto]; 780d4b5cae4SRobert Watson if (npwp->nw_len == 0) 781d4b5cae4SRobert Watson return (0); 782d4b5cae4SRobert Watson 783d4b5cae4SRobert Watson /* 784d4b5cae4SRobert Watson * Move the global work queue to a thread-local work queue. 785d4b5cae4SRobert Watson * 786d4b5cae4SRobert Watson * Notice that this means the effective maximum length of the queue 787d4b5cae4SRobert Watson * is actually twice that of the maximum queue length specified in 788d4b5cae4SRobert Watson * the protocol registration call. 789d4b5cae4SRobert Watson */ 790d4b5cae4SRobert Watson handled = npwp->nw_len; 791d4b5cae4SRobert Watson local_npw = *npwp; 792d4b5cae4SRobert Watson npwp->nw_head = NULL; 793d4b5cae4SRobert Watson npwp->nw_tail = NULL; 794d4b5cae4SRobert Watson npwp->nw_len = 0; 795d4b5cae4SRobert Watson nwsp->nws_pendingbits &= ~(1 << proto); 796d4b5cae4SRobert Watson NWS_UNLOCK(nwsp); 797d4b5cae4SRobert Watson while ((m = local_npw.nw_head) != NULL) { 798d4b5cae4SRobert Watson local_npw.nw_head = m->m_nextpkt; 799d4b5cae4SRobert Watson m->m_nextpkt = NULL; 800d4b5cae4SRobert Watson if (local_npw.nw_head == NULL) 801d4b5cae4SRobert Watson local_npw.nw_tail = NULL; 802d4b5cae4SRobert Watson local_npw.nw_len--; 8030028e524SBjoern A. Zeeb VNET_ASSERT(m->m_pkthdr.rcvif != NULL, 8040028e524SBjoern A. Zeeb ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m)); 805d4b5cae4SRobert Watson CURVNET_SET(m->m_pkthdr.rcvif->if_vnet); 806938448cdSRobert Watson netisr_proto[proto].np_handler(m); 807d4b5cae4SRobert Watson CURVNET_RESTORE(); 808d4b5cae4SRobert Watson } 809d4b5cae4SRobert Watson KASSERT(local_npw.nw_len == 0, 810d4b5cae4SRobert Watson ("%s(%u): len %u", __func__, proto, local_npw.nw_len)); 811938448cdSRobert Watson if (netisr_proto[proto].np_drainedcpu) 812938448cdSRobert Watson netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu); 813d4b5cae4SRobert Watson NWS_LOCK(nwsp); 814d4b5cae4SRobert Watson npwp->nw_handled += handled; 815d4b5cae4SRobert Watson return (handled); 816d4b5cae4SRobert Watson } 817d4b5cae4SRobert Watson 818d4b5cae4SRobert Watson /* 8190a32e29fSRobert Watson * SWI handler for netisr -- processes packets in a set of workstreams that 820d4b5cae4SRobert Watson * it owns, woken up by calls to NWS_SIGNAL(). If this workstream is already 821d4b5cae4SRobert Watson * being direct dispatched, go back to sleep and wait for the dispatching 822d4b5cae4SRobert Watson * thread to wake us up again. 823d4b5cae4SRobert Watson */ 824d4b5cae4SRobert Watson static void 825d4b5cae4SRobert Watson swi_net(void *arg) 826d4b5cae4SRobert Watson { 827d4b5cae4SRobert Watson #ifdef NETISR_LOCKING 828d4b5cae4SRobert Watson struct rm_priotracker tracker; 829d4b5cae4SRobert Watson #endif 830d4b5cae4SRobert Watson struct netisr_workstream *nwsp; 831d4b5cae4SRobert Watson u_int bits, prot; 832d4b5cae4SRobert Watson 833d4b5cae4SRobert Watson nwsp = arg; 834d4b5cae4SRobert Watson 8351cafed39SJonathan Lemon #ifdef DEVICE_POLLING 836d4b5cae4SRobert Watson KASSERT(nws_count == 1, 837d4b5cae4SRobert Watson ("%s: device_polling but nws_count != 1", __func__)); 838d4b5cae4SRobert Watson netisr_poll(); 839d4b5cae4SRobert Watson #endif 840d4b5cae4SRobert Watson #ifdef NETISR_LOCKING 841d4b5cae4SRobert Watson NETISR_RLOCK(&tracker); 842d4b5cae4SRobert Watson #endif 843d4b5cae4SRobert Watson NWS_LOCK(nwsp); 844d4b5cae4SRobert Watson KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running")); 845d4b5cae4SRobert Watson if (nwsp->nws_flags & NWS_DISPATCHING) 846d4b5cae4SRobert Watson goto out; 847d4b5cae4SRobert Watson nwsp->nws_flags |= NWS_RUNNING; 848d4b5cae4SRobert Watson nwsp->nws_flags &= ~NWS_SCHEDULED; 849d4b5cae4SRobert Watson while ((bits = nwsp->nws_pendingbits) != 0) { 850d4b5cae4SRobert Watson while ((prot = ffs(bits)) != 0) { 851d4b5cae4SRobert Watson prot--; 852d4b5cae4SRobert Watson bits &= ~(1 << prot); 853d4b5cae4SRobert Watson (void)netisr_process_workstream_proto(nwsp, prot); 854d4b5cae4SRobert Watson } 855d4b5cae4SRobert Watson } 856d4b5cae4SRobert Watson nwsp->nws_flags &= ~NWS_RUNNING; 857d4b5cae4SRobert Watson out: 858d4b5cae4SRobert Watson NWS_UNLOCK(nwsp); 859d4b5cae4SRobert Watson #ifdef NETISR_LOCKING 860d4b5cae4SRobert Watson NETISR_RUNLOCK(&tracker); 861d4b5cae4SRobert Watson #endif 862d4b5cae4SRobert Watson #ifdef DEVICE_POLLING 863d4b5cae4SRobert Watson netisr_pollmore(); 864d4b5cae4SRobert Watson #endif 865d4b5cae4SRobert Watson } 866d4b5cae4SRobert Watson 867d4b5cae4SRobert Watson static int 868d4b5cae4SRobert Watson netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto, 869d4b5cae4SRobert Watson struct netisr_work *npwp, struct mbuf *m, int *dosignalp) 870d4b5cae4SRobert Watson { 871d4b5cae4SRobert Watson 872d4b5cae4SRobert Watson NWS_LOCK_ASSERT(nwsp); 873d4b5cae4SRobert Watson 874d4b5cae4SRobert Watson *dosignalp = 0; 875d4b5cae4SRobert Watson if (npwp->nw_len < npwp->nw_qlimit) { 876d4b5cae4SRobert Watson m->m_nextpkt = NULL; 877d4b5cae4SRobert Watson if (npwp->nw_head == NULL) { 878d4b5cae4SRobert Watson npwp->nw_head = m; 879d4b5cae4SRobert Watson npwp->nw_tail = m; 880d4b5cae4SRobert Watson } else { 881d4b5cae4SRobert Watson npwp->nw_tail->m_nextpkt = m; 882d4b5cae4SRobert Watson npwp->nw_tail = m; 883d4b5cae4SRobert Watson } 884d4b5cae4SRobert Watson npwp->nw_len++; 885d4b5cae4SRobert Watson if (npwp->nw_len > npwp->nw_watermark) 886d4b5cae4SRobert Watson npwp->nw_watermark = npwp->nw_len; 8870a32e29fSRobert Watson 8880a32e29fSRobert Watson /* 8890a32e29fSRobert Watson * We must set the bit regardless of NWS_RUNNING, so that 8900a32e29fSRobert Watson * swi_net() keeps calling netisr_process_workstream_proto(). 8910a32e29fSRobert Watson */ 892d4b5cae4SRobert Watson nwsp->nws_pendingbits |= (1 << proto); 893d4b5cae4SRobert Watson if (!(nwsp->nws_flags & 894d4b5cae4SRobert Watson (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) { 895d4b5cae4SRobert Watson nwsp->nws_flags |= NWS_SCHEDULED; 896d4b5cae4SRobert Watson *dosignalp = 1; /* Defer until unlocked. */ 897d4b5cae4SRobert Watson } 898d4b5cae4SRobert Watson npwp->nw_queued++; 899d4b5cae4SRobert Watson return (0); 900d4b5cae4SRobert Watson } else { 901ba3b25b3SBjoern A. Zeeb m_freem(m); 902d4b5cae4SRobert Watson npwp->nw_qdrops++; 903d4b5cae4SRobert Watson return (ENOBUFS); 904d4b5cae4SRobert Watson } 905d4b5cae4SRobert Watson } 906d4b5cae4SRobert Watson 907d4b5cae4SRobert Watson static int 908d4b5cae4SRobert Watson netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid) 909d4b5cae4SRobert Watson { 910d4b5cae4SRobert Watson struct netisr_workstream *nwsp; 911d4b5cae4SRobert Watson struct netisr_work *npwp; 912d4b5cae4SRobert Watson int dosignal, error; 913d4b5cae4SRobert Watson 914d4b5cae4SRobert Watson #ifdef NETISR_LOCKING 915d4b5cae4SRobert Watson NETISR_LOCK_ASSERT(); 916d4b5cae4SRobert Watson #endif 9179e6e01ebSRobert Watson KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__, 9189e6e01ebSRobert Watson cpuid, mp_maxid)); 91953402767SRobert Watson KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid)); 920d4b5cae4SRobert Watson 921d4b5cae4SRobert Watson dosignal = 0; 922d4b5cae4SRobert Watson error = 0; 92353402767SRobert Watson nwsp = DPCPU_ID_PTR(cpuid, nws); 924d4b5cae4SRobert Watson npwp = &nwsp->nws_work[proto]; 925d4b5cae4SRobert Watson NWS_LOCK(nwsp); 926d4b5cae4SRobert Watson error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal); 927d4b5cae4SRobert Watson NWS_UNLOCK(nwsp); 928d4b5cae4SRobert Watson if (dosignal) 929d4b5cae4SRobert Watson NWS_SIGNAL(nwsp); 930d4b5cae4SRobert Watson return (error); 931d4b5cae4SRobert Watson } 932d4b5cae4SRobert Watson 933d4b5cae4SRobert Watson int 934d4b5cae4SRobert Watson netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m) 935d4b5cae4SRobert Watson { 936d4b5cae4SRobert Watson #ifdef NETISR_LOCKING 937d4b5cae4SRobert Watson struct rm_priotracker tracker; 938d4b5cae4SRobert Watson #endif 939d4b5cae4SRobert Watson u_int cpuid; 940d4b5cae4SRobert Watson int error; 941d4b5cae4SRobert Watson 942d4b5cae4SRobert Watson KASSERT(proto < NETISR_MAXPROT, 943d4b5cae4SRobert Watson ("%s: invalid proto %u", __func__, proto)); 944d4b5cae4SRobert Watson 945d4b5cae4SRobert Watson #ifdef NETISR_LOCKING 946d4b5cae4SRobert Watson NETISR_RLOCK(&tracker); 947d4b5cae4SRobert Watson #endif 948938448cdSRobert Watson KASSERT(netisr_proto[proto].np_handler != NULL, 949d4b5cae4SRobert Watson ("%s: invalid proto %u", __func__, proto)); 950d4b5cae4SRobert Watson 951*f2d2d694SRobert Watson m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED, 952*f2d2d694SRobert Watson source, m, &cpuid); 95353402767SRobert Watson if (m != NULL) { 95453402767SRobert Watson KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, 95553402767SRobert Watson cpuid)); 956d4b5cae4SRobert Watson error = netisr_queue_internal(proto, m, cpuid); 95753402767SRobert Watson } else 958d4b5cae4SRobert Watson error = ENOBUFS; 959d4b5cae4SRobert Watson #ifdef NETISR_LOCKING 960d4b5cae4SRobert Watson NETISR_RUNLOCK(&tracker); 961d4b5cae4SRobert Watson #endif 962d4b5cae4SRobert Watson return (error); 963d4b5cae4SRobert Watson } 964d4b5cae4SRobert Watson 965d4b5cae4SRobert Watson int 966d4b5cae4SRobert Watson netisr_queue(u_int proto, struct mbuf *m) 967d4b5cae4SRobert Watson { 968d4b5cae4SRobert Watson 969d4b5cae4SRobert Watson return (netisr_queue_src(proto, 0, m)); 970d4b5cae4SRobert Watson } 971d4b5cae4SRobert Watson 972d4b5cae4SRobert Watson /* 9730a32e29fSRobert Watson * Dispatch a packet for netisr processing; direct dispatch is permitted by 974d4b5cae4SRobert Watson * calling context. 975d4b5cae4SRobert Watson */ 976d4b5cae4SRobert Watson int 977d4b5cae4SRobert Watson netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m) 978d4b5cae4SRobert Watson { 979d4b5cae4SRobert Watson #ifdef NETISR_LOCKING 980d4b5cae4SRobert Watson struct rm_priotracker tracker; 981d4b5cae4SRobert Watson #endif 982d4b5cae4SRobert Watson struct netisr_workstream *nwsp; 983*f2d2d694SRobert Watson struct netisr_proto *npp; 984d4b5cae4SRobert Watson struct netisr_work *npwp; 985d4b5cae4SRobert Watson int dosignal, error; 986*f2d2d694SRobert Watson u_int cpuid, dispatch_policy; 987d4b5cae4SRobert Watson 988d4b5cae4SRobert Watson KASSERT(proto < NETISR_MAXPROT, 989d4b5cae4SRobert Watson ("%s: invalid proto %u", __func__, proto)); 990d4b5cae4SRobert Watson #ifdef NETISR_LOCKING 991d4b5cae4SRobert Watson NETISR_RLOCK(&tracker); 992d4b5cae4SRobert Watson #endif 993*f2d2d694SRobert Watson npp = &netisr_proto[proto]; 994*f2d2d694SRobert Watson KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__, 995*f2d2d694SRobert Watson proto)); 996*f2d2d694SRobert Watson 997*f2d2d694SRobert Watson dispatch_policy = netisr_get_dispatch(npp); 998*f2d2d694SRobert Watson if (dispatch_policy == NETISR_DISPATCH_DEFERRED) 999*f2d2d694SRobert Watson return (netisr_queue_src(proto, source, m)); 1000d4b5cae4SRobert Watson 1001d4b5cae4SRobert Watson /* 1002d4b5cae4SRobert Watson * If direct dispatch is forced, then unconditionally dispatch 1003d4b5cae4SRobert Watson * without a formal CPU selection. Borrow the current CPU's stats, 1004d4b5cae4SRobert Watson * even if there's no worker on it. In this case we don't update 1005d4b5cae4SRobert Watson * nws_flags because all netisr processing will be source ordered due 1006d4b5cae4SRobert Watson * to always being forced to directly dispatch. 1007d4b5cae4SRobert Watson */ 1008*f2d2d694SRobert Watson if (dispatch_policy == NETISR_DISPATCH_DIRECT) { 100953402767SRobert Watson nwsp = DPCPU_PTR(nws); 1010d4b5cae4SRobert Watson npwp = &nwsp->nws_work[proto]; 1011d4b5cae4SRobert Watson npwp->nw_dispatched++; 1012d4b5cae4SRobert Watson npwp->nw_handled++; 1013938448cdSRobert Watson netisr_proto[proto].np_handler(m); 1014d4b5cae4SRobert Watson error = 0; 1015d4b5cae4SRobert Watson goto out_unlock; 1016d4b5cae4SRobert Watson } 1017d4b5cae4SRobert Watson 1018*f2d2d694SRobert Watson KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID, 1019*f2d2d694SRobert Watson ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy)); 1020*f2d2d694SRobert Watson 1021d4b5cae4SRobert Watson /* 1022d4b5cae4SRobert Watson * Otherwise, we execute in a hybrid mode where we will try to direct 1023d4b5cae4SRobert Watson * dispatch if we're on the right CPU and the netisr worker isn't 1024d4b5cae4SRobert Watson * already running. 1025d4b5cae4SRobert Watson */ 1026*f2d2d694SRobert Watson sched_pin(); 1027*f2d2d694SRobert Watson m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID, 1028*f2d2d694SRobert Watson source, m, &cpuid); 1029d4b5cae4SRobert Watson if (m == NULL) { 1030d4b5cae4SRobert Watson error = ENOBUFS; 1031*f2d2d694SRobert Watson goto out_unpin; 1032d4b5cae4SRobert Watson } 103353402767SRobert Watson KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid)); 1034d4b5cae4SRobert Watson if (cpuid != curcpu) 1035d4b5cae4SRobert Watson goto queue_fallback; 103653402767SRobert Watson nwsp = DPCPU_PTR(nws); 1037d4b5cae4SRobert Watson npwp = &nwsp->nws_work[proto]; 1038d4b5cae4SRobert Watson 1039d4b5cae4SRobert Watson /*- 1040d4b5cae4SRobert Watson * We are willing to direct dispatch only if three conditions hold: 1041d4b5cae4SRobert Watson * 1042d4b5cae4SRobert Watson * (1) The netisr worker isn't already running, 1043d4b5cae4SRobert Watson * (2) Another thread isn't already directly dispatching, and 1044d4b5cae4SRobert Watson * (3) The netisr hasn't already been woken up. 1045d4b5cae4SRobert Watson */ 1046d4b5cae4SRobert Watson NWS_LOCK(nwsp); 1047d4b5cae4SRobert Watson if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) { 1048d4b5cae4SRobert Watson error = netisr_queue_workstream(nwsp, proto, npwp, m, 1049d4b5cae4SRobert Watson &dosignal); 105053402767SRobert Watson NWS_UNLOCK(nwsp); 1051d4b5cae4SRobert Watson if (dosignal) 1052d4b5cae4SRobert Watson NWS_SIGNAL(nwsp); 1053d4b5cae4SRobert Watson goto out_unpin; 1054d4b5cae4SRobert Watson } 1055d4b5cae4SRobert Watson 1056d4b5cae4SRobert Watson /* 1057d4b5cae4SRobert Watson * The current thread is now effectively the netisr worker, so set 1058d4b5cae4SRobert Watson * the dispatching flag to prevent concurrent processing of the 1059d4b5cae4SRobert Watson * stream from another thread (even the netisr worker), which could 1060d4b5cae4SRobert Watson * otherwise lead to effective misordering of the stream. 1061d4b5cae4SRobert Watson */ 1062d4b5cae4SRobert Watson nwsp->nws_flags |= NWS_DISPATCHING; 1063d4b5cae4SRobert Watson NWS_UNLOCK(nwsp); 1064938448cdSRobert Watson netisr_proto[proto].np_handler(m); 1065d4b5cae4SRobert Watson NWS_LOCK(nwsp); 1066d4b5cae4SRobert Watson nwsp->nws_flags &= ~NWS_DISPATCHING; 1067d4b5cae4SRobert Watson npwp->nw_handled++; 1068d4b5cae4SRobert Watson npwp->nw_hybrid_dispatched++; 1069d4b5cae4SRobert Watson 1070d4b5cae4SRobert Watson /* 1071d4b5cae4SRobert Watson * If other work was enqueued by another thread while we were direct 1072d4b5cae4SRobert Watson * dispatching, we need to signal the netisr worker to do that work. 1073d4b5cae4SRobert Watson * In the future, we might want to do some of that work in the 1074d4b5cae4SRobert Watson * current thread, rather than trigger further context switches. If 1075d4b5cae4SRobert Watson * so, we'll want to establish a reasonable bound on the work done in 1076d4b5cae4SRobert Watson * the "borrowed" context. 1077d4b5cae4SRobert Watson */ 1078d4b5cae4SRobert Watson if (nwsp->nws_pendingbits != 0) { 1079d4b5cae4SRobert Watson nwsp->nws_flags |= NWS_SCHEDULED; 1080d4b5cae4SRobert Watson dosignal = 1; 1081d4b5cae4SRobert Watson } else 1082d4b5cae4SRobert Watson dosignal = 0; 1083d4b5cae4SRobert Watson NWS_UNLOCK(nwsp); 1084d4b5cae4SRobert Watson if (dosignal) 1085d4b5cae4SRobert Watson NWS_SIGNAL(nwsp); 1086d4b5cae4SRobert Watson error = 0; 1087d4b5cae4SRobert Watson goto out_unpin; 1088d4b5cae4SRobert Watson 1089d4b5cae4SRobert Watson queue_fallback: 1090d4b5cae4SRobert Watson error = netisr_queue_internal(proto, m, cpuid); 1091d4b5cae4SRobert Watson out_unpin: 1092d4b5cae4SRobert Watson sched_unpin(); 1093d4b5cae4SRobert Watson out_unlock: 1094d4b5cae4SRobert Watson #ifdef NETISR_LOCKING 1095d4b5cae4SRobert Watson NETISR_RUNLOCK(&tracker); 1096d4b5cae4SRobert Watson #endif 1097d4b5cae4SRobert Watson return (error); 1098d4b5cae4SRobert Watson } 1099d4b5cae4SRobert Watson 1100d4b5cae4SRobert Watson int 1101d4b5cae4SRobert Watson netisr_dispatch(u_int proto, struct mbuf *m) 1102d4b5cae4SRobert Watson { 1103d4b5cae4SRobert Watson 1104d4b5cae4SRobert Watson return (netisr_dispatch_src(proto, 0, m)); 1105d4b5cae4SRobert Watson } 1106d4b5cae4SRobert Watson 1107d4b5cae4SRobert Watson #ifdef DEVICE_POLLING 1108d4b5cae4SRobert Watson /* 1109d4b5cae4SRobert Watson * Kernel polling borrows a netisr thread to run interface polling in; this 1110d4b5cae4SRobert Watson * function allows kernel polling to request that the netisr thread be 1111d4b5cae4SRobert Watson * scheduled even if no packets are pending for protocols. 1112d4b5cae4SRobert Watson */ 1113d4b5cae4SRobert Watson void 1114d4b5cae4SRobert Watson netisr_sched_poll(void) 1115d4b5cae4SRobert Watson { 1116d4b5cae4SRobert Watson struct netisr_workstream *nwsp; 1117d4b5cae4SRobert Watson 111853402767SRobert Watson nwsp = DPCPU_ID_PTR(nws_array[0], nws); 1119d4b5cae4SRobert Watson NWS_SIGNAL(nwsp); 1120d4b5cae4SRobert Watson } 11211cafed39SJonathan Lemon #endif 1122e3b6e33cSJake Burkholder 1123d4b5cae4SRobert Watson static void 1124d4b5cae4SRobert Watson netisr_start_swi(u_int cpuid, struct pcpu *pc) 1125d4b5cae4SRobert Watson { 1126d4b5cae4SRobert Watson char swiname[12]; 1127d4b5cae4SRobert Watson struct netisr_workstream *nwsp; 1128d4b5cae4SRobert Watson int error; 1129d4b5cae4SRobert Watson 113053402767SRobert Watson KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid)); 113153402767SRobert Watson 113253402767SRobert Watson nwsp = DPCPU_ID_PTR(cpuid, nws); 1133d4b5cae4SRobert Watson mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF); 1134d4b5cae4SRobert Watson nwsp->nws_cpu = cpuid; 1135d4b5cae4SRobert Watson snprintf(swiname, sizeof(swiname), "netisr %u", cpuid); 1136d4b5cae4SRobert Watson error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp, 1137d4b5cae4SRobert Watson SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie); 1138d4b5cae4SRobert Watson if (error) 1139d4b5cae4SRobert Watson panic("%s: swi_add %d", __func__, error); 1140d4b5cae4SRobert Watson pc->pc_netisr = nwsp->nws_intr_event; 1141d4b5cae4SRobert Watson if (netisr_bindthreads) { 1142d4b5cae4SRobert Watson error = intr_event_bind(nwsp->nws_intr_event, cpuid); 1143d4b5cae4SRobert Watson if (error != 0) 1144d4b5cae4SRobert Watson printf("%s: cpu %u: intr_event_bind: %d", __func__, 1145d4b5cae4SRobert Watson cpuid, error); 1146e3b6e33cSJake Burkholder } 1147d4b5cae4SRobert Watson NETISR_WLOCK(); 1148d4b5cae4SRobert Watson nws_array[nws_count] = nwsp->nws_cpu; 1149d4b5cae4SRobert Watson nws_count++; 1150d4b5cae4SRobert Watson NETISR_WUNLOCK(); 1151e3b6e33cSJake Burkholder } 1152e3b6e33cSJake Burkholder 1153d4b5cae4SRobert Watson /* 1154d4b5cae4SRobert Watson * Initialize the netisr subsystem. We rely on BSS and static initialization 1155d4b5cae4SRobert Watson * of most fields in global data structures. 1156d4b5cae4SRobert Watson * 1157d4b5cae4SRobert Watson * Start a worker thread for the boot CPU so that we can support network 1158d4b5cae4SRobert Watson * traffic immediately in case the network stack is used before additional 1159d4b5cae4SRobert Watson * CPUs are started (for example, diskless boot). 1160d4b5cae4SRobert Watson */ 1161e3b6e33cSJake Burkholder static void 1162d4b5cae4SRobert Watson netisr_init(void *arg) 1163e3b6e33cSJake Burkholder { 1164*f2d2d694SRobert Watson char tmp[NETISR_DISPATCH_POLICY_MAXSTR]; 1165*f2d2d694SRobert Watson u_int dispatch_policy; 1166*f2d2d694SRobert Watson int error; 1167e3b6e33cSJake Burkholder 1168d4b5cae4SRobert Watson KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__)); 1169d4b5cae4SRobert Watson 1170d4b5cae4SRobert Watson NETISR_LOCK_INIT(); 11719e6e01ebSRobert Watson if (netisr_maxthreads < 1) 1172d4b5cae4SRobert Watson netisr_maxthreads = 1; 11739e6e01ebSRobert Watson if (netisr_maxthreads > mp_ncpus) { 1174912f6323SRobert Watson printf("netisr_init: forcing maxthreads from %d to %d\n", 11759e6e01ebSRobert Watson netisr_maxthreads, mp_ncpus); 11769e6e01ebSRobert Watson netisr_maxthreads = mp_ncpus; 1177ed54411cSRobert Watson } 1178ed54411cSRobert Watson if (netisr_defaultqlimit > netisr_maxqlimit) { 1179912f6323SRobert Watson printf("netisr_init: forcing defaultqlimit from %d to %d\n", 11809e6e01ebSRobert Watson netisr_defaultqlimit, netisr_maxqlimit); 1181d4b5cae4SRobert Watson netisr_defaultqlimit = netisr_maxqlimit; 1182ed54411cSRobert Watson } 1183d4b5cae4SRobert Watson #ifdef DEVICE_POLLING 1184d4b5cae4SRobert Watson /* 1185d4b5cae4SRobert Watson * The device polling code is not yet aware of how to deal with 1186d4b5cae4SRobert Watson * multiple netisr threads, so for the time being compiling in device 1187d4b5cae4SRobert Watson * polling disables parallel netisr workers. 1188d4b5cae4SRobert Watson */ 1189ed54411cSRobert Watson if (netisr_maxthreads != 1 || netisr_bindthreads != 0) { 1190912f6323SRobert Watson printf("netisr_init: forcing maxthreads to 1 and " 1191912f6323SRobert Watson "bindthreads to 0 for device polling\n"); 1192d4b5cae4SRobert Watson netisr_maxthreads = 1; 1193d4b5cae4SRobert Watson netisr_bindthreads = 0; 1194ed54411cSRobert Watson } 1195d4b5cae4SRobert Watson #endif 1196d4b5cae4SRobert Watson 1197*f2d2d694SRobert Watson if (TUNABLE_STR_FETCH("net.isr.dispatch", tmp, sizeof(tmp))) { 1198*f2d2d694SRobert Watson error = netisr_dispatch_policy_from_str(tmp, 1199*f2d2d694SRobert Watson &dispatch_policy); 1200*f2d2d694SRobert Watson if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT) 1201*f2d2d694SRobert Watson error = EINVAL; 1202*f2d2d694SRobert Watson if (error == 0) { 1203*f2d2d694SRobert Watson netisr_dispatch_policy = dispatch_policy; 1204*f2d2d694SRobert Watson netisr_dispatch_policy_compat(); 1205*f2d2d694SRobert Watson } else 1206*f2d2d694SRobert Watson printf( 1207*f2d2d694SRobert Watson "%s: invalid dispatch policy %s, using default\n", 1208*f2d2d694SRobert Watson __func__, tmp); 1209*f2d2d694SRobert Watson } 1210*f2d2d694SRobert Watson 1211d4b5cae4SRobert Watson netisr_start_swi(curcpu, pcpu_find(curcpu)); 1212e3b6e33cSJake Burkholder } 1213d4b5cae4SRobert Watson SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL); 1214d4b5cae4SRobert Watson 1215d4b5cae4SRobert Watson /* 1216d4b5cae4SRobert Watson * Start worker threads for additional CPUs. No attempt to gracefully handle 1217d4b5cae4SRobert Watson * work reassignment, we don't yet support dynamic reconfiguration. 1218d4b5cae4SRobert Watson */ 1219d4b5cae4SRobert Watson static void 1220d4b5cae4SRobert Watson netisr_start(void *arg) 1221d4b5cae4SRobert Watson { 1222d4b5cae4SRobert Watson struct pcpu *pc; 1223d4b5cae4SRobert Watson 1224d4b5cae4SRobert Watson SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { 1225d4b5cae4SRobert Watson if (nws_count >= netisr_maxthreads) 1226d4b5cae4SRobert Watson break; 1227d4b5cae4SRobert Watson /* XXXRW: Is skipping absent CPUs still required here? */ 1228d4b5cae4SRobert Watson if (CPU_ABSENT(pc->pc_cpuid)) 1229d4b5cae4SRobert Watson continue; 1230d4b5cae4SRobert Watson /* Worker will already be present for boot CPU. */ 1231d4b5cae4SRobert Watson if (pc->pc_netisr != NULL) 1232d4b5cae4SRobert Watson continue; 1233d4b5cae4SRobert Watson netisr_start_swi(pc->pc_cpuid, pc); 1234d4b5cae4SRobert Watson } 1235d4b5cae4SRobert Watson } 1236d4b5cae4SRobert Watson SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL); 1237d4b5cae4SRobert Watson 12382d22f334SRobert Watson /* 12392d22f334SRobert Watson * Sysctl monitoring for netisr: query a list of registered protocols. 12402d22f334SRobert Watson */ 12412d22f334SRobert Watson static int 12422d22f334SRobert Watson sysctl_netisr_proto(SYSCTL_HANDLER_ARGS) 12432d22f334SRobert Watson { 12442d22f334SRobert Watson struct rm_priotracker tracker; 12452d22f334SRobert Watson struct sysctl_netisr_proto *snpp, *snp_array; 12462d22f334SRobert Watson struct netisr_proto *npp; 12472d22f334SRobert Watson u_int counter, proto; 12482d22f334SRobert Watson int error; 12492d22f334SRobert Watson 12502d22f334SRobert Watson if (req->newptr != NULL) 12512d22f334SRobert Watson return (EINVAL); 12522d22f334SRobert Watson snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP, 12532d22f334SRobert Watson M_ZERO | M_WAITOK); 12542d22f334SRobert Watson counter = 0; 12552d22f334SRobert Watson NETISR_RLOCK(&tracker); 12562d22f334SRobert Watson for (proto = 0; proto < NETISR_MAXPROT; proto++) { 1257938448cdSRobert Watson npp = &netisr_proto[proto]; 12582d22f334SRobert Watson if (npp->np_name == NULL) 12592d22f334SRobert Watson continue; 12602d22f334SRobert Watson snpp = &snp_array[counter]; 12612d22f334SRobert Watson snpp->snp_version = sizeof(*snpp); 12622d22f334SRobert Watson strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN); 12632d22f334SRobert Watson snpp->snp_proto = proto; 12642d22f334SRobert Watson snpp->snp_qlimit = npp->np_qlimit; 12652d22f334SRobert Watson snpp->snp_policy = npp->np_policy; 1266*f2d2d694SRobert Watson snpp->snp_dispatch = npp->np_dispatch; 12672d22f334SRobert Watson if (npp->np_m2flow != NULL) 12682d22f334SRobert Watson snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW; 12692d22f334SRobert Watson if (npp->np_m2cpuid != NULL) 12702d22f334SRobert Watson snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID; 12712d22f334SRobert Watson if (npp->np_drainedcpu != NULL) 12722d22f334SRobert Watson snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU; 12732d22f334SRobert Watson counter++; 12742d22f334SRobert Watson } 12752d22f334SRobert Watson NETISR_RUNLOCK(&tracker); 12767f450febSRobert Watson KASSERT(counter <= NETISR_MAXPROT, 12772d22f334SRobert Watson ("sysctl_netisr_proto: counter too big (%d)", counter)); 12782d22f334SRobert Watson error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter); 12792d22f334SRobert Watson free(snp_array, M_TEMP); 12802d22f334SRobert Watson return (error); 12812d22f334SRobert Watson } 12822d22f334SRobert Watson 12832d22f334SRobert Watson SYSCTL_PROC(_net_isr, OID_AUTO, proto, 12842d22f334SRobert Watson CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto, 12852d22f334SRobert Watson "S,sysctl_netisr_proto", 12862d22f334SRobert Watson "Return list of protocols registered with netisr"); 12872d22f334SRobert Watson 12882d22f334SRobert Watson /* 12892d22f334SRobert Watson * Sysctl monitoring for netisr: query a list of workstreams. 12902d22f334SRobert Watson */ 12912d22f334SRobert Watson static int 12922d22f334SRobert Watson sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS) 12932d22f334SRobert Watson { 12942d22f334SRobert Watson struct rm_priotracker tracker; 12952d22f334SRobert Watson struct sysctl_netisr_workstream *snwsp, *snws_array; 12962d22f334SRobert Watson struct netisr_workstream *nwsp; 12972d22f334SRobert Watson u_int counter, cpuid; 12982d22f334SRobert Watson int error; 12992d22f334SRobert Watson 13002d22f334SRobert Watson if (req->newptr != NULL) 13012d22f334SRobert Watson return (EINVAL); 13022d22f334SRobert Watson snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP, 13032d22f334SRobert Watson M_ZERO | M_WAITOK); 13042d22f334SRobert Watson counter = 0; 13052d22f334SRobert Watson NETISR_RLOCK(&tracker); 13063aa6d94eSJohn Baldwin CPU_FOREACH(cpuid) { 13072d22f334SRobert Watson nwsp = DPCPU_ID_PTR(cpuid, nws); 13082d22f334SRobert Watson if (nwsp->nws_intr_event == NULL) 13092d22f334SRobert Watson continue; 13102d22f334SRobert Watson NWS_LOCK(nwsp); 13112d22f334SRobert Watson snwsp = &snws_array[counter]; 13122d22f334SRobert Watson snwsp->snws_version = sizeof(*snwsp); 13132d22f334SRobert Watson 13142d22f334SRobert Watson /* 13152d22f334SRobert Watson * For now, we equate workstream IDs and CPU IDs in the 13162d22f334SRobert Watson * kernel, but expose them independently to userspace in case 13172d22f334SRobert Watson * that assumption changes in the future. 13182d22f334SRobert Watson */ 13192d22f334SRobert Watson snwsp->snws_wsid = cpuid; 13202d22f334SRobert Watson snwsp->snws_cpu = cpuid; 13212d22f334SRobert Watson if (nwsp->nws_intr_event != NULL) 13222d22f334SRobert Watson snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR; 13232d22f334SRobert Watson NWS_UNLOCK(nwsp); 13242d22f334SRobert Watson counter++; 13252d22f334SRobert Watson } 13262d22f334SRobert Watson NETISR_RUNLOCK(&tracker); 13277f450febSRobert Watson KASSERT(counter <= MAXCPU, 13282d22f334SRobert Watson ("sysctl_netisr_workstream: counter too big (%d)", counter)); 13292d22f334SRobert Watson error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter); 13302d22f334SRobert Watson free(snws_array, M_TEMP); 13312d22f334SRobert Watson return (error); 13322d22f334SRobert Watson } 13332d22f334SRobert Watson 13342d22f334SRobert Watson SYSCTL_PROC(_net_isr, OID_AUTO, workstream, 13352d22f334SRobert Watson CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream, 13362d22f334SRobert Watson "S,sysctl_netisr_workstream", 13372d22f334SRobert Watson "Return list of workstreams implemented by netisr"); 13382d22f334SRobert Watson 13392d22f334SRobert Watson /* 13402d22f334SRobert Watson * Sysctl monitoring for netisr: query per-protocol data across all 13412d22f334SRobert Watson * workstreams. 13422d22f334SRobert Watson */ 13432d22f334SRobert Watson static int 13442d22f334SRobert Watson sysctl_netisr_work(SYSCTL_HANDLER_ARGS) 13452d22f334SRobert Watson { 13462d22f334SRobert Watson struct rm_priotracker tracker; 13472d22f334SRobert Watson struct sysctl_netisr_work *snwp, *snw_array; 13482d22f334SRobert Watson struct netisr_workstream *nwsp; 13492d22f334SRobert Watson struct netisr_proto *npp; 13502d22f334SRobert Watson struct netisr_work *nwp; 13512d22f334SRobert Watson u_int counter, cpuid, proto; 13522d22f334SRobert Watson int error; 13532d22f334SRobert Watson 13542d22f334SRobert Watson if (req->newptr != NULL) 13552d22f334SRobert Watson return (EINVAL); 13562d22f334SRobert Watson snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT, 13572d22f334SRobert Watson M_TEMP, M_ZERO | M_WAITOK); 13582d22f334SRobert Watson counter = 0; 13592d22f334SRobert Watson NETISR_RLOCK(&tracker); 13603aa6d94eSJohn Baldwin CPU_FOREACH(cpuid) { 13612d22f334SRobert Watson nwsp = DPCPU_ID_PTR(cpuid, nws); 13622d22f334SRobert Watson if (nwsp->nws_intr_event == NULL) 13632d22f334SRobert Watson continue; 13642d22f334SRobert Watson NWS_LOCK(nwsp); 13652d22f334SRobert Watson for (proto = 0; proto < NETISR_MAXPROT; proto++) { 1366938448cdSRobert Watson npp = &netisr_proto[proto]; 13672d22f334SRobert Watson if (npp->np_name == NULL) 13682d22f334SRobert Watson continue; 13692d22f334SRobert Watson nwp = &nwsp->nws_work[proto]; 13702d22f334SRobert Watson snwp = &snw_array[counter]; 13712d22f334SRobert Watson snwp->snw_version = sizeof(*snwp); 13722d22f334SRobert Watson snwp->snw_wsid = cpuid; /* See comment above. */ 13732d22f334SRobert Watson snwp->snw_proto = proto; 13742d22f334SRobert Watson snwp->snw_len = nwp->nw_len; 13752d22f334SRobert Watson snwp->snw_watermark = nwp->nw_watermark; 13762d22f334SRobert Watson snwp->snw_dispatched = nwp->nw_dispatched; 13772d22f334SRobert Watson snwp->snw_hybrid_dispatched = 13782d22f334SRobert Watson nwp->nw_hybrid_dispatched; 13792d22f334SRobert Watson snwp->snw_qdrops = nwp->nw_qdrops; 13802d22f334SRobert Watson snwp->snw_queued = nwp->nw_queued; 13812d22f334SRobert Watson snwp->snw_handled = nwp->nw_handled; 13822d22f334SRobert Watson counter++; 13832d22f334SRobert Watson } 13842d22f334SRobert Watson NWS_UNLOCK(nwsp); 13852d22f334SRobert Watson } 13867f450febSRobert Watson KASSERT(counter <= MAXCPU * NETISR_MAXPROT, 13872d22f334SRobert Watson ("sysctl_netisr_work: counter too big (%d)", counter)); 13882d22f334SRobert Watson NETISR_RUNLOCK(&tracker); 13892d22f334SRobert Watson error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter); 13902d22f334SRobert Watson free(snw_array, M_TEMP); 13912d22f334SRobert Watson return (error); 13922d22f334SRobert Watson } 13932d22f334SRobert Watson 13942d22f334SRobert Watson SYSCTL_PROC(_net_isr, OID_AUTO, work, 13952d22f334SRobert Watson CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work, 13962d22f334SRobert Watson "S,sysctl_netisr_work", 13972d22f334SRobert Watson "Return list of per-workstream, per-protocol work in netisr"); 13982d22f334SRobert Watson 1399d4b5cae4SRobert Watson #ifdef DDB 1400d4b5cae4SRobert Watson DB_SHOW_COMMAND(netisr, db_show_netisr) 1401d4b5cae4SRobert Watson { 1402d4b5cae4SRobert Watson struct netisr_workstream *nwsp; 1403d4b5cae4SRobert Watson struct netisr_work *nwp; 1404d4b5cae4SRobert Watson int first, proto; 140553402767SRobert Watson u_int cpuid; 1406d4b5cae4SRobert Watson 1407d4b5cae4SRobert Watson db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto", 1408d4b5cae4SRobert Watson "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue"); 14093aa6d94eSJohn Baldwin CPU_FOREACH(cpuid) { 141053402767SRobert Watson nwsp = DPCPU_ID_PTR(cpuid, nws); 1411d4b5cae4SRobert Watson if (nwsp->nws_intr_event == NULL) 1412d4b5cae4SRobert Watson continue; 1413d4b5cae4SRobert Watson first = 1; 1414d4b5cae4SRobert Watson for (proto = 0; proto < NETISR_MAXPROT; proto++) { 1415938448cdSRobert Watson if (netisr_proto[proto].np_handler == NULL) 1416d4b5cae4SRobert Watson continue; 1417d4b5cae4SRobert Watson nwp = &nwsp->nws_work[proto]; 1418d4b5cae4SRobert Watson if (first) { 141953402767SRobert Watson db_printf("%3d ", cpuid); 1420d4b5cae4SRobert Watson first = 0; 1421d4b5cae4SRobert Watson } else 1422d4b5cae4SRobert Watson db_printf("%3s ", ""); 1423d4b5cae4SRobert Watson db_printf( 1424d4b5cae4SRobert Watson "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n", 1425938448cdSRobert Watson netisr_proto[proto].np_name, nwp->nw_len, 1426d4b5cae4SRobert Watson nwp->nw_watermark, nwp->nw_qlimit, 1427d4b5cae4SRobert Watson nwp->nw_dispatched, nwp->nw_hybrid_dispatched, 1428d4b5cae4SRobert Watson nwp->nw_qdrops, nwp->nw_queued); 1429d4b5cae4SRobert Watson } 1430d4b5cae4SRobert Watson } 1431d4b5cae4SRobert Watson } 1432d4b5cae4SRobert Watson #endif 1433