xref: /freebsd/sys/net/netisr.c (revision 938448cd871ff41f72bef12c9bc43865260cd194)
11cafed39SJonathan Lemon /*-
2d4b5cae4SRobert Watson  * Copyright (c) 2007-2009 Robert N. M. Watson
32d22f334SRobert Watson  * Copyright (c) 2010 Juniper Networks, Inc.
4e3b6e33cSJake Burkholder  * All rights reserved.
5e3b6e33cSJake Burkholder  *
62d22f334SRobert Watson  * This software was developed by Robert N. M. Watson under contract
72d22f334SRobert Watson  * to Juniper Networks, Inc.
82d22f334SRobert Watson  *
9e3b6e33cSJake Burkholder  * Redistribution and use in source and binary forms, with or without
10e3b6e33cSJake Burkholder  * modification, are permitted provided that the following conditions
11e3b6e33cSJake Burkholder  * are met:
12e3b6e33cSJake Burkholder  * 1. Redistributions of source code must retain the above copyright
131cafed39SJonathan Lemon  *    notice, this list of conditions and the following disclaimer.
14e3b6e33cSJake Burkholder  * 2. Redistributions in binary form must reproduce the above copyright
15e3b6e33cSJake Burkholder  *    notice, this list of conditions and the following disclaimer in the
16e3b6e33cSJake Burkholder  *    documentation and/or other materials provided with the distribution.
17e3b6e33cSJake Burkholder  *
181cafed39SJonathan Lemon  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
191cafed39SJonathan Lemon  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
201cafed39SJonathan Lemon  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
211cafed39SJonathan Lemon  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
221cafed39SJonathan Lemon  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
231cafed39SJonathan Lemon  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
241cafed39SJonathan Lemon  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
251cafed39SJonathan Lemon  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
261cafed39SJonathan Lemon  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
271cafed39SJonathan Lemon  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
281cafed39SJonathan Lemon  * SUCH DAMAGE.
29e3b6e33cSJake Burkholder  */
30e3b6e33cSJake Burkholder 
31d4b5cae4SRobert Watson #include <sys/cdefs.h>
32d4b5cae4SRobert Watson __FBSDID("$FreeBSD$");
33d4b5cae4SRobert Watson 
34d4b5cae4SRobert Watson /*
35d4b5cae4SRobert Watson  * netisr is a packet dispatch service, allowing synchronous (directly
36d4b5cae4SRobert Watson  * dispatched) and asynchronous (deferred dispatch) processing of packets by
37d4b5cae4SRobert Watson  * registered protocol handlers.  Callers pass a protocol identifier and
38d4b5cae4SRobert Watson  * packet to netisr, along with a direct dispatch hint, and work will either
390a32e29fSRobert Watson  * be immediately processed by the registered handler, or passed to a
400a32e29fSRobert Watson  * software interrupt (SWI) thread for deferred dispatch.  Callers will
410a32e29fSRobert Watson  * generally select one or the other based on:
42d4b5cae4SRobert Watson  *
430a32e29fSRobert Watson  * - Whether directly dispatching a netisr handler leads to code reentrance or
44d4b5cae4SRobert Watson  *   lock recursion, such as entering the socket code from the socket code.
450a32e29fSRobert Watson  * - Whether directly dispatching a netisr handler leads to recursive
46d4b5cae4SRobert Watson  *   processing, such as when decapsulating several wrapped layers of tunnel
47d4b5cae4SRobert Watson  *   information (IPSEC within IPSEC within ...).
48d4b5cae4SRobert Watson  *
49d4b5cae4SRobert Watson  * Maintaining ordering for protocol streams is a critical design concern.
50d4b5cae4SRobert Watson  * Enforcing ordering limits the opportunity for concurrency, but maintains
51d4b5cae4SRobert Watson  * the strong ordering requirements found in some protocols, such as TCP.  Of
52d4b5cae4SRobert Watson  * related concern is CPU affinity--it is desirable to process all data
53d4b5cae4SRobert Watson  * associated with a particular stream on the same CPU over time in order to
54d4b5cae4SRobert Watson  * avoid acquiring locks associated with the connection on different CPUs,
55d4b5cae4SRobert Watson  * keep connection data in one cache, and to generally encourage associated
56d4b5cae4SRobert Watson  * user threads to live on the same CPU as the stream.  It's also desirable
57d4b5cae4SRobert Watson  * to avoid lock migration and contention where locks are associated with
58d4b5cae4SRobert Watson  * more than one flow.
59d4b5cae4SRobert Watson  *
60d4b5cae4SRobert Watson  * netisr supports several policy variations, represented by the
610a32e29fSRobert Watson  * NETISR_POLICY_* constants, allowing protocols to play various roles in
62d4b5cae4SRobert Watson  * identifying flows, assigning work to CPUs, etc.  These are described in
630a32e29fSRobert Watson  * netisr.h.
64d4b5cae4SRobert Watson  */
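
/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * caller hands a received packet to netisr together with its protocol
 * identifier; NETISR_IP is used here purely as an example.  On failure the
 * mbuf has already been freed by netisr, so the caller must not touch it:
 *
 *	error = netisr_dispatch(NETISR_IP, m);	(direct dispatch permitted)
 *
 * or, to force deferred processing in a SWI thread:
 *
 *	error = netisr_queue(NETISR_IP, m);
 *
 * Which of the two a caller uses generally follows the considerations
 * listed above.
 */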
65d4b5cae4SRobert Watson 
66d4b5cae4SRobert Watson #include "opt_ddb.h"
67f0796cd2SGleb Smirnoff #include "opt_device_polling.h"
681d8cd39eSRobert Watson 
69e3b6e33cSJake Burkholder #include <sys/param.h>
70e3b6e33cSJake Burkholder #include <sys/bus.h>
71e3b6e33cSJake Burkholder #include <sys/kernel.h>
721cafed39SJonathan Lemon #include <sys/kthread.h>
73d4b5cae4SRobert Watson #include <sys/interrupt.h>
741cafed39SJonathan Lemon #include <sys/lock.h>
751cafed39SJonathan Lemon #include <sys/mbuf.h>
76d4b5cae4SRobert Watson #include <sys/mutex.h>
7753402767SRobert Watson #include <sys/pcpu.h>
78d4b5cae4SRobert Watson #include <sys/proc.h>
79d4b5cae4SRobert Watson #include <sys/rmlock.h>
80d4b5cae4SRobert Watson #include <sys/sched.h>
81d4b5cae4SRobert Watson #include <sys/smp.h>
821cafed39SJonathan Lemon #include <sys/socket.h>
83d4b5cae4SRobert Watson #include <sys/sysctl.h>
84d4b5cae4SRobert Watson #include <sys/systm.h>
85d4b5cae4SRobert Watson 
86d4b5cae4SRobert Watson #ifdef DDB
87d4b5cae4SRobert Watson #include <ddb/ddb.h>
88d4b5cae4SRobert Watson #endif
891cafed39SJonathan Lemon 
90938448cdSRobert Watson #define	_WANT_NETISR_INTERNAL	/* Enable definitions from netisr_internal.h */
911cafed39SJonathan Lemon #include <net/if.h>
921cafed39SJonathan Lemon #include <net/if_var.h>
93e3b6e33cSJake Burkholder #include <net/netisr.h>
94938448cdSRobert Watson #include <net/netisr_internal.h>
95530c0060SRobert Watson #include <net/vnet.h>
96e3b6e33cSJake Burkholder 
97d4b5cae4SRobert Watson /*-
98d4b5cae4SRobert Watson  * Synchronize use and modification of the registered netisr data structures;
99d4b5cae4SRobert Watson  * a write lock is acquired while modifying the set of registered protocols
100d4b5cae4SRobert Watson  * to prevent partially registered or unregistered protocols from being run.
101d4b5cae4SRobert Watson  *
102d4b5cae4SRobert Watson  * The following data structures and fields are protected by this lock:
103d4b5cae4SRobert Watson  *
104938448cdSRobert Watson  * - The netisr_proto array, including all fields of struct netisr_proto.
105d4b5cae4SRobert Watson  * - The nws array, including all fields of struct netisr_worker.
106d4b5cae4SRobert Watson  * - The nws_array array.
107d4b5cae4SRobert Watson  *
108d4b5cae4SRobert Watson  * Note: the NETISR_LOCKING define controls whether read locks are acquired
109d4b5cae4SRobert Watson  * in packet processing paths requiring netisr registration stability.  This
1100a32e29fSRobert Watson  * is disabled by default as it can lead to measurable performance
111d4b5cae4SRobert Watson  * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
112d4b5cae4SRobert Watson  * because netisr registration and unregistration are extremely rare at
113d4b5cae4SRobert Watson  * runtime.  If they become more common, this decision should be revisited.
114d4b5cae4SRobert Watson  *
115d4b5cae4SRobert Watson  * XXXRW: rmlocks don't support assertions.
116d4b5cae4SRobert Watson  */
117d4b5cae4SRobert Watson static struct rmlock	netisr_rmlock;
118d4b5cae4SRobert Watson #define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
119d4b5cae4SRobert Watson 				    RM_NOWITNESS)
120d4b5cae4SRobert Watson #define	NETISR_LOCK_ASSERT()
121d4b5cae4SRobert Watson #define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
122d4b5cae4SRobert Watson #define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
123d4b5cae4SRobert Watson #define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
124d4b5cae4SRobert Watson #define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
125d4b5cae4SRobert Watson /* #define	NETISR_LOCKING */
126e3b6e33cSJake Burkholder 
127d4b5cae4SRobert Watson SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");
1281cafed39SJonathan Lemon 
129d4b5cae4SRobert Watson /*-
130d4b5cae4SRobert Watson  * Three direct dispatch policies are supported:
131d4b5cae4SRobert Watson  *
132d4b5cae4SRobert Watson  * - Always defer: all work is scheduled for a netisr, regardless of context.
133d4b5cae4SRobert Watson  *   (!direct)
134d4b5cae4SRobert Watson  *
135d4b5cae4SRobert Watson  * - Hybrid: if the executing context allows direct dispatch, and we're
136d4b5cae4SRobert Watson  *   running on the CPU the work would be done on, then direct dispatch if it
137d4b5cae4SRobert Watson  *   wouldn't violate ordering constraints on the workstream.
138d4b5cae4SRobert Watson  *   (direct && !direct_force)
139d4b5cae4SRobert Watson  *
140d4b5cae4SRobert Watson  * - Always direct: if the executing context allows direct dispatch, always
141d4b5cae4SRobert Watson  *   direct dispatch.  (direct && direct_force)
142d4b5cae4SRobert Watson  *
143d4b5cae4SRobert Watson  * Notice that changing the global policy could lead to short periods of
144d4b5cae4SRobert Watson  * misordered processing, but this is considered acceptable as compared to
145d4b5cae4SRobert Watson  * the complexity of enforcing ordering during policy changes.
146d4b5cae4SRobert Watson  */
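
/*
 * Editor's illustrative sketch (assuming standard sysctl(8) usage): the
 * policy above is selected with the two variables defined below, either as
 * loader tunables or at runtime, for example:
 *
 *	# sysctl net.isr.direct=0				(always defer)
 *	# sysctl net.isr.direct=1 net.isr.direct_force=0	(hybrid)
 *	# sysctl net.isr.direct=1 net.isr.direct_force=1	(always direct)
 */
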
147d4b5cae4SRobert Watson static int	netisr_direct_force = 1;	/* Always direct dispatch. */
148d4b5cae4SRobert Watson TUNABLE_INT("net.isr.direct_force", &netisr_direct_force);
149d4b5cae4SRobert Watson SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RW,
150d4b5cae4SRobert Watson     &netisr_direct_force, 0, "Force direct dispatch");
151e3b6e33cSJake Burkholder 
152d4b5cae4SRobert Watson static int	netisr_direct = 1;	/* Enable direct dispatch. */
153cea2165bSRobert Watson TUNABLE_INT("net.isr.direct", &netisr_direct);
154d4b5cae4SRobert Watson SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RW,
155d4b5cae4SRobert Watson     &netisr_direct, 0, "Enable direct dispatch");
1561cafed39SJonathan Lemon 
1571cafed39SJonathan Lemon /*
158d4b5cae4SRobert Watson  * Allow the administrator to limit the number of threads (CPUs) to use for
159d4b5cae4SRobert Watson  * netisr.  We don't check netisr_maxthreads before creating the thread for
160d4b5cae4SRobert Watson  * CPU 0, so in practice we ignore values <= 1.  This must be set at boot.
161d4b5cae4SRobert Watson  * We will create at most one thread per CPU.
1625fd04e38SRobert Watson  */
1639e6e01ebSRobert Watson static int	netisr_maxthreads = -1;		/* Max number of threads. */
164d4b5cae4SRobert Watson TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads);
16578494902SPawel Jakub Dawidek SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
166d4b5cae4SRobert Watson     &netisr_maxthreads, 0,
167d4b5cae4SRobert Watson     "Use at most this many CPUs for netisr processing");
1685fd04e38SRobert Watson 
169d4b5cae4SRobert Watson static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
170d4b5cae4SRobert Watson TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads);
17178494902SPawel Jakub Dawidek SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
172d4b5cae4SRobert Watson     &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");
173d4b5cae4SRobert Watson 
174d4b5cae4SRobert Watson /*
1750a32e29fSRobert Watson  * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit,
1760a32e29fSRobert Watson  * both for initial configuration and later modification using
1770a32e29fSRobert Watson  * netisr_setqlimit().
178d4b5cae4SRobert Watson  */
179d4b5cae4SRobert Watson #define	NETISR_DEFAULT_MAXQLIMIT	10240
180d4b5cae4SRobert Watson static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
181d4b5cae4SRobert Watson TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit);
18278494902SPawel Jakub Dawidek SYSCTL_INT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
183d4b5cae4SRobert Watson     &netisr_maxqlimit, 0,
184d4b5cae4SRobert Watson     "Maximum netisr per-protocol, per-CPU queue depth.");
185d4b5cae4SRobert Watson 
186d4b5cae4SRobert Watson /*
1870a32e29fSRobert Watson  * The default per-workstream mbuf queue limit for protocols that don't
1880a32e29fSRobert Watson  * initialize the nh_qlimit field of their struct netisr_handler.  If this is
1890a32e29fSRobert Watson  * set above netisr_maxqlimit, we truncate it to the maximum during boot.
190d4b5cae4SRobert Watson  */
191d4b5cae4SRobert Watson #define	NETISR_DEFAULT_DEFAULTQLIMIT	256
192d4b5cae4SRobert Watson static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
193d4b5cae4SRobert Watson TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit);
19478494902SPawel Jakub Dawidek SYSCTL_INT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
195d4b5cae4SRobert Watson     &netisr_defaultqlimit, 0,
196d4b5cae4SRobert Watson     "Default netisr per-protocol, per-CPU queue limit if not set by protocol");
197d4b5cae4SRobert Watson 
198d4b5cae4SRobert Watson /*
199938448cdSRobert Watson  * Store and export the compile-time constant NETISR_MAXPROT limit on the
200938448cdSRobert Watson  * number of protocols that can register with netisr at a time.  This is
201938448cdSRobert Watson  * required for crashdump analysis, as it sizes netisr_proto[].
202d4b5cae4SRobert Watson  */
203938448cdSRobert Watson static u_int	netisr_maxprot = NETISR_MAXPROT;
204938448cdSRobert Watson SYSCTL_INT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
205938448cdSRobert Watson     &netisr_maxprot, 0,
206938448cdSRobert Watson     "Compile-time limit on the number of protocols supported by netisr.");
207d4b5cae4SRobert Watson 
208d4b5cae4SRobert Watson /*
209938448cdSRobert Watson  * The netisr_proto array describes all registered protocols, indexed by
210938448cdSRobert Watson  * protocol number.  See netisr_internal.h for more details.
211d4b5cae4SRobert Watson  */
212938448cdSRobert Watson static struct netisr_proto	netisr_proto[NETISR_MAXPROT];
213d4b5cae4SRobert Watson 
214d4b5cae4SRobert Watson /*
215938448cdSRobert Watson  * Per-CPU workstream data.  See netisr_internal.h for more details.
216d4b5cae4SRobert Watson  */
21753402767SRobert Watson DPCPU_DEFINE(struct netisr_workstream, nws);
218d4b5cae4SRobert Watson 
219d4b5cae4SRobert Watson /*
220d4b5cae4SRobert Watson  * Map contiguous values between 0 and nws_count - 1 into CPU IDs appropriate
22153402767SRobert Watson  * for accessing workstreams.  This allows constructions of the form
22253402767SRobert Watson  * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
223d4b5cae4SRobert Watson  */
224d4b5cae4SRobert Watson static u_int				 nws_array[MAXCPU];
225d4b5cae4SRobert Watson 
226d4b5cae4SRobert Watson /*
227d4b5cae4SRobert Watson  * Number of registered workstreams.  Will be at most the number of running
228d4b5cae4SRobert Watson  * CPUs once fully started.
229d4b5cae4SRobert Watson  */
230d4b5cae4SRobert Watson static u_int				 nws_count;
231d4b5cae4SRobert Watson SYSCTL_INT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
232d4b5cae4SRobert Watson     &nws_count, 0, "Number of extant netisr threads.");
233d4b5cae4SRobert Watson 
234d4b5cae4SRobert Watson /*
235d4b5cae4SRobert Watson  * Synchronization for each workstream: a mutex protects all mutable fields
236d4b5cae4SRobert Watson  * in each stream, including per-protocol state (mbuf queues).  The SWI is
237d4b5cae4SRobert Watson  * woken up if asynchronous dispatch is required.
238d4b5cae4SRobert Watson  */
239d4b5cae4SRobert Watson #define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
240d4b5cae4SRobert Watson #define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
241d4b5cae4SRobert Watson #define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
242d4b5cae4SRobert Watson #define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)
243d4b5cae4SRobert Watson 
244d4b5cae4SRobert Watson /*
245d4b5cae4SRobert Watson  * Utility routines for protocols that implement their own mapping of flows
246d4b5cae4SRobert Watson  * to CPUs.
247d4b5cae4SRobert Watson  */
248d4b5cae4SRobert Watson u_int
249d4b5cae4SRobert Watson netisr_get_cpucount(void)
250d4b5cae4SRobert Watson {
251d4b5cae4SRobert Watson 
252d4b5cae4SRobert Watson 	return (nws_count);
2535fd04e38SRobert Watson }
254d4b5cae4SRobert Watson 
255d4b5cae4SRobert Watson u_int
256d4b5cae4SRobert Watson netisr_get_cpuid(u_int cpunumber)
257d4b5cae4SRobert Watson {
258d4b5cae4SRobert Watson 
259d4b5cae4SRobert Watson 	KASSERT(cpunumber < nws_count, ("%s: %u > %u", __func__, cpunumber,
260d4b5cae4SRobert Watson 	    nws_count));
261d4b5cae4SRobert Watson 
262d4b5cae4SRobert Watson 	return (nws_array[cpunumber]);
2635fd04e38SRobert Watson }
2645fd04e38SRobert Watson 
2655fd04e38SRobert Watson /*
2660a32e29fSRobert Watson  * The default implementation of flow -> CPU ID mapping.
267d4b5cae4SRobert Watson  *
268d4b5cae4SRobert Watson  * Non-static so that protocols can use it to map their own work to specific
269d4b5cae4SRobert Watson  * CPUs in a manner consistent with netisr for affinity purposes.
270d4b5cae4SRobert Watson  */
271d4b5cae4SRobert Watson u_int
272d4b5cae4SRobert Watson netisr_default_flow2cpu(u_int flowid)
273d4b5cae4SRobert Watson {
274d4b5cae4SRobert Watson 
275d4b5cae4SRobert Watson 	return (nws_array[flowid % nws_count]);
276d4b5cae4SRobert Watson }
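
/*
 * Editor's illustrative sketch: a protocol doing its own flow-to-CPU
 * mapping can combine the helpers above to stay consistent with netisr's
 * CPU selection; "my_flow_hash" is a hypothetical value computed by the
 * protocol:
 *
 *	u_int cpuid;
 *
 *	cpuid = netisr_get_cpuid(my_flow_hash % netisr_get_cpucount());
 *
 * which yields the same CPU as netisr_default_flow2cpu(my_flow_hash).
 */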
277d4b5cae4SRobert Watson 
278d4b5cae4SRobert Watson /*
279d4b5cae4SRobert Watson  * Register a new netisr handler, which requires initializing per-protocol
280d4b5cae4SRobert Watson  * fields for each workstream.  All netisr work is briefly suspended while
281d4b5cae4SRobert Watson  * the protocol is installed.
2821cafed39SJonathan Lemon  */
2831cafed39SJonathan Lemon void
284d4b5cae4SRobert Watson netisr_register(const struct netisr_handler *nhp)
2851cafed39SJonathan Lemon {
286d4b5cae4SRobert Watson 	struct netisr_work *npwp;
287d4b5cae4SRobert Watson 	const char *name;
288d4b5cae4SRobert Watson 	u_int i, proto;
2891cafed39SJonathan Lemon 
290d4b5cae4SRobert Watson 	proto = nhp->nh_proto;
291d4b5cae4SRobert Watson 	name = nhp->nh_name;
29259dd72d0SRobert Watson 
2937902224cSSam Leffler 	/*
294d4b5cae4SRobert Watson 	 * Test that the requested registration is valid.
2957902224cSSam Leffler 	 */
296d4b5cae4SRobert Watson 	KASSERT(nhp->nh_name != NULL,
297d4b5cae4SRobert Watson 	    ("%s: nh_name NULL for %u", __func__, proto));
298d4b5cae4SRobert Watson 	KASSERT(nhp->nh_handler != NULL,
299d4b5cae4SRobert Watson 	    ("%s: nh_handler NULL for %s", __func__, name));
300d4b5cae4SRobert Watson 	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
301d4b5cae4SRobert Watson 	    nhp->nh_policy == NETISR_POLICY_FLOW ||
302d4b5cae4SRobert Watson 	    nhp->nh_policy == NETISR_POLICY_CPU,
303d4b5cae4SRobert Watson 	    ("%s: unsupported nh_policy %u for %s", __func__,
304d4b5cae4SRobert Watson 	    nhp->nh_policy, name));
305d4b5cae4SRobert Watson 	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
306d4b5cae4SRobert Watson 	    nhp->nh_m2flow == NULL,
307d4b5cae4SRobert Watson 	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
308d4b5cae4SRobert Watson 	    name));
309d4b5cae4SRobert Watson 	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
310d4b5cae4SRobert Watson 	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
311d4b5cae4SRobert Watson 	    name));
312d4b5cae4SRobert Watson 	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
313d4b5cae4SRobert Watson 	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
314d4b5cae4SRobert Watson 	    name));
315d4b5cae4SRobert Watson 	KASSERT(proto < NETISR_MAXPROT,
316d4b5cae4SRobert Watson 	    ("%s(%u, %s): protocol too big", __func__, proto, name));
317d4b5cae4SRobert Watson 
318d4b5cae4SRobert Watson 	/*
319d4b5cae4SRobert Watson 	 * Test that no existing registration exists for this protocol.
320d4b5cae4SRobert Watson 	 */
321d4b5cae4SRobert Watson 	NETISR_WLOCK();
322938448cdSRobert Watson 	KASSERT(netisr_proto[proto].np_name == NULL,
323d4b5cae4SRobert Watson 	    ("%s(%u, %s): name present", __func__, proto, name));
324938448cdSRobert Watson 	KASSERT(netisr_proto[proto].np_handler == NULL,
325d4b5cae4SRobert Watson 	    ("%s(%u, %s): handler present", __func__, proto, name));
326d4b5cae4SRobert Watson 
327938448cdSRobert Watson 	netisr_proto[proto].np_name = name;
328938448cdSRobert Watson 	netisr_proto[proto].np_handler = nhp->nh_handler;
329938448cdSRobert Watson 	netisr_proto[proto].np_m2flow = nhp->nh_m2flow;
330938448cdSRobert Watson 	netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid;
331938448cdSRobert Watson 	netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu;
332d4b5cae4SRobert Watson 	if (nhp->nh_qlimit == 0)
333938448cdSRobert Watson 		netisr_proto[proto].np_qlimit = netisr_defaultqlimit;
334d4b5cae4SRobert Watson 	else if (nhp->nh_qlimit > netisr_maxqlimit) {
335d4b5cae4SRobert Watson 		printf("%s: %s requested queue limit %u capped to "
336d4b5cae4SRobert Watson 		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
337d4b5cae4SRobert Watson 		    netisr_maxqlimit);
338938448cdSRobert Watson 		netisr_proto[proto].np_qlimit = netisr_maxqlimit;
339d4b5cae4SRobert Watson 	} else
340938448cdSRobert Watson 		netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
341938448cdSRobert Watson 	netisr_proto[proto].np_policy = nhp->nh_policy;
3429e6e01ebSRobert Watson 	for (i = 0; i <= mp_maxid; i++) {
34353402767SRobert Watson 		if (CPU_ABSENT(i))
34453402767SRobert Watson 			continue;
34553402767SRobert Watson 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
346d4b5cae4SRobert Watson 		bzero(npwp, sizeof(*npwp));
347938448cdSRobert Watson 		npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
3481cafed39SJonathan Lemon 	}
349d4b5cae4SRobert Watson 	NETISR_WUNLOCK();
3501cafed39SJonathan Lemon }
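
/*
 * Editor's illustrative sketch: a protocol typically registers from a
 * SYSINIT or module event handler by filling out a netisr_handler; the
 * "foo" identifiers below are hypothetical:
 *
 *	static struct netisr_handler foo_nh = {
 *		.nh_name = "foo",
 *		.nh_handler = foo_input,
 *		.nh_proto = NETISR_FOO,
 *		.nh_policy = NETISR_POLICY_FLOW,
 *	};
 *
 *	netisr_register(&foo_nh);
 *
 * Fields left at zero take defaults; in particular an unset nh_qlimit
 * becomes netisr_defaultqlimit, as handled above.
 */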
3511cafed39SJonathan Lemon 
3521cafed39SJonathan Lemon /*
353d4b5cae4SRobert Watson  * Clear drop counters across all workstreams for a protocol.
354d4b5cae4SRobert Watson  */
355d4b5cae4SRobert Watson void
356d4b5cae4SRobert Watson netisr_clearqdrops(const struct netisr_handler *nhp)
357d4b5cae4SRobert Watson {
358d4b5cae4SRobert Watson 	struct netisr_work *npwp;
359d4b5cae4SRobert Watson #ifdef INVARIANTS
360d4b5cae4SRobert Watson 	const char *name;
361d4b5cae4SRobert Watson #endif
362d4b5cae4SRobert Watson 	u_int i, proto;
363d4b5cae4SRobert Watson 
364d4b5cae4SRobert Watson 	proto = nhp->nh_proto;
365d4b5cae4SRobert Watson #ifdef INVARIANTS
366d4b5cae4SRobert Watson 	name = nhp->nh_name;
367d4b5cae4SRobert Watson #endif
368d4b5cae4SRobert Watson 	KASSERT(proto < NETISR_MAXPROT,
369d4b5cae4SRobert Watson 	    ("%s(%u): protocol too big for %s", __func__, proto, name));
370d4b5cae4SRobert Watson 
371d4b5cae4SRobert Watson 	NETISR_WLOCK();
372938448cdSRobert Watson 	KASSERT(netisr_proto[proto].np_handler != NULL,
373d4b5cae4SRobert Watson 	    ("%s(%u): protocol not registered for %s", __func__, proto,
374d4b5cae4SRobert Watson 	    name));
375d4b5cae4SRobert Watson 
3769e6e01ebSRobert Watson 	for (i = 0; i <= mp_maxid; i++) {
37753402767SRobert Watson 		if (CPU_ABSENT(i))
37853402767SRobert Watson 			continue;
37953402767SRobert Watson 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
380d4b5cae4SRobert Watson 		npwp->nw_qdrops = 0;
381d4b5cae4SRobert Watson 	}
382d4b5cae4SRobert Watson 	NETISR_WUNLOCK();
383d4b5cae4SRobert Watson }
384d4b5cae4SRobert Watson 
385d4b5cae4SRobert Watson /*
3860a32e29fSRobert Watson  * Query current drop counters across all workstreams for a protocol.
387d4b5cae4SRobert Watson  */
388d4b5cae4SRobert Watson void
389d4b5cae4SRobert Watson netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
390d4b5cae4SRobert Watson {
391d4b5cae4SRobert Watson 	struct netisr_work *npwp;
392d4b5cae4SRobert Watson 	struct rm_priotracker tracker;
393d4b5cae4SRobert Watson #ifdef INVARIANTS
394d4b5cae4SRobert Watson 	const char *name;
395d4b5cae4SRobert Watson #endif
396d4b5cae4SRobert Watson 	u_int i, proto;
397d4b5cae4SRobert Watson 
398d4b5cae4SRobert Watson 	*qdropp = 0;
399d4b5cae4SRobert Watson 	proto = nhp->nh_proto;
400d4b5cae4SRobert Watson #ifdef INVARIANTS
401d4b5cae4SRobert Watson 	name = nhp->nh_name;
402d4b5cae4SRobert Watson #endif
403d4b5cae4SRobert Watson 	KASSERT(proto < NETISR_MAXPROT,
404d4b5cae4SRobert Watson 	    ("%s(%u): protocol too big for %s", __func__, proto, name));
405d4b5cae4SRobert Watson 
406d4b5cae4SRobert Watson 	NETISR_RLOCK(&tracker);
407938448cdSRobert Watson 	KASSERT(netisr_proto[proto].np_handler != NULL,
408d4b5cae4SRobert Watson 	    ("%s(%u): protocol not registered for %s", __func__, proto,
409d4b5cae4SRobert Watson 	    name));
410d4b5cae4SRobert Watson 
4119e6e01ebSRobert Watson 	for (i = 0; i <= mp_maxid; i++) {
41253402767SRobert Watson 		if (CPU_ABSENT(i))
41353402767SRobert Watson 			continue;
41453402767SRobert Watson 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
415d4b5cae4SRobert Watson 		*qdropp += npwp->nw_qdrops;
416d4b5cae4SRobert Watson 	}
417d4b5cae4SRobert Watson 	NETISR_RUNLOCK(&tracker);
418d4b5cae4SRobert Watson }
419d4b5cae4SRobert Watson 
420d4b5cae4SRobert Watson /*
4210a32e29fSRobert Watson  * Query current per-workstream queue limit for a protocol.
422d4b5cae4SRobert Watson  */
423d4b5cae4SRobert Watson void
424d4b5cae4SRobert Watson netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
425d4b5cae4SRobert Watson {
426d4b5cae4SRobert Watson 	struct rm_priotracker tracker;
427d4b5cae4SRobert Watson #ifdef INVARIANTS
428d4b5cae4SRobert Watson 	const char *name;
429d4b5cae4SRobert Watson #endif
430d4b5cae4SRobert Watson 	u_int proto;
431d4b5cae4SRobert Watson 
432d4b5cae4SRobert Watson 	proto = nhp->nh_proto;
433d4b5cae4SRobert Watson #ifdef INVARIANTS
434d4b5cae4SRobert Watson 	name = nhp->nh_name;
435d4b5cae4SRobert Watson #endif
436d4b5cae4SRobert Watson 	KASSERT(proto < NETISR_MAXPROT,
437d4b5cae4SRobert Watson 	    ("%s(%u): protocol too big for %s", __func__, proto, name));
438d4b5cae4SRobert Watson 
439d4b5cae4SRobert Watson 	NETISR_RLOCK(&tracker);
440938448cdSRobert Watson 	KASSERT(netisr_proto[proto].np_handler != NULL,
441d4b5cae4SRobert Watson 	    ("%s(%u): protocol not registered for %s", __func__, proto,
442d4b5cae4SRobert Watson 	    name));
443938448cdSRobert Watson 	*qlimitp = netisr_proto[proto].np_qlimit;
444d4b5cae4SRobert Watson 	NETISR_RUNLOCK(&tracker);
445d4b5cae4SRobert Watson }
446d4b5cae4SRobert Watson 
447d4b5cae4SRobert Watson /*
448d4b5cae4SRobert Watson  * Update the queue limit across per-workstream queues for a protocol.  We
449d4b5cae4SRobert Watson  * simply change the limits, and don't drain overflowed packets as they will
450d4b5cae4SRobert Watson  * (hopefully) take care of themselves shortly.
4511cafed39SJonathan Lemon  */
4521cafed39SJonathan Lemon int
453d4b5cae4SRobert Watson netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
4541cafed39SJonathan Lemon {
455d4b5cae4SRobert Watson 	struct netisr_work *npwp;
456d4b5cae4SRobert Watson #ifdef INVARIANTS
457d4b5cae4SRobert Watson 	const char *name;
458d4b5cae4SRobert Watson #endif
459d4b5cae4SRobert Watson 	u_int i, proto;
4601cafed39SJonathan Lemon 
461d4b5cae4SRobert Watson 	if (qlimit > netisr_maxqlimit)
462d4b5cae4SRobert Watson 		return (EINVAL);
463d4b5cae4SRobert Watson 
464d4b5cae4SRobert Watson 	proto = nhp->nh_proto;
465d4b5cae4SRobert Watson #ifdef INVARIANTS
466d4b5cae4SRobert Watson 	name = nhp->nh_name;
467d4b5cae4SRobert Watson #endif
468d4b5cae4SRobert Watson 	KASSERT(proto < NETISR_MAXPROT,
469d4b5cae4SRobert Watson 	    ("%s(%u): protocol too big for %s", __func__, proto, name));
470d4b5cae4SRobert Watson 
471d4b5cae4SRobert Watson 	NETISR_WLOCK();
472938448cdSRobert Watson 	KASSERT(netisr_proto[proto].np_handler != NULL,
473d4b5cae4SRobert Watson 	    ("%s(%u): protocol not registered for %s", __func__, proto,
474d4b5cae4SRobert Watson 	    name));
475d4b5cae4SRobert Watson 
476938448cdSRobert Watson 	netisr_proto[proto].np_qlimit = qlimit;
4779e6e01ebSRobert Watson 	for (i = 0; i <= mp_maxid; i++) {
47853402767SRobert Watson 		if (CPU_ABSENT(i))
47953402767SRobert Watson 			continue;
48053402767SRobert Watson 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
481d4b5cae4SRobert Watson 		npwp->nw_qlimit = qlimit;
482fb68148fSJonathan Lemon 	}
483d4b5cae4SRobert Watson 	NETISR_WUNLOCK();
4843161f583SAndre Oppermann 	return (0);
485e3b6e33cSJake Burkholder }
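
/*
 * Editor's illustrative sketch: a protocol can raise or lower its own
 * per-workstream queue limit at runtime, subject to net.isr.maxqlimit;
 * reusing the hypothetical handler from the registration example above:
 *
 *	if (netisr_setqlimit(&foo_nh, 1024) != 0)
 *		printf("foo: qlimit exceeds net.isr.maxqlimit\n");
 */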
486e3b6e33cSJake Burkholder 
487d4b5cae4SRobert Watson /*
488d4b5cae4SRobert Watson  * Drain all packets currently held in a particular protocol work queue.
489d4b5cae4SRobert Watson  */
490e3b6e33cSJake Burkholder static void
491d4b5cae4SRobert Watson netisr_drain_proto(struct netisr_work *npwp)
492e3b6e33cSJake Burkholder {
493d4b5cae4SRobert Watson 	struct mbuf *m;
494d4b5cae4SRobert Watson 
495d4b5cae4SRobert Watson 	/*
496d4b5cae4SRobert Watson 	 * We would assert the lock on the workstream but it's not passed in.
497d4b5cae4SRobert Watson 	 */
498d4b5cae4SRobert Watson 	while ((m = npwp->nw_head) != NULL) {
499d4b5cae4SRobert Watson 		npwp->nw_head = m->m_nextpkt;
500d4b5cae4SRobert Watson 		m->m_nextpkt = NULL;
501d4b5cae4SRobert Watson 		if (npwp->nw_head == NULL)
502d4b5cae4SRobert Watson 			npwp->nw_tail = NULL;
503d4b5cae4SRobert Watson 		npwp->nw_len--;
504d4b5cae4SRobert Watson 		m_freem(m);
505d4b5cae4SRobert Watson 	}
506d4b5cae4SRobert Watson 	KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
507d4b5cae4SRobert Watson 	KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
508d4b5cae4SRobert Watson }
509d4b5cae4SRobert Watson 
510d4b5cae4SRobert Watson /*
511d4b5cae4SRobert Watson  * Remove the registration of a network protocol, which requires clearing
512d4b5cae4SRobert Watson  * per-protocol fields across all workstreams, including freeing all mbufs in
513d4b5cae4SRobert Watson  * the queues at time of unregister.  All work in netisr is briefly suspended
514d4b5cae4SRobert Watson  * while this takes place.
515d4b5cae4SRobert Watson  */
516d4b5cae4SRobert Watson void
517d4b5cae4SRobert Watson netisr_unregister(const struct netisr_handler *nhp)
518d4b5cae4SRobert Watson {
519d4b5cae4SRobert Watson 	struct netisr_work *npwp;
520d4b5cae4SRobert Watson #ifdef INVARIANTS
521d4b5cae4SRobert Watson 	const char *name;
522d4b5cae4SRobert Watson #endif
523d4b5cae4SRobert Watson 	u_int i, proto;
524d4b5cae4SRobert Watson 
525d4b5cae4SRobert Watson 	proto = nhp->nh_proto;
526d4b5cae4SRobert Watson #ifdef INVARIANTS
527d4b5cae4SRobert Watson 	name = nhp->nh_name;
528d4b5cae4SRobert Watson #endif
529d4b5cae4SRobert Watson 	KASSERT(proto < NETISR_MAXPROT,
530d4b5cae4SRobert Watson 	    ("%s(%u): protocol too big for %s", __func__, proto, name));
531d4b5cae4SRobert Watson 
532d4b5cae4SRobert Watson 	NETISR_WLOCK();
533938448cdSRobert Watson 	KASSERT(netisr_proto[proto].np_handler != NULL,
534d4b5cae4SRobert Watson 	    ("%s(%u): protocol not registered for %s", __func__, proto,
535d4b5cae4SRobert Watson 	    name));
536d4b5cae4SRobert Watson 
537938448cdSRobert Watson 	netisr_proto[proto].np_name = NULL;
538938448cdSRobert Watson 	netisr_proto[proto].np_handler = NULL;
539938448cdSRobert Watson 	netisr_proto[proto].np_m2flow = NULL;
540938448cdSRobert Watson 	netisr_proto[proto].np_m2cpuid = NULL;
541938448cdSRobert Watson 	netisr_proto[proto].np_qlimit = 0;
542938448cdSRobert Watson 	netisr_proto[proto].np_policy = 0;
5439e6e01ebSRobert Watson 	for (i = 0; i <= mp_maxid; i++) {
54453402767SRobert Watson 		if (CPU_ABSENT(i))
54553402767SRobert Watson 			continue;
54653402767SRobert Watson 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
547d4b5cae4SRobert Watson 		netisr_drain_proto(npwp);
548d4b5cae4SRobert Watson 		bzero(npwp, sizeof(*npwp));
549d4b5cae4SRobert Watson 	}
550d4b5cae4SRobert Watson 	NETISR_WUNLOCK();
551d4b5cae4SRobert Watson }
552d4b5cae4SRobert Watson 
553d4b5cae4SRobert Watson /*
554d4b5cae4SRobert Watson  * Look up the workstream given a packet and source identifier.  Do this by
555d4b5cae4SRobert Watson  * checking the protocol's policy, and optionally calling out to the protocol
556d4b5cae4SRobert Watson  * for assistance if required.
557d4b5cae4SRobert Watson  */
558d4b5cae4SRobert Watson static struct mbuf *
559d4b5cae4SRobert Watson netisr_select_cpuid(struct netisr_proto *npp, uintptr_t source,
560d4b5cae4SRobert Watson     struct mbuf *m, u_int *cpuidp)
561d4b5cae4SRobert Watson {
562d4b5cae4SRobert Watson 	struct ifnet *ifp;
563d4b5cae4SRobert Watson 
564d4b5cae4SRobert Watson 	NETISR_LOCK_ASSERT();
565d4b5cae4SRobert Watson 
566d4b5cae4SRobert Watson 	/*
567d4b5cae4SRobert Watson 	 * In the event we have only one worker, shortcut and deliver to it
568d4b5cae4SRobert Watson 	 * without further ado.
569d4b5cae4SRobert Watson 	 */
570d4b5cae4SRobert Watson 	if (nws_count == 1) {
571d4b5cae4SRobert Watson 		*cpuidp = nws_array[0];
572d4b5cae4SRobert Watson 		return (m);
573d4b5cae4SRobert Watson 	}
574d4b5cae4SRobert Watson 
575d4b5cae4SRobert Watson 	/*
576d4b5cae4SRobert Watson 	 * What happens next depends on the policy selected by the protocol.
577d4b5cae4SRobert Watson 	 * If we want to support per-interface policies, we should do that
578d4b5cae4SRobert Watson 	 * here first.
579d4b5cae4SRobert Watson 	 */
580d4b5cae4SRobert Watson 	switch (npp->np_policy) {
581d4b5cae4SRobert Watson 	case NETISR_POLICY_CPU:
582d4b5cae4SRobert Watson 		return (npp->np_m2cpuid(m, source, cpuidp));
583d4b5cae4SRobert Watson 
584d4b5cae4SRobert Watson 	case NETISR_POLICY_FLOW:
585d4b5cae4SRobert Watson 		if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
586d4b5cae4SRobert Watson 			m = npp->np_m2flow(m, source);
587d4b5cae4SRobert Watson 			if (m == NULL)
588d4b5cae4SRobert Watson 				return (NULL);
589d4b5cae4SRobert Watson 		}
590d4b5cae4SRobert Watson 		if (m->m_flags & M_FLOWID) {
591d4b5cae4SRobert Watson 			*cpuidp =
592d4b5cae4SRobert Watson 			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
593d4b5cae4SRobert Watson 			return (m);
594d4b5cae4SRobert Watson 		}
595d4b5cae4SRobert Watson 		/* FALLTHROUGH */
596d4b5cae4SRobert Watson 
597d4b5cae4SRobert Watson 	case NETISR_POLICY_SOURCE:
598d4b5cae4SRobert Watson 		ifp = m->m_pkthdr.rcvif;
599d4b5cae4SRobert Watson 		if (ifp != NULL)
600d4b5cae4SRobert Watson 			*cpuidp = nws_array[(ifp->if_index + source) %
601d4b5cae4SRobert Watson 			    nws_count];
602d4b5cae4SRobert Watson 		else
603d4b5cae4SRobert Watson 			*cpuidp = nws_array[source % nws_count];
604d4b5cae4SRobert Watson 		return (m);
605d4b5cae4SRobert Watson 
606d4b5cae4SRobert Watson 	default:
607d4b5cae4SRobert Watson 		panic("%s: invalid policy %u for %s", __func__,
608d4b5cae4SRobert Watson 		    npp->np_policy, npp->np_name);
609d4b5cae4SRobert Watson 	}
610d4b5cae4SRobert Watson }
611d4b5cae4SRobert Watson 
612d4b5cae4SRobert Watson /*
613d4b5cae4SRobert Watson  * Process packets associated with a workstream and protocol.  For reasons of
614d4b5cae4SRobert Watson  * fairness, we process up to one complete netisr queue at a time, moving the
615d4b5cae4SRobert Watson  * queue to a stack-local queue for processing, but do not loop refreshing
616d4b5cae4SRobert Watson  * from the global queue.  The caller is responsible for deciding whether to
617d4b5cae4SRobert Watson  * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
618d4b5cae4SRobert Watson  * locked on entry and relocked before return, but will be released while
619d4b5cae4SRobert Watson  * processing.  The number of packets processed is returned.
620d4b5cae4SRobert Watson  */
621d4b5cae4SRobert Watson static u_int
622d4b5cae4SRobert Watson netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
623d4b5cae4SRobert Watson {
624d4b5cae4SRobert Watson 	struct netisr_work local_npw, *npwp;
625d4b5cae4SRobert Watson 	u_int handled;
626d4b5cae4SRobert Watson 	struct mbuf *m;
627d4b5cae4SRobert Watson 
628d4b5cae4SRobert Watson 	NETISR_LOCK_ASSERT();
629d4b5cae4SRobert Watson 	NWS_LOCK_ASSERT(nwsp);
630d4b5cae4SRobert Watson 
631d4b5cae4SRobert Watson 	KASSERT(nwsp->nws_flags & NWS_RUNNING,
632d4b5cae4SRobert Watson 	    ("%s(%u): not running", __func__, proto));
633d4b5cae4SRobert Watson 	KASSERT(proto >= 0 && proto < NETISR_MAXPROT,
634d4b5cae4SRobert Watson 	    ("%s(%u): invalid proto\n", __func__, proto));
635d4b5cae4SRobert Watson 
636d4b5cae4SRobert Watson 	npwp = &nwsp->nws_work[proto];
637d4b5cae4SRobert Watson 	if (npwp->nw_len == 0)
638d4b5cae4SRobert Watson 		return (0);
639d4b5cae4SRobert Watson 
640d4b5cae4SRobert Watson 	/*
641d4b5cae4SRobert Watson 	 * Move the global work queue to a thread-local work queue.
642d4b5cae4SRobert Watson 	 *
643d4b5cae4SRobert Watson 	 * Notice that this means the effective maximum length of the queue
644d4b5cae4SRobert Watson 	 * is actually twice that of the maximum queue length specified in
645d4b5cae4SRobert Watson 	 * the protocol registration call.
646d4b5cae4SRobert Watson 	 */
647d4b5cae4SRobert Watson 	handled = npwp->nw_len;
648d4b5cae4SRobert Watson 	local_npw = *npwp;
649d4b5cae4SRobert Watson 	npwp->nw_head = NULL;
650d4b5cae4SRobert Watson 	npwp->nw_tail = NULL;
651d4b5cae4SRobert Watson 	npwp->nw_len = 0;
652d4b5cae4SRobert Watson 	nwsp->nws_pendingbits &= ~(1 << proto);
653d4b5cae4SRobert Watson 	NWS_UNLOCK(nwsp);
654d4b5cae4SRobert Watson 	while ((m = local_npw.nw_head) != NULL) {
655d4b5cae4SRobert Watson 		local_npw.nw_head = m->m_nextpkt;
656d4b5cae4SRobert Watson 		m->m_nextpkt = NULL;
657d4b5cae4SRobert Watson 		if (local_npw.nw_head == NULL)
658d4b5cae4SRobert Watson 			local_npw.nw_tail = NULL;
659d4b5cae4SRobert Watson 		local_npw.nw_len--;
660d4b5cae4SRobert Watson 		VNET_ASSERT(m->m_pkthdr.rcvif != NULL);
661d4b5cae4SRobert Watson 		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
662938448cdSRobert Watson 		netisr_proto[proto].np_handler(m);
663d4b5cae4SRobert Watson 		CURVNET_RESTORE();
664d4b5cae4SRobert Watson 	}
665d4b5cae4SRobert Watson 	KASSERT(local_npw.nw_len == 0,
666d4b5cae4SRobert Watson 	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
667938448cdSRobert Watson 	if (netisr_proto[proto].np_drainedcpu)
668938448cdSRobert Watson 		netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu);
669d4b5cae4SRobert Watson 	NWS_LOCK(nwsp);
670d4b5cae4SRobert Watson 	npwp->nw_handled += handled;
671d4b5cae4SRobert Watson 	return (handled);
672d4b5cae4SRobert Watson }
673d4b5cae4SRobert Watson 
674d4b5cae4SRobert Watson /*
6750a32e29fSRobert Watson  * SWI handler for netisr -- processes packets in a set of workstreams that
676d4b5cae4SRobert Watson  * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
677d4b5cae4SRobert Watson  * being direct dispatched, go back to sleep and wait for the dispatching
678d4b5cae4SRobert Watson  * thread to wake us up again.
679d4b5cae4SRobert Watson  */
680d4b5cae4SRobert Watson static void
681d4b5cae4SRobert Watson swi_net(void *arg)
682d4b5cae4SRobert Watson {
683d4b5cae4SRobert Watson #ifdef NETISR_LOCKING
684d4b5cae4SRobert Watson 	struct rm_priotracker tracker;
685d4b5cae4SRobert Watson #endif
686d4b5cae4SRobert Watson 	struct netisr_workstream *nwsp;
687d4b5cae4SRobert Watson 	u_int bits, prot;
688d4b5cae4SRobert Watson 
689d4b5cae4SRobert Watson 	nwsp = arg;
690d4b5cae4SRobert Watson 
6911cafed39SJonathan Lemon #ifdef DEVICE_POLLING
692d4b5cae4SRobert Watson 	KASSERT(nws_count == 1,
693d4b5cae4SRobert Watson 	    ("%s: device_polling but nws_count != 1", __func__));
694d4b5cae4SRobert Watson 	netisr_poll();
695d4b5cae4SRobert Watson #endif
696d4b5cae4SRobert Watson #ifdef NETISR_LOCKING
697d4b5cae4SRobert Watson 	NETISR_RLOCK(&tracker);
698d4b5cae4SRobert Watson #endif
699d4b5cae4SRobert Watson 	NWS_LOCK(nwsp);
700d4b5cae4SRobert Watson 	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
701d4b5cae4SRobert Watson 	if (nwsp->nws_flags & NWS_DISPATCHING)
702d4b5cae4SRobert Watson 		goto out;
703d4b5cae4SRobert Watson 	nwsp->nws_flags |= NWS_RUNNING;
704d4b5cae4SRobert Watson 	nwsp->nws_flags &= ~NWS_SCHEDULED;
705d4b5cae4SRobert Watson 	while ((bits = nwsp->nws_pendingbits) != 0) {
706d4b5cae4SRobert Watson 		while ((prot = ffs(bits)) != 0) {
707d4b5cae4SRobert Watson 			prot--;
708d4b5cae4SRobert Watson 			bits &= ~(1 << prot);
709d4b5cae4SRobert Watson 			(void)netisr_process_workstream_proto(nwsp, prot);
710d4b5cae4SRobert Watson 		}
711d4b5cae4SRobert Watson 	}
712d4b5cae4SRobert Watson 	nwsp->nws_flags &= ~NWS_RUNNING;
713d4b5cae4SRobert Watson out:
714d4b5cae4SRobert Watson 	NWS_UNLOCK(nwsp);
715d4b5cae4SRobert Watson #ifdef NETISR_LOCKING
716d4b5cae4SRobert Watson 	NETISR_RUNLOCK(&tracker);
717d4b5cae4SRobert Watson #endif
718d4b5cae4SRobert Watson #ifdef DEVICE_POLLING
719d4b5cae4SRobert Watson 	netisr_pollmore();
720d4b5cae4SRobert Watson #endif
721d4b5cae4SRobert Watson }
722d4b5cae4SRobert Watson 
723d4b5cae4SRobert Watson static int
724d4b5cae4SRobert Watson netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
725d4b5cae4SRobert Watson     struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
726d4b5cae4SRobert Watson {
727d4b5cae4SRobert Watson 
728d4b5cae4SRobert Watson 	NWS_LOCK_ASSERT(nwsp);
729d4b5cae4SRobert Watson 
730d4b5cae4SRobert Watson 	*dosignalp = 0;
731d4b5cae4SRobert Watson 	if (npwp->nw_len < npwp->nw_qlimit) {
732d4b5cae4SRobert Watson 		m->m_nextpkt = NULL;
733d4b5cae4SRobert Watson 		if (npwp->nw_head == NULL) {
734d4b5cae4SRobert Watson 			npwp->nw_head = m;
735d4b5cae4SRobert Watson 			npwp->nw_tail = m;
736d4b5cae4SRobert Watson 		} else {
737d4b5cae4SRobert Watson 			npwp->nw_tail->m_nextpkt = m;
738d4b5cae4SRobert Watson 			npwp->nw_tail = m;
739d4b5cae4SRobert Watson 		}
740d4b5cae4SRobert Watson 		npwp->nw_len++;
741d4b5cae4SRobert Watson 		if (npwp->nw_len > npwp->nw_watermark)
742d4b5cae4SRobert Watson 			npwp->nw_watermark = npwp->nw_len;
7430a32e29fSRobert Watson 
7440a32e29fSRobert Watson 		/*
7450a32e29fSRobert Watson 		 * We must set the bit regardless of NWS_RUNNING, so that
7460a32e29fSRobert Watson 		 * swi_net() keeps calling netisr_process_workstream_proto().
7470a32e29fSRobert Watson 		 */
748d4b5cae4SRobert Watson 		nwsp->nws_pendingbits |= (1 << proto);
749d4b5cae4SRobert Watson 		if (!(nwsp->nws_flags &
750d4b5cae4SRobert Watson 		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
751d4b5cae4SRobert Watson 			nwsp->nws_flags |= NWS_SCHEDULED;
752d4b5cae4SRobert Watson 			*dosignalp = 1;	/* Defer until unlocked. */
753d4b5cae4SRobert Watson 		}
754d4b5cae4SRobert Watson 		npwp->nw_queued++;
755d4b5cae4SRobert Watson 		return (0);
756d4b5cae4SRobert Watson 	} else {
757ba3b25b3SBjoern A. Zeeb 		m_freem(m);
758d4b5cae4SRobert Watson 		npwp->nw_qdrops++;
759d4b5cae4SRobert Watson 		return (ENOBUFS);
760d4b5cae4SRobert Watson 	}
761d4b5cae4SRobert Watson }
762d4b5cae4SRobert Watson 
763d4b5cae4SRobert Watson static int
764d4b5cae4SRobert Watson netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
765d4b5cae4SRobert Watson {
766d4b5cae4SRobert Watson 	struct netisr_workstream *nwsp;
767d4b5cae4SRobert Watson 	struct netisr_work *npwp;
768d4b5cae4SRobert Watson 	int dosignal, error;
769d4b5cae4SRobert Watson 
770d4b5cae4SRobert Watson #ifdef NETISR_LOCKING
771d4b5cae4SRobert Watson 	NETISR_LOCK_ASSERT();
772d4b5cae4SRobert Watson #endif
7739e6e01ebSRobert Watson 	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
7749e6e01ebSRobert Watson 	    cpuid, mp_maxid));
77553402767SRobert Watson 	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
776d4b5cae4SRobert Watson 
777d4b5cae4SRobert Watson 	dosignal = 0;
778d4b5cae4SRobert Watson 	error = 0;
77953402767SRobert Watson 	nwsp = DPCPU_ID_PTR(cpuid, nws);
780d4b5cae4SRobert Watson 	npwp = &nwsp->nws_work[proto];
781d4b5cae4SRobert Watson 	NWS_LOCK(nwsp);
782d4b5cae4SRobert Watson 	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
783d4b5cae4SRobert Watson 	NWS_UNLOCK(nwsp);
784d4b5cae4SRobert Watson 	if (dosignal)
785d4b5cae4SRobert Watson 		NWS_SIGNAL(nwsp);
786d4b5cae4SRobert Watson 	return (error);
787d4b5cae4SRobert Watson }
788d4b5cae4SRobert Watson 
789d4b5cae4SRobert Watson int
790d4b5cae4SRobert Watson netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
791d4b5cae4SRobert Watson {
792d4b5cae4SRobert Watson #ifdef NETISR_LOCKING
793d4b5cae4SRobert Watson 	struct rm_priotracker tracker;
794d4b5cae4SRobert Watson #endif
795d4b5cae4SRobert Watson 	u_int cpuid;
796d4b5cae4SRobert Watson 	int error;
797d4b5cae4SRobert Watson 
798d4b5cae4SRobert Watson 	KASSERT(proto < NETISR_MAXPROT,
799d4b5cae4SRobert Watson 	    ("%s: invalid proto %u", __func__, proto));
800d4b5cae4SRobert Watson 
801d4b5cae4SRobert Watson #ifdef NETISR_LOCKING
802d4b5cae4SRobert Watson 	NETISR_RLOCK(&tracker);
803d4b5cae4SRobert Watson #endif
804938448cdSRobert Watson 	KASSERT(netisr_proto[proto].np_handler != NULL,
805d4b5cae4SRobert Watson 	    ("%s: invalid proto %u", __func__, proto));
806d4b5cae4SRobert Watson 
807938448cdSRobert Watson 	m = netisr_select_cpuid(&netisr_proto[proto], source, m, &cpuid);
80853402767SRobert Watson 	if (m != NULL) {
80953402767SRobert Watson 		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
81053402767SRobert Watson 		    cpuid));
811d4b5cae4SRobert Watson 		error = netisr_queue_internal(proto, m, cpuid);
81253402767SRobert Watson 	} else
813d4b5cae4SRobert Watson 		error = ENOBUFS;
814d4b5cae4SRobert Watson #ifdef NETISR_LOCKING
815d4b5cae4SRobert Watson 	NETISR_RUNLOCK(&tracker);
816d4b5cae4SRobert Watson #endif
817d4b5cae4SRobert Watson 	return (error);
818d4b5cae4SRobert Watson }
819d4b5cae4SRobert Watson 
820d4b5cae4SRobert Watson int
821d4b5cae4SRobert Watson netisr_queue(u_int proto, struct mbuf *m)
822d4b5cae4SRobert Watson {
823d4b5cae4SRobert Watson 
824d4b5cae4SRobert Watson 	return (netisr_queue_src(proto, 0, m));
825d4b5cae4SRobert Watson }
826d4b5cae4SRobert Watson 
827d4b5cae4SRobert Watson /*
8280a32e29fSRobert Watson  * Dispatch a packet for netisr processing; direct dispatch is permitted by
829d4b5cae4SRobert Watson  * calling context.
830d4b5cae4SRobert Watson  */
831d4b5cae4SRobert Watson int
832d4b5cae4SRobert Watson netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
833d4b5cae4SRobert Watson {
834d4b5cae4SRobert Watson #ifdef NETISR_LOCKING
835d4b5cae4SRobert Watson 	struct rm_priotracker tracker;
836d4b5cae4SRobert Watson #endif
837d4b5cae4SRobert Watson 	struct netisr_workstream *nwsp;
838d4b5cae4SRobert Watson 	struct netisr_work *npwp;
839d4b5cae4SRobert Watson 	int dosignal, error;
840d4b5cae4SRobert Watson 	u_int cpuid;
841d4b5cae4SRobert Watson 
842d4b5cae4SRobert Watson 	/*
843d4b5cae4SRobert Watson 	 * If direct dispatch is entirely disabled, fall back on queueing.
844d4b5cae4SRobert Watson 	 */
845d4b5cae4SRobert Watson 	if (!netisr_direct)
846d4b5cae4SRobert Watson 		return (netisr_queue_src(proto, source, m));
847d4b5cae4SRobert Watson 
848d4b5cae4SRobert Watson 	KASSERT(proto < NETISR_MAXPROT,
849d4b5cae4SRobert Watson 	    ("%s: invalid proto %u", __func__, proto));
850d4b5cae4SRobert Watson #ifdef NETISR_LOCKING
851d4b5cae4SRobert Watson 	NETISR_RLOCK(&tracker);
852d4b5cae4SRobert Watson #endif
853938448cdSRobert Watson 	KASSERT(netisr_proto[proto].np_handler != NULL,
854d4b5cae4SRobert Watson 	    ("%s: invalid proto %u", __func__, proto));
855d4b5cae4SRobert Watson 
856d4b5cae4SRobert Watson 	/*
857d4b5cae4SRobert Watson 	 * If direct dispatch is forced, then unconditionally dispatch
858d4b5cae4SRobert Watson 	 * without a formal CPU selection.  Borrow the current CPU's stats,
859d4b5cae4SRobert Watson 	 * even if there's no worker on it.  In this case we don't update
860d4b5cae4SRobert Watson 	 * nws_flags because all netisr processing will be source ordered due
861d4b5cae4SRobert Watson 	 * to always being forced to directly dispatch.
862d4b5cae4SRobert Watson 	 */
863d4b5cae4SRobert Watson 	if (netisr_direct_force) {
86453402767SRobert Watson 		nwsp = DPCPU_PTR(nws);
865d4b5cae4SRobert Watson 		npwp = &nwsp->nws_work[proto];
866d4b5cae4SRobert Watson 		npwp->nw_dispatched++;
867d4b5cae4SRobert Watson 		npwp->nw_handled++;
868938448cdSRobert Watson 		netisr_proto[proto].np_handler(m);
869d4b5cae4SRobert Watson 		error = 0;
870d4b5cae4SRobert Watson 		goto out_unlock;
871d4b5cae4SRobert Watson 	}
872d4b5cae4SRobert Watson 
873d4b5cae4SRobert Watson 	/*
874d4b5cae4SRobert Watson 	 * Otherwise, we execute in a hybrid mode where we will try to direct
875d4b5cae4SRobert Watson 	 * dispatch if we're on the right CPU and the netisr worker isn't
876d4b5cae4SRobert Watson 	 * already running.
877d4b5cae4SRobert Watson 	 */
878938448cdSRobert Watson 	m = netisr_select_cpuid(&netisr_proto[proto], source, m, &cpuid);
879d4b5cae4SRobert Watson 	if (m == NULL) {
880d4b5cae4SRobert Watson 		error = ENOBUFS;
881d4b5cae4SRobert Watson 		goto out_unlock;
882d4b5cae4SRobert Watson 	}
88353402767SRobert Watson 	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
884d4b5cae4SRobert Watson 	sched_pin();
885d4b5cae4SRobert Watson 	if (cpuid != curcpu)
886d4b5cae4SRobert Watson 		goto queue_fallback;
88753402767SRobert Watson 	nwsp = DPCPU_PTR(nws);
888d4b5cae4SRobert Watson 	npwp = &nwsp->nws_work[proto];
889d4b5cae4SRobert Watson 
890d4b5cae4SRobert Watson 	/*-
891d4b5cae4SRobert Watson 	 * We are willing to direct dispatch only if three conditions hold:
892d4b5cae4SRobert Watson 	 *
893d4b5cae4SRobert Watson 	 * (1) The netisr worker isn't already running,
894d4b5cae4SRobert Watson 	 * (2) Another thread isn't already directly dispatching, and
895d4b5cae4SRobert Watson 	 * (3) The netisr hasn't already been woken up.
896d4b5cae4SRobert Watson 	 */
897d4b5cae4SRobert Watson 	NWS_LOCK(nwsp);
898d4b5cae4SRobert Watson 	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
899d4b5cae4SRobert Watson 		error = netisr_queue_workstream(nwsp, proto, npwp, m,
900d4b5cae4SRobert Watson 		    &dosignal);
90153402767SRobert Watson 		NWS_UNLOCK(nwsp);
902d4b5cae4SRobert Watson 		if (dosignal)
903d4b5cae4SRobert Watson 			NWS_SIGNAL(nwsp);
904d4b5cae4SRobert Watson 		goto out_unpin;
905d4b5cae4SRobert Watson 	}
906d4b5cae4SRobert Watson 
907d4b5cae4SRobert Watson 	/*
908d4b5cae4SRobert Watson 	 * The current thread is now effectively the netisr worker, so set
909d4b5cae4SRobert Watson 	 * the dispatching flag to prevent concurrent processing of the
910d4b5cae4SRobert Watson 	 * stream from another thread (even the netisr worker), which could
911d4b5cae4SRobert Watson 	 * otherwise lead to effective misordering of the stream.
912d4b5cae4SRobert Watson 	 */
913d4b5cae4SRobert Watson 	nwsp->nws_flags |= NWS_DISPATCHING;
914d4b5cae4SRobert Watson 	NWS_UNLOCK(nwsp);
915938448cdSRobert Watson 	netisr_proto[proto].np_handler(m);
916d4b5cae4SRobert Watson 	NWS_LOCK(nwsp);
917d4b5cae4SRobert Watson 	nwsp->nws_flags &= ~NWS_DISPATCHING;
918d4b5cae4SRobert Watson 	npwp->nw_handled++;
919d4b5cae4SRobert Watson 	npwp->nw_hybrid_dispatched++;
920d4b5cae4SRobert Watson 
921d4b5cae4SRobert Watson 	/*
922d4b5cae4SRobert Watson 	 * If other work was enqueued by another thread while we were direct
923d4b5cae4SRobert Watson 	 * dispatching, we need to signal the netisr worker to do that work.
924d4b5cae4SRobert Watson 	 * In the future, we might want to do some of that work in the
925d4b5cae4SRobert Watson 	 * current thread, rather than trigger further context switches.  If
926d4b5cae4SRobert Watson 	 * so, we'll want to establish a reasonable bound on the work done in
927d4b5cae4SRobert Watson 	 * the "borrowed" context.
928d4b5cae4SRobert Watson 	 */
929d4b5cae4SRobert Watson 	if (nwsp->nws_pendingbits != 0) {
930d4b5cae4SRobert Watson 		nwsp->nws_flags |= NWS_SCHEDULED;
931d4b5cae4SRobert Watson 		dosignal = 1;
932d4b5cae4SRobert Watson 	} else
933d4b5cae4SRobert Watson 		dosignal = 0;
934d4b5cae4SRobert Watson 	NWS_UNLOCK(nwsp);
935d4b5cae4SRobert Watson 	if (dosignal)
936d4b5cae4SRobert Watson 		NWS_SIGNAL(nwsp);
937d4b5cae4SRobert Watson 	error = 0;
938d4b5cae4SRobert Watson 	goto out_unpin;
939d4b5cae4SRobert Watson 
940d4b5cae4SRobert Watson queue_fallback:
941d4b5cae4SRobert Watson 	error = netisr_queue_internal(proto, m, cpuid);
942d4b5cae4SRobert Watson out_unpin:
943d4b5cae4SRobert Watson 	sched_unpin();
944d4b5cae4SRobert Watson out_unlock:
945d4b5cae4SRobert Watson #ifdef NETISR_LOCKING
946d4b5cae4SRobert Watson 	NETISR_RUNLOCK(&tracker);
947d4b5cae4SRobert Watson #endif
948d4b5cae4SRobert Watson 	return (error);
949d4b5cae4SRobert Watson }
950d4b5cae4SRobert Watson 
951d4b5cae4SRobert Watson int
952d4b5cae4SRobert Watson netisr_dispatch(u_int proto, struct mbuf *m)
953d4b5cae4SRobert Watson {
954d4b5cae4SRobert Watson 
955d4b5cae4SRobert Watson 	return (netisr_dispatch_src(proto, 0, m));
956d4b5cae4SRobert Watson }
957d4b5cae4SRobert Watson 
958d4b5cae4SRobert Watson #ifdef DEVICE_POLLING
959d4b5cae4SRobert Watson /*
960d4b5cae4SRobert Watson  * Kernel polling borrows a netisr thread to run interface polling in; this
961d4b5cae4SRobert Watson  * function allows kernel polling to request that the netisr thread be
962d4b5cae4SRobert Watson  * scheduled even if no packets are pending for protocols.
963d4b5cae4SRobert Watson  */
964d4b5cae4SRobert Watson void
965d4b5cae4SRobert Watson netisr_sched_poll(void)
966d4b5cae4SRobert Watson {
967d4b5cae4SRobert Watson 	struct netisr_workstream *nwsp;
968d4b5cae4SRobert Watson 
96953402767SRobert Watson 	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
970d4b5cae4SRobert Watson 	NWS_SIGNAL(nwsp);
971d4b5cae4SRobert Watson }
9721cafed39SJonathan Lemon #endif
973e3b6e33cSJake Burkholder 
974d4b5cae4SRobert Watson static void
975d4b5cae4SRobert Watson netisr_start_swi(u_int cpuid, struct pcpu *pc)
976d4b5cae4SRobert Watson {
977d4b5cae4SRobert Watson 	char swiname[12];
978d4b5cae4SRobert Watson 	struct netisr_workstream *nwsp;
979d4b5cae4SRobert Watson 	int error;
980d4b5cae4SRobert Watson 
98153402767SRobert Watson 	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
98253402767SRobert Watson 
98353402767SRobert Watson 	nwsp = DPCPU_ID_PTR(cpuid, nws);
984d4b5cae4SRobert Watson 	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
985d4b5cae4SRobert Watson 	nwsp->nws_cpu = cpuid;
986d4b5cae4SRobert Watson 	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
987d4b5cae4SRobert Watson 	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
988d4b5cae4SRobert Watson 	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
989d4b5cae4SRobert Watson 	if (error)
990d4b5cae4SRobert Watson 		panic("%s: swi_add %d", __func__, error);
991d4b5cae4SRobert Watson 	pc->pc_netisr = nwsp->nws_intr_event;
992d4b5cae4SRobert Watson 	if (netisr_bindthreads) {
993d4b5cae4SRobert Watson 		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
994d4b5cae4SRobert Watson 		if (error != 0)
995d4b5cae4SRobert Watson 			printf("%s: cpu %u: intr_event_bind: %d", __func__,
996d4b5cae4SRobert Watson 			    cpuid, error);
997e3b6e33cSJake Burkholder 	}
998d4b5cae4SRobert Watson 	NETISR_WLOCK();
999d4b5cae4SRobert Watson 	nws_array[nws_count] = nwsp->nws_cpu;
1000d4b5cae4SRobert Watson 	nws_count++;
1001d4b5cae4SRobert Watson 	NETISR_WUNLOCK();
1002e3b6e33cSJake Burkholder }
1003e3b6e33cSJake Burkholder 
1004d4b5cae4SRobert Watson /*
1005d4b5cae4SRobert Watson  * Initialize the netisr subsystem.  We rely on BSS and static initialization
1006d4b5cae4SRobert Watson  * of most fields in global data structures.
1007d4b5cae4SRobert Watson  *
1008d4b5cae4SRobert Watson  * Start a worker thread for the boot CPU so that we can support network
1009d4b5cae4SRobert Watson  * traffic immediately in case the network stack is used before additional
1010d4b5cae4SRobert Watson  * CPUs are started (for example, diskless boot).
1011d4b5cae4SRobert Watson  */
1012e3b6e33cSJake Burkholder static void
1013d4b5cae4SRobert Watson netisr_init(void *arg)
1014e3b6e33cSJake Burkholder {
1015e3b6e33cSJake Burkholder 
1016d4b5cae4SRobert Watson 	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));
1017d4b5cae4SRobert Watson 
1018d4b5cae4SRobert Watson 	NETISR_LOCK_INIT();
10199e6e01ebSRobert Watson 	if (netisr_maxthreads < 1)
1020d4b5cae4SRobert Watson 		netisr_maxthreads = 1;
10219e6e01ebSRobert Watson 	if (netisr_maxthreads > mp_ncpus) {
1022912f6323SRobert Watson 		printf("netisr_init: forcing maxthreads from %d to %d\n",
10239e6e01ebSRobert Watson 		    netisr_maxthreads, mp_ncpus);
10249e6e01ebSRobert Watson 		netisr_maxthreads = mp_ncpus;
1025ed54411cSRobert Watson 	}
1026ed54411cSRobert Watson 	if (netisr_defaultqlimit > netisr_maxqlimit) {
1027912f6323SRobert Watson 		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
10289e6e01ebSRobert Watson 		    netisr_defaultqlimit, netisr_maxqlimit);
1029d4b5cae4SRobert Watson 		netisr_defaultqlimit = netisr_maxqlimit;
1030ed54411cSRobert Watson 	}
1031d4b5cae4SRobert Watson #ifdef DEVICE_POLLING
1032d4b5cae4SRobert Watson 	/*
1033d4b5cae4SRobert Watson 	 * The device polling code is not yet aware of how to deal with
1034d4b5cae4SRobert Watson 	 * multiple netisr threads, so for the time being compiling in device
1035d4b5cae4SRobert Watson 	 * polling disables parallel netisr workers.
1036d4b5cae4SRobert Watson 	 */
1037ed54411cSRobert Watson 	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
1038912f6323SRobert Watson 		printf("netisr_init: forcing maxthreads to 1 and "
1039912f6323SRobert Watson 		    "bindthreads to 0 for device polling\n");
1040d4b5cae4SRobert Watson 		netisr_maxthreads = 1;
1041d4b5cae4SRobert Watson 		netisr_bindthreads = 0;
1042ed54411cSRobert Watson 	}
1043d4b5cae4SRobert Watson #endif
1044d4b5cae4SRobert Watson 
1045d4b5cae4SRobert Watson 	netisr_start_swi(curcpu, pcpu_find(curcpu));
1046e3b6e33cSJake Burkholder }
1047d4b5cae4SRobert Watson SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
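/*
 * Usage sketch (an illustration, not code taken from this file): the limits
 * clamped in netisr_init() are normally seeded from loader tunables, so an
 * administrator can cap the worker pool before boot, e.g. in loader.conf:
 *
 *	net.isr.maxthreads=4
 *	net.isr.bindthreads=1
 *
 * The tunable names above assume the usual net.isr.* TUNABLEs declared
 * earlier in this file; out-of-range values are forced back into range at
 * initialization time, with a console message noting the adjustment.
 */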
1048d4b5cae4SRobert Watson 
1049d4b5cae4SRobert Watson /*
1050d4b5cae4SRobert Watson  * Start worker threads for additional CPUs.  No attempt is made to gracefully
1051d4b5cae4SRobert Watson  * handle work reassignment, as we don't yet support dynamic reconfiguration.
1052d4b5cae4SRobert Watson  */
1053d4b5cae4SRobert Watson static void
1054d4b5cae4SRobert Watson netisr_start(void *arg)
1055d4b5cae4SRobert Watson {
1056d4b5cae4SRobert Watson 	struct pcpu *pc;
1057d4b5cae4SRobert Watson 
1058d4b5cae4SRobert Watson 	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
1059d4b5cae4SRobert Watson 		if (nws_count >= netisr_maxthreads)
1060d4b5cae4SRobert Watson 			break;
1061d4b5cae4SRobert Watson 		/* XXXRW: Is skipping absent CPUs still required here? */
1062d4b5cae4SRobert Watson 		if (CPU_ABSENT(pc->pc_cpuid))
1063d4b5cae4SRobert Watson 			continue;
1064d4b5cae4SRobert Watson 		/* Worker will already be present for boot CPU. */
1065d4b5cae4SRobert Watson 		if (pc->pc_netisr != NULL)
1066d4b5cae4SRobert Watson 			continue;
1067d4b5cae4SRobert Watson 		netisr_start_swi(pc->pc_cpuid, pc);
1068d4b5cae4SRobert Watson 	}
1069d4b5cae4SRobert Watson }
1070d4b5cae4SRobert Watson SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
1071d4b5cae4SRobert Watson 
10722d22f334SRobert Watson /*
10732d22f334SRobert Watson  * Sysctl monitoring for netisr: query a list of registered protocols.
10742d22f334SRobert Watson  */
10752d22f334SRobert Watson static int
10762d22f334SRobert Watson sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
10772d22f334SRobert Watson {
10782d22f334SRobert Watson 	struct rm_priotracker tracker;
10792d22f334SRobert Watson 	struct sysctl_netisr_proto *snpp, *snp_array;
10802d22f334SRobert Watson 	struct netisr_proto *npp;
10812d22f334SRobert Watson 	u_int counter, proto;
10822d22f334SRobert Watson 	int error;
10832d22f334SRobert Watson 
10842d22f334SRobert Watson 	if (req->newptr != NULL)
10852d22f334SRobert Watson 		return (EINVAL);
10862d22f334SRobert Watson 	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
10872d22f334SRobert Watson 	    M_ZERO | M_WAITOK);
10882d22f334SRobert Watson 	counter = 0;
10892d22f334SRobert Watson 	NETISR_RLOCK(&tracker);
10902d22f334SRobert Watson 	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
1091938448cdSRobert Watson 		npp = &netisr_proto[proto];
10922d22f334SRobert Watson 		if (npp->np_name == NULL)
10932d22f334SRobert Watson 			continue;
10942d22f334SRobert Watson 		snpp = &snp_array[counter];
10952d22f334SRobert Watson 		snpp->snp_version = sizeof(*snpp);
10962d22f334SRobert Watson 		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
10972d22f334SRobert Watson 		snpp->snp_proto = proto;
10982d22f334SRobert Watson 		snpp->snp_qlimit = npp->np_qlimit;
10992d22f334SRobert Watson 		snpp->snp_policy = npp->np_policy;
11002d22f334SRobert Watson 		if (npp->np_m2flow != NULL)
11012d22f334SRobert Watson 			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
11022d22f334SRobert Watson 		if (npp->np_m2cpuid != NULL)
11032d22f334SRobert Watson 			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
11042d22f334SRobert Watson 		if (npp->np_drainedcpu != NULL)
11052d22f334SRobert Watson 			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
11062d22f334SRobert Watson 		counter++;
11072d22f334SRobert Watson 	}
11082d22f334SRobert Watson 	NETISR_RUNLOCK(&tracker);
11097f450febSRobert Watson 	KASSERT(counter <= NETISR_MAXPROT,
11102d22f334SRobert Watson 	    ("sysctl_netisr_proto: counter too big (%d)", counter));
11112d22f334SRobert Watson 	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
11122d22f334SRobert Watson 	free(snp_array, M_TEMP);
11132d22f334SRobert Watson 	return (error);
11142d22f334SRobert Watson }
11152d22f334SRobert Watson 
11162d22f334SRobert Watson SYSCTL_PROC(_net_isr, OID_AUTO, proto,
11172d22f334SRobert Watson     CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
11182d22f334SRobert Watson     "S,sysctl_netisr_proto",
11192d22f334SRobert Watson     "Return list of protocols registered with netisr");
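/*
 * Illustrative userspace consumer (a minimal sketch, not part of the kernel):
 * the list exported as net.isr.proto can be read with sysctlbyname(3) and
 * decoded using struct sysctl_netisr_proto from <net/netisr.h>, roughly as a
 * monitoring tool such as netstat(1) would do.  The program below is an
 * assumption-laden example, not an interface contract.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <net/netisr.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct sysctl_netisr_proto *array;
 *		size_t i, len;
 *
 *		// First call sizes the buffer, second call fills it.
 *		if (sysctlbyname("net.isr.proto", NULL, &len, NULL, 0) != 0)
 *			err(1, "sysctl net.isr.proto (size)");
 *		if ((array = malloc(len)) == NULL)
 *			err(1, "malloc");
 *		if (sysctlbyname("net.isr.proto", array, &len, NULL, 0) != 0)
 *			err(1, "sysctl net.isr.proto");
 *		for (i = 0; i < len / sizeof(*array); i++)
 *			printf("%u %s qlimit %u policy %u\n",
 *			    array[i].snp_proto, array[i].snp_name,
 *			    array[i].snp_qlimit, array[i].snp_policy);
 *		free(array);
 *		return (0);
 *	}
 */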
11202d22f334SRobert Watson 
11212d22f334SRobert Watson /*
11222d22f334SRobert Watson  * Sysctl monitoring for netisr: query a list of workstreams.
11232d22f334SRobert Watson  */
11242d22f334SRobert Watson static int
11252d22f334SRobert Watson sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
11262d22f334SRobert Watson {
11272d22f334SRobert Watson 	struct rm_priotracker tracker;
11282d22f334SRobert Watson 	struct sysctl_netisr_workstream *snwsp, *snws_array;
11292d22f334SRobert Watson 	struct netisr_workstream *nwsp;
11302d22f334SRobert Watson 	u_int counter, cpuid;
11312d22f334SRobert Watson 	int error;
11322d22f334SRobert Watson 
11332d22f334SRobert Watson 	if (req->newptr != NULL)
11342d22f334SRobert Watson 		return (EINVAL);
11352d22f334SRobert Watson 	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
11362d22f334SRobert Watson 	    M_ZERO | M_WAITOK);
11372d22f334SRobert Watson 	counter = 0;
11382d22f334SRobert Watson 	NETISR_RLOCK(&tracker);
11392d22f334SRobert Watson 	for (cpuid = 0; cpuid < MAXCPU; cpuid++) {
11402d22f334SRobert Watson 		if (CPU_ABSENT(cpuid))
11412d22f334SRobert Watson 			continue;
11422d22f334SRobert Watson 		nwsp = DPCPU_ID_PTR(cpuid, nws);
11432d22f334SRobert Watson 		if (nwsp->nws_intr_event == NULL)
11442d22f334SRobert Watson 			continue;
11452d22f334SRobert Watson 		NWS_LOCK(nwsp);
11462d22f334SRobert Watson 		snwsp = &snws_array[counter];
11472d22f334SRobert Watson 		snwsp->snws_version = sizeof(*snwsp);
11482d22f334SRobert Watson 
11492d22f334SRobert Watson 		/*
11502d22f334SRobert Watson 		 * For now, we equate workstream IDs and CPU IDs in the
11512d22f334SRobert Watson 		 * kernel, but expose them independently to userspace in case
11522d22f334SRobert Watson 		 * that assumption changes in the future.
11532d22f334SRobert Watson 		 */
11542d22f334SRobert Watson 		snwsp->snws_wsid = cpuid;
11552d22f334SRobert Watson 		snwsp->snws_cpu = cpuid;
11562d22f334SRobert Watson 		if (nwsp->nws_intr_event != NULL)
11572d22f334SRobert Watson 			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
11582d22f334SRobert Watson 		NWS_UNLOCK(nwsp);
11592d22f334SRobert Watson 		counter++;
11602d22f334SRobert Watson 	}
11612d22f334SRobert Watson 	NETISR_RUNLOCK(&tracker);
11627f450febSRobert Watson 	KASSERT(counter <= MAXCPU,
11632d22f334SRobert Watson 	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
11642d22f334SRobert Watson 	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
11652d22f334SRobert Watson 	free(snws_array, M_TEMP);
11662d22f334SRobert Watson 	return (error);
11672d22f334SRobert Watson }
11682d22f334SRobert Watson 
11692d22f334SRobert Watson SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
11702d22f334SRobert Watson     CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
11712d22f334SRobert Watson     "S,sysctl_netisr_workstream",
11722d22f334SRobert Watson     "Return list of workstreams implemented by netisr");
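/*
 * The workstream list is consumed the same way as net.isr.proto above: read
 * net.isr.workstream with sysctlbyname(3) and decode the buffer as an array
 * of struct sysctl_netisr_workstream (a usage hint mirroring the earlier
 * sketch, not anything defined in this file).
 */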
11732d22f334SRobert Watson 
11742d22f334SRobert Watson /*
11752d22f334SRobert Watson  * Sysctl monitoring for netisr: query per-protocol data across all
11762d22f334SRobert Watson  * workstreams.
11772d22f334SRobert Watson  */
11782d22f334SRobert Watson static int
11792d22f334SRobert Watson sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
11802d22f334SRobert Watson {
11812d22f334SRobert Watson 	struct rm_priotracker tracker;
11822d22f334SRobert Watson 	struct sysctl_netisr_work *snwp, *snw_array;
11832d22f334SRobert Watson 	struct netisr_workstream *nwsp;
11842d22f334SRobert Watson 	struct netisr_proto *npp;
11852d22f334SRobert Watson 	struct netisr_work *nwp;
11862d22f334SRobert Watson 	u_int counter, cpuid, proto;
11872d22f334SRobert Watson 	int error;
11882d22f334SRobert Watson 
11892d22f334SRobert Watson 	if (req->newptr != NULL)
11902d22f334SRobert Watson 		return (EINVAL);
11912d22f334SRobert Watson 	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
11922d22f334SRobert Watson 	    M_TEMP, M_ZERO | M_WAITOK);
11932d22f334SRobert Watson 	counter = 0;
11942d22f334SRobert Watson 	NETISR_RLOCK(&tracker);
11952d22f334SRobert Watson 	for (cpuid = 0; cpuid < MAXCPU; cpuid++) {
11962d22f334SRobert Watson 		if (CPU_ABSENT(cpuid))
11972d22f334SRobert Watson 			continue;
11982d22f334SRobert Watson 		nwsp = DPCPU_ID_PTR(cpuid, nws);
11992d22f334SRobert Watson 		if (nwsp->nws_intr_event == NULL)
12002d22f334SRobert Watson 			continue;
12012d22f334SRobert Watson 		NWS_LOCK(nwsp);
12022d22f334SRobert Watson 		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
1203938448cdSRobert Watson 			npp = &netisr_proto[proto];
12042d22f334SRobert Watson 			if (npp->np_name == NULL)
12052d22f334SRobert Watson 				continue;
12062d22f334SRobert Watson 			nwp = &nwsp->nws_work[proto];
12072d22f334SRobert Watson 			snwp = &snw_array[counter];
12082d22f334SRobert Watson 			snwp->snw_version = sizeof(*snwp);
12092d22f334SRobert Watson 			snwp->snw_wsid = cpuid;		/* See comment above. */
12102d22f334SRobert Watson 			snwp->snw_proto = proto;
12112d22f334SRobert Watson 			snwp->snw_len = nwp->nw_len;
12122d22f334SRobert Watson 			snwp->snw_watermark = nwp->nw_watermark;
12132d22f334SRobert Watson 			snwp->snw_dispatched = nwp->nw_dispatched;
12142d22f334SRobert Watson 			snwp->snw_hybrid_dispatched =
12152d22f334SRobert Watson 			    nwp->nw_hybrid_dispatched;
12162d22f334SRobert Watson 			snwp->snw_qdrops = nwp->nw_qdrops;
12172d22f334SRobert Watson 			snwp->snw_queued = nwp->nw_queued;
12182d22f334SRobert Watson 			snwp->snw_handled = nwp->nw_handled;
12192d22f334SRobert Watson 			counter++;
12202d22f334SRobert Watson 		}
12212d22f334SRobert Watson 		NWS_UNLOCK(nwsp);
12222d22f334SRobert Watson 	}
12237f450febSRobert Watson 	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
12242d22f334SRobert Watson 	    ("sysctl_netisr_work: counter too big (%d)", counter));
12252d22f334SRobert Watson 	NETISR_RUNLOCK(&tracker);
12262d22f334SRobert Watson 	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
12272d22f334SRobert Watson 	free(snw_array, M_TEMP);
12282d22f334SRobert Watson 	return (error);
12292d22f334SRobert Watson }
12302d22f334SRobert Watson 
12312d22f334SRobert Watson SYSCTL_PROC(_net_isr, OID_AUTO, work,
12322d22f334SRobert Watson     CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
12332d22f334SRobert Watson     "S,sysctl_netisr_work",
12342d22f334SRobert Watson     "Return list of per-workstream, per-protocol work in netisr");
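/*
 * Consumer note (an inference from the structures filled in above, not a
 * documented contract): the three lists exported as net.isr.proto,
 * net.isr.workstream, and net.isr.work are meant to be joined in userspace.
 * Each work record carries a (snw_wsid, snw_proto) pair keying into the
 * workstream and protocol lists, and every record begins with a version
 * field set to the structure size, so a hypothetical consumer holding the
 * records in "work[]" can detect layout changes, e.g.:
 *
 *	if (work[i].snw_version != sizeof(struct sysctl_netisr_work))
 *		errx(1, "unexpected net.isr.work record size");
 */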
12352d22f334SRobert Watson 
1236d4b5cae4SRobert Watson #ifdef DDB
1237d4b5cae4SRobert Watson DB_SHOW_COMMAND(netisr, db_show_netisr)
1238d4b5cae4SRobert Watson {
1239d4b5cae4SRobert Watson 	struct netisr_workstream *nwsp;
1240d4b5cae4SRobert Watson 	struct netisr_work *nwp;
1241d4b5cae4SRobert Watson 	int first, proto;
124253402767SRobert Watson 	u_int cpuid;
1243d4b5cae4SRobert Watson 
1244d4b5cae4SRobert Watson 	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
1245d4b5cae4SRobert Watson 	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
12469e6e01ebSRobert Watson 	for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
124753402767SRobert Watson 		if (CPU_ABSENT(cpuid))
124853402767SRobert Watson 			continue;
124953402767SRobert Watson 		nwsp = DPCPU_ID_PTR(cpuid, nws);
1250d4b5cae4SRobert Watson 		if (nwsp->nws_intr_event == NULL)
1251d4b5cae4SRobert Watson 			continue;
1252d4b5cae4SRobert Watson 		first = 1;
1253d4b5cae4SRobert Watson 		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
1254938448cdSRobert Watson 			if (netisr_proto[proto].np_handler == NULL)
1255d4b5cae4SRobert Watson 				continue;
1256d4b5cae4SRobert Watson 			nwp = &nwsp->nws_work[proto];
1257d4b5cae4SRobert Watson 			if (first) {
125853402767SRobert Watson 				db_printf("%3d ", cpuid);
1259d4b5cae4SRobert Watson 				first = 0;
1260d4b5cae4SRobert Watson 			} else
1261d4b5cae4SRobert Watson 				db_printf("%3s ", "");
1262d4b5cae4SRobert Watson 			db_printf(
1263d4b5cae4SRobert Watson 			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
1264938448cdSRobert Watson 			    netisr_proto[proto].np_name, nwp->nw_len,
1265d4b5cae4SRobert Watson 			    nwp->nw_watermark, nwp->nw_qlimit,
1266d4b5cae4SRobert Watson 			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
1267d4b5cae4SRobert Watson 			    nwp->nw_qdrops, nwp->nw_queued);
1268d4b5cae4SRobert Watson 		}
1269d4b5cae4SRobert Watson 	}
1270d4b5cae4SRobert Watson }
1271d4b5cae4SRobert Watson #endif
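/*
 * When the kernel is built with DDB, the command defined above is invoked
 * from the debugger prompt as "show netisr" and prints one row per
 * registered protocol for each active workstream.
 */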
1272