/*-
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other based on:
 *
 * - Whether directly dispatching a netisr handler would lead to code
 *   reentrance or lock recursion, such as entering the socket code from the
 *   socket code.
 * - Whether directly dispatching a netisr handler would lead to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * to keep connection data in one cache, and to generally encourage
 * associated user threads to live on the same CPU as the stream.  It's also
 * desirable to avoid lock migration and contention where locks are
 * associated with more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * netisr.h.
 */

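/*
 * For illustration (an editorial sketch, not part of the original code): a
 * protocol might register with netisr roughly as follows, where NETISR_FOO
 * and foo_input() are hypothetical names standing in for a real protocol
 * number from netisr.h and its handler function:
 *
 *	static const struct netisr_handler foo_nh = {
 *		.nh_name = "foo",
 *		.nh_handler = foo_input,
 *		.nh_proto = NETISR_FOO,
 *		.nh_policy = NETISR_POLICY_SOURCE,
 *	};
 *
 *	netisr_register(&foo_nh);
 *
 * Received packets are then handed to netisr with
 * netisr_dispatch(NETISR_FOO, m) for possible direct dispatch, or
 * netisr_queue(NETISR_FOO, m) for deferred dispatch.
 */
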
#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/vnet.h>

/*-
 * Synchronize use and modification of the registered netisr data structures;
 * acquire the exclusive write lock while modifying the set of registered
 * protocols to prevent partially registered or unregistered protocols from
 * being run.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The np array, including all fields of struct netisr_proto.
 * - The nws per-CPU workstreams, including all fields of struct
 *   netisr_workstream.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration is extremely rare at
 * runtime.  If it becomes more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");

/*-
 * Three direct dispatch policies are supported:
 *
 * - Always defer: all work is scheduled for a netisr, regardless of context.
 *   (!direct)
 *
 * - Hybrid: if the executing context allows direct dispatch, and we're
 *   running on the CPU the work would be done on, then direct dispatch if it
 *   wouldn't violate ordering constraints on the workstream.
 *   (direct && !direct_force)
 *
 * - Always direct: if the executing context allows direct dispatch, always
 *   direct dispatch.  (direct && direct_force)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.
 */
static int	netisr_direct_force = 1;	/* Always direct dispatch. */
TUNABLE_INT("net.isr.direct_force", &netisr_direct_force);
SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RW,
    &netisr_direct_force, 0, "Force direct dispatch");

static int	netisr_direct = 1;	/* Enable direct dispatch. */
TUNABLE_INT("net.isr.direct", &netisr_direct);
SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RW,
    &netisr_direct, 0, "Enable direct dispatch");

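/*
 * Illustrative example (not part of the original code): given the policy
 * descriptions above, an administrator could select "always defer" by
 * setting both tunables to 0, e.g. in loader.conf:
 *
 *	net.isr.direct_force="0"
 *	net.isr.direct="0"
 *
 * Leaving direct=1 with direct_force=0 selects the hybrid policy; both set
 * to 1 (the defaults here) force direct dispatch.  Both OIDs are CTLFLAG_RW
 * and so may also be changed at runtime via sysctl(8).
 */
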
/*
 * Allow the administrator to limit the number of threads (CPUs) to use for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0, so in practice we ignore values <= 1.  This must be set at boot.
 * We will create at most one thread per CPU.
 */
static int	netisr_maxthreads = -1;		/* Max number of threads. */
TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads);
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads);
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");

/*
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit, both
 * for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit);
SYSCTL_INT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");

/*
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit);
SYSCTL_INT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");

/*
 * Each protocol is described by a struct netisr_proto, which holds all
 * global per-protocol information.  This data structure is set up by
 * netisr_register(), and derived from the public struct netisr_handler.
 */
struct netisr_proto {
	const char	*np_name;	/* Character string protocol name. */
	netisr_handler_t *np_handler;	/* Protocol handler. */
	netisr_m2flow_t	*np_m2flow;	/* Query flow for untagged packet. */
	netisr_m2cpuid_t *np_m2cpuid;	/* Query CPU to process packet on. */
	netisr_drainedcpu_t *np_drainedcpu; /* Callback when a queue drains. */
	u_int		 np_qlimit;	/* Maximum per-CPU queue depth. */
	u_int		 np_policy;	/* Work placement policy. */
};

#define	NETISR_MAXPROT		16		/* Compile-time limit. */

/*
 * The np array describes all registered protocols, indexed by protocol
 * number.
 */
static struct netisr_proto	np[NETISR_MAXPROT];

/*
 * Protocol-specific work for each workstream is described by struct
 * netisr_work.  Each work descriptor consists of an mbuf queue and
 * statistics.
 */
struct netisr_work {
	/*
	 * Packet queue, linked by m_nextpkt.
	 */
	struct mbuf	*nw_head;
	struct mbuf	*nw_tail;
	u_int		 nw_len;
	u_int		 nw_qlimit;
	u_int		 nw_watermark;

	/*
	 * Statistics -- written unlocked, but mostly from curcpu.
	 */
	u_int64_t	 nw_dispatched; /* Number of direct dispatches. */
	u_int64_t	 nw_hybrid_dispatched; /* "" hybrid dispatches. */
	u_int64_t	 nw_qdrops;	/* "" drops. */
	u_int64_t	 nw_queued;	/* "" enqueues. */
	u_int64_t	 nw_handled;	/* "" handled in worker. */
};

/*
 * Workstreams hold a set of ordered work queues, one per protocol, and are
 * described by netisr_workstream.  Each workstream is associated with a
 * worker thread, which in turn is pinned to a CPU.  Work associated with a
 * workstream can be processed in other threads during direct dispatch;
 * concurrent processing is prevented by the NWS_RUNNING flag, which
 * indicates that a thread is already processing the work queue.  It is
 * important to prevent a directly dispatched packet from "skipping ahead" of
 * work already in the workstream queue.
 */
struct netisr_workstream {
	struct intr_event *nws_intr_event;	/* Handler for stream. */
	void		*nws_swi_cookie;	/* swi(9) cookie for stream. */
	struct mtx	 nws_mtx;		/* Synchronize work. */
	u_int		 nws_cpu;		/* CPU pinning. */
	u_int		 nws_flags;		/* Wakeup flags. */
	u_int		 nws_pendingbits;	/* Scheduled protocols. */

	/*
	 * Each protocol has per-workstream data.
	 */
	struct netisr_work	nws_work[NETISR_MAXPROT];
} __aligned(CACHE_LINE_SIZE);

/*
 * Per-CPU workstream data.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);

/*
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int				 nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int				 nws_count;
SYSCTL_INT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Per-workstream flags.
 */
#define	NWS_RUNNING	0x00000001	/* Currently running in a thread. */
#define	NWS_DISPATCHING	0x00000002	/* Currently being direct-dispatched. */
#define	NWS_SCHEDULED	0x00000004	/* Signal issued. */

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)

/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	KASSERT(cpunumber < nws_count, ("%s: %u >= %u", __func__, cpunumber,
	    nws_count));

	return (nws_array[cpunumber]);
}

/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}

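/*
 * For illustration (an editorial sketch, not part of the original code): a
 * NETISR_POLICY_CPU protocol could implement its nh_m2cpuid callback in
 * terms of these helpers, e.g. with a hypothetical foo_hash() computing a
 * flow hash from the packet:
 *
 *	static struct mbuf *
 *	foo_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuidp)
 *	{
 *
 *		*cpuidp = netisr_default_flow2cpu(foo_hash(m));
 *		return (m);
 *	}
 */
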
/*
 * Register a new netisr handler, which requires initializing per-protocol
 * fields for each workstream.  All netisr work is briefly suspended while
 * the protocol is installed.
 */
void
netisr_register(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
	const char *name;
	u_int i, proto;

	proto = nhp->nh_proto;
	name = nhp->nh_name;

	/*
	 * Test that the requested registration is valid.
	 */
	KASSERT(nhp->nh_name != NULL,
	    ("%s: nh_name NULL for %u", __func__, proto));
	KASSERT(nhp->nh_handler != NULL,
	    ("%s: nh_handler NULL for %s", __func__, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
	    nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_policy == NETISR_POLICY_CPU,
	    ("%s: unsupported nh_policy %u for %s", __func__,
	    nhp->nh_policy, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_m2flow == NULL,
	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
	    name));
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u, %s): protocol too big", __func__, proto, name));

	/*
	 * Test that no existing registration exists for this protocol.
	 */
	NETISR_WLOCK();
	KASSERT(np[proto].np_name == NULL,
	    ("%s(%u, %s): name present", __func__, proto, name));
	KASSERT(np[proto].np_handler == NULL,
	    ("%s(%u, %s): handler present", __func__, proto, name));

	np[proto].np_name = name;
	np[proto].np_handler = nhp->nh_handler;
	np[proto].np_m2flow = nhp->nh_m2flow;
	np[proto].np_m2cpuid = nhp->nh_m2cpuid;
	np[proto].np_drainedcpu = nhp->nh_drainedcpu;
	if (nhp->nh_qlimit == 0)
		np[proto].np_qlimit = netisr_defaultqlimit;
	else if (nhp->nh_qlimit > netisr_maxqlimit) {
		printf("%s: %s requested queue limit %u capped to "
		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
		    netisr_maxqlimit);
		np[proto].np_qlimit = netisr_maxqlimit;
	} else
		np[proto].np_qlimit = nhp->nh_qlimit;
	np[proto].np_policy = nhp->nh_policy;
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		bzero(npwp, sizeof(*npwp));
		npwp->nw_qlimit = np[proto].np_qlimit;
	}
	NETISR_WUNLOCK();
}

/*
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qdrops = 0;
	}
	NETISR_WUNLOCK();
}

/*
 * Query current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
	struct netisr_work *npwp;
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	*qdropp = 0;
	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		*qdropp += npwp->nw_qdrops;
	}
	NETISR_RUNLOCK(&tracker);
}

/*
 * Query current per-workstream queue limit for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));
	*qlimitp = np[proto].np_qlimit;
	NETISR_RUNLOCK(&tracker);
}

/*
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	if (qlimit > netisr_maxqlimit)
		return (EINVAL);

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	np[proto].np_qlimit = qlimit;
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qlimit = qlimit;
	}
	NETISR_WUNLOCK();
	return (0);
}

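/*
 * Illustrative usage (a sketch, not part of the original code): a protocol
 * wanting a deeper queue at runtime could call, with the hypothetical
 * foo_nh handler from above:
 *
 *	if (netisr_setqlimit(&foo_nh, 4096) != 0)
 *		printf("foo: queue limit rejected by net.isr.maxqlimit\n");
 *
 * EINVAL is returned if the requested limit exceeds netisr_maxqlimit.
 */
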
/*
 * Drain all packets currently held in a particular protocol work queue.
 */
static void
netisr_drain_proto(struct netisr_work *npwp)
{
	struct mbuf *m;

	/*
	 * We would assert the lock on the workstream but it's not passed in.
	 */
	while ((m = npwp->nw_head) != NULL) {
		npwp->nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL)
			npwp->nw_tail = NULL;
		npwp->nw_len--;
		m_freem(m);
	}
	KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
	KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
}

/*
 * Remove the registration of a network protocol, which requires clearing
 * per-protocol fields across all workstreams, including freeing all mbufs in
 * the queues at time of unregister.  All work in netisr is briefly suspended
 * while this takes place.
 */
void
netisr_unregister(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	np[proto].np_name = NULL;
	np[proto].np_handler = NULL;
	np[proto].np_m2flow = NULL;
	np[proto].np_m2cpuid = NULL;
	np[proto].np_drainedcpu = NULL;
	np[proto].np_qlimit = 0;
	np[proto].np_policy = 0;
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		netisr_drain_proto(npwp);
		bzero(npwp, sizeof(*npwp));
	}
	NETISR_WUNLOCK();
}

/*
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, uintptr_t source,
    struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;

	NETISR_LOCK_ASSERT();

	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	switch (npp->np_policy) {
	case NETISR_POLICY_CPU:
		return (npp->np_m2cpuid(m, source, cpuidp));

	case NETISR_POLICY_FLOW:
		if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (m->m_flags & M_FLOWID) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		/* FALLTHROUGH */

	case NETISR_POLICY_SOURCE:
		ifp = m->m_pkthdr.rcvif;
		if (ifp != NULL)
			*cpuidp = nws_array[(ifp->if_index + source) %
			    nws_count];
		else
			*cpuidp = nws_array[source % nws_count];
		return (m);

	default:
		panic("%s: invalid policy %u for %s", __func__,
		    npp->np_policy, npp->np_name);
	}
}

/*
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
	struct netisr_work local_npw, *npwp;
	u_int handled;
	struct mbuf *m;

	NETISR_LOCK_ASSERT();
	NWS_LOCK_ASSERT(nwsp);

	KASSERT(nwsp->nws_flags & NWS_RUNNING,
	    ("%s(%u): not running", __func__, proto));
	KASSERT(proto >= 0 && proto < NETISR_MAXPROT,
	    ("%s(%u): invalid proto\n", __func__, proto));

	npwp = &nwsp->nws_work[proto];
	if (npwp->nw_len == 0)
		return (0);

	/*
	 * Move the global work queue to a thread-local work queue.
	 *
	 * Notice that this means the effective maximum length of the queue
	 * is actually twice that of the maximum queue length specified in
	 * the protocol registration call.
	 */
	handled = npwp->nw_len;
	local_npw = *npwp;
	npwp->nw_head = NULL;
	npwp->nw_tail = NULL;
	npwp->nw_len = 0;
	nwsp->nws_pendingbits &= ~(1 << proto);
	NWS_UNLOCK(nwsp);
	while ((m = local_npw.nw_head) != NULL) {
		local_npw.nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (local_npw.nw_head == NULL)
			local_npw.nw_tail = NULL;
		local_npw.nw_len--;
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL);
		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
		np[proto].np_handler(m);
		CURVNET_RESTORE();
	}
	KASSERT(local_npw.nw_len == 0,
	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
	if (np[proto].np_drainedcpu)
		np[proto].np_drainedcpu(nwsp->nws_cpu);
	NWS_LOCK(nwsp);
	npwp->nw_handled += handled;
	return (handled);
}

/*
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	nwsp = arg;

#ifdef DEVICE_POLLING
	KASSERT(nws_count == 1,
	    ("%s: device_polling but nws_count != 1", __func__));
	netisr_poll();
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	while ((bits = nwsp->nws_pendingbits) != 0) {
		while ((prot = ffs(bits)) != 0) {
			prot--;
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	netisr_pollmore();
#endif
}

static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

	NWS_LOCK_ASSERT(nwsp);

	*dosignalp = 0;
	if (npwp->nw_len < npwp->nw_qlimit) {
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL) {
			npwp->nw_head = m;
			npwp->nw_tail = m;
		} else {
			npwp->nw_tail->m_nextpkt = m;
			npwp->nw_tail = m;
		}
		npwp->nw_len++;
		if (npwp->nw_len > npwp->nw_watermark)
			npwp->nw_watermark = npwp->nw_len;

		/*
		 * We must set the bit regardless of NWS_RUNNING, so that
		 * swi_net() keeps calling netisr_process_workstream_proto().
		 */
		nwsp->nws_pendingbits |= (1 << proto);
		if (!(nwsp->nws_flags &
		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
			nwsp->nws_flags |= NWS_SCHEDULED;
			*dosignalp = 1;	/* Defer until unlocked. */
		}
		npwp->nw_queued++;
		return (0);
	} else {
		m_freem(m);
		npwp->nw_qdrops++;
		return (ENOBUFS);
	}
}

static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, mp_maxid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	dosignal = 0;
	error = 0;
	nwsp = DPCPU_ID_PTR(cpuid, nws);
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}

int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(np[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	m = netisr_select_cpuid(&np[proto], source, m, &cpuid);
	if (m != NULL) {
		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
		    cpuid));
		error = netisr_queue_internal(proto, m, cpuid);
	} else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}

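/*
 * Illustrative usage (a sketch, not part of the original code), again with
 * a hypothetical NETISR_FOO: netisr always consumes the mbuf, so callers
 * only account for errors and must not touch the packet afterwards:
 *
 *	error = netisr_queue(NETISR_FOO, m);
 *	if (error == ENOBUFS)
 *		foo_drops++;	(m has already been freed by netisr)
 */
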
/*
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * the calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid;

	/*
	 * If direct dispatch is entirely disabled, fall back on queueing.
	 */
	if (!netisr_direct)
		return (netisr_queue_src(proto, source, m));

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(np[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (netisr_direct_force) {
		nwsp = DPCPU_PTR(nws);
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		np[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
	m = netisr_select_cpuid(&np[proto], source, m, &cpuid);
	if (m == NULL) {
		error = ENOBUFS;
		goto out_unlock;
	}
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
	sched_pin();
	if (cpuid != curcpu)
		goto queue_fallback;
	nwsp = DPCPU_PTR(nws);
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	np[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
	sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}

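/*
 * Illustrative call site (a sketch, not part of the original code): link
 * layer input paths typically hand received packets to netisr along these
 * lines, with rcvif set since the handlers above rely on it for vnet
 * selection:
 *
 *	m->m_pkthdr.rcvif = ifp;
 *	netisr_dispatch(NETISR_IP, m);
 */
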
1015d4b5cae4SRobert Watson #ifdef DEVICE_POLLING
1016d4b5cae4SRobert Watson /*
1017d4b5cae4SRobert Watson  * Kernel polling borrows a netisr thread to run interface polling in; this
1018d4b5cae4SRobert Watson  * function allows kernel polling to request that the netisr thread be
1019d4b5cae4SRobert Watson  * scheduled even if no packets are pending for protocols.
1020d4b5cae4SRobert Watson  */
1021d4b5cae4SRobert Watson void
1022d4b5cae4SRobert Watson netisr_sched_poll(void)
1023d4b5cae4SRobert Watson {
1024d4b5cae4SRobert Watson 	struct netisr_workstream *nwsp;
1025d4b5cae4SRobert Watson 
102653402767SRobert Watson 	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
1027d4b5cae4SRobert Watson 	NWS_SIGNAL(nwsp);
1028d4b5cae4SRobert Watson }
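
/*
 * Illustrative sketch (editor's addition): a polling clock hook would
 * simply call netisr_sched_poll() each tick to guarantee the worker runs;
 * "example_poll_tick" is hypothetical and stands in for the hook in the
 * device polling code.
 */
#if 0
static void
example_poll_tick(void)
{

	/* Wake the netisr 0 SWI even with no queued protocol work. */
	netisr_sched_poll();
}
#endif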
10291cafed39SJonathan Lemon #endif
1030e3b6e33cSJake Burkholder 
1031d4b5cae4SRobert Watson static void
1032d4b5cae4SRobert Watson netisr_start_swi(u_int cpuid, struct pcpu *pc)
1033d4b5cae4SRobert Watson {
1034d4b5cae4SRobert Watson 	char swiname[12];
1035d4b5cae4SRobert Watson 	struct netisr_workstream *nwsp;
1036d4b5cae4SRobert Watson 	int error;
1037d4b5cae4SRobert Watson 
103853402767SRobert Watson 	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
103953402767SRobert Watson 
104053402767SRobert Watson 	nwsp = DPCPU_ID_PTR(cpuid, nws);
1041d4b5cae4SRobert Watson 	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
1042d4b5cae4SRobert Watson 	nwsp->nws_cpu = cpuid;
1043d4b5cae4SRobert Watson 	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
1044d4b5cae4SRobert Watson 	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
1045d4b5cae4SRobert Watson 	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
1046d4b5cae4SRobert Watson 	if (error)
1047d4b5cae4SRobert Watson 		panic("%s: swi_add %d", __func__, error);
1048d4b5cae4SRobert Watson 	pc->pc_netisr = nwsp->nws_intr_event;
1049d4b5cae4SRobert Watson 	if (netisr_bindthreads) {
1050d4b5cae4SRobert Watson 		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
1051d4b5cae4SRobert Watson 		if (error != 0)
1052d4b5cae4SRobert Watson 			printf("%s: cpu %u: intr_event_bind: %d\n", __func__,
1053d4b5cae4SRobert Watson 			    cpuid, error);
1054e3b6e33cSJake Burkholder 	}
1055d4b5cae4SRobert Watson 	NETISR_WLOCK();
1056d4b5cae4SRobert Watson 	nws_array[nws_count] = nwsp->nws_cpu;
1057d4b5cae4SRobert Watson 	nws_count++;
1058d4b5cae4SRobert Watson 	NETISR_WUNLOCK();
1059e3b6e33cSJake Burkholder }
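
/*
 * Illustrative sketch (editor's addition): the DPCPU access pattern used
 * above.  "nws" is the per-CPU workstream variable defined earlier in
 * this file; DPCPU_ID_PTR() resolves a specific CPU's instance, and the
 * NWS_LOCK()/NWS_UNLOCK() macros guard its mutable fields.
 */
#if 0
static void
example_peek_workstream(u_int cpuid)
{
	struct netisr_workstream *nwsp;

	nwsp = DPCPU_ID_PTR(cpuid, nws);
	NWS_LOCK(nwsp);
	/* Inspect nwsp->nws_flags, nwsp->nws_pendingbits, etc. here. */
	NWS_UNLOCK(nwsp);
}
#endif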
1060e3b6e33cSJake Burkholder 
1061d4b5cae4SRobert Watson /*
1062d4b5cae4SRobert Watson  * Initialize the netisr subsystem.  We rely on BSS and static initialization
1063d4b5cae4SRobert Watson  * of most fields in global data structures.
1064d4b5cae4SRobert Watson  *
1065d4b5cae4SRobert Watson  * Start a worker thread for the boot CPU so that we can support network
1066d4b5cae4SRobert Watson  * traffic immediately in case the network stack is used before additional
1067d4b5cae4SRobert Watson  * CPUs are started (for example, diskless boot).
1068d4b5cae4SRobert Watson  */
1069e3b6e33cSJake Burkholder static void
1070d4b5cae4SRobert Watson netisr_init(void *arg)
1071e3b6e33cSJake Burkholder {
1072e3b6e33cSJake Burkholder 
1073d4b5cae4SRobert Watson 	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));
1074d4b5cae4SRobert Watson 
1075d4b5cae4SRobert Watson 	NETISR_LOCK_INIT();
10769e6e01ebSRobert Watson 	if (netisr_maxthreads < 1)
1077d4b5cae4SRobert Watson 		netisr_maxthreads = 1;
10789e6e01ebSRobert Watson 	if (netisr_maxthreads > mp_ncpus) {
1079912f6323SRobert Watson 		printf("netisr_init: forcing maxthreads from %d to %d\n",
10809e6e01ebSRobert Watson 		    netisr_maxthreads, mp_ncpus);
10819e6e01ebSRobert Watson 		netisr_maxthreads = mp_ncpus;
1082ed54411cSRobert Watson 	}
1083ed54411cSRobert Watson 	if (netisr_defaultqlimit > netisr_maxqlimit) {
1084912f6323SRobert Watson 		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
10859e6e01ebSRobert Watson 		    netisr_defaultqlimit, netisr_maxqlimit);
1086d4b5cae4SRobert Watson 		netisr_defaultqlimit = netisr_maxqlimit;
1087ed54411cSRobert Watson 	}
1088d4b5cae4SRobert Watson #ifdef DEVICE_POLLING
1089d4b5cae4SRobert Watson 	/*
1090d4b5cae4SRobert Watson 	 * The device polling code does not yet know how to deal with
1091d4b5cae4SRobert Watson 	 * multiple netisr threads, so for the time being, compiling in
1092d4b5cae4SRobert Watson 	 * device polling disables parallel netisr workers.
1093d4b5cae4SRobert Watson 	 */
1094ed54411cSRobert Watson 	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
1095912f6323SRobert Watson 		printf("netisr_init: forcing maxthreads to 1 and "
1096912f6323SRobert Watson 		    "bindthreads to 0 for device polling\n");
1097d4b5cae4SRobert Watson 		netisr_maxthreads = 1;
1098d4b5cae4SRobert Watson 		netisr_bindthreads = 0;
1099ed54411cSRobert Watson 	}
1100d4b5cae4SRobert Watson #endif
1101d4b5cae4SRobert Watson 
1102d4b5cae4SRobert Watson 	netisr_start_swi(curcpu, pcpu_find(curcpu));
1103e3b6e33cSJake Burkholder }
1104d4b5cae4SRobert Watson SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
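
/*
 * Editor's note (not part of the original file): the values clamped in
 * netisr_init() arrive via loader tunables, assuming the TUNABLE_INT
 * declarations earlier in this file, e.g. in /boot/loader.conf:
 *
 *	net.isr.maxthreads=4		# capped at mp_ncpus above
 *	net.isr.bindthreads=1		# pin each worker to its CPU
 *	net.isr.defaultqlimit=256	# clamped to net.isr.maxqlimit
 */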
1105d4b5cae4SRobert Watson 
1106d4b5cae4SRobert Watson /*
1107d4b5cae4SRobert Watson  * Start worker threads for additional CPUs.  No attempt is made to gracefully
1108d4b5cae4SRobert Watson  * handle work reassignment, as we don't yet support dynamic reconfiguration.
1109d4b5cae4SRobert Watson  */
1110d4b5cae4SRobert Watson static void
1111d4b5cae4SRobert Watson netisr_start(void *arg)
1112d4b5cae4SRobert Watson {
1113d4b5cae4SRobert Watson 	struct pcpu *pc;
1114d4b5cae4SRobert Watson 
1115d4b5cae4SRobert Watson 	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
1116d4b5cae4SRobert Watson 		if (nws_count >= netisr_maxthreads)
1117d4b5cae4SRobert Watson 			break;
1118d4b5cae4SRobert Watson 		/* XXXRW: Is skipping absent CPUs still required here? */
1119d4b5cae4SRobert Watson 		if (CPU_ABSENT(pc->pc_cpuid))
1120d4b5cae4SRobert Watson 			continue;
1121d4b5cae4SRobert Watson 		/* Worker will already be present for boot CPU. */
1122d4b5cae4SRobert Watson 		if (pc->pc_netisr != NULL)
1123d4b5cae4SRobert Watson 			continue;
1124d4b5cae4SRobert Watson 		netisr_start_swi(pc->pc_cpuid, pc);
1125d4b5cae4SRobert Watson 	}
1126d4b5cae4SRobert Watson }
1127d4b5cae4SRobert Watson SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
1128d4b5cae4SRobert Watson 
1129d4b5cae4SRobert Watson #ifdef DDB
1130d4b5cae4SRobert Watson DB_SHOW_COMMAND(netisr, db_show_netisr)
1131d4b5cae4SRobert Watson {
1132d4b5cae4SRobert Watson 	struct netisr_workstream *nwsp;
1133d4b5cae4SRobert Watson 	struct netisr_work *nwp;
1134d4b5cae4SRobert Watson 	int first, proto;
113553402767SRobert Watson 	u_int cpuid;
1136d4b5cae4SRobert Watson 
1137d4b5cae4SRobert Watson 	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
1138d4b5cae4SRobert Watson 	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
11399e6e01ebSRobert Watson 	for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
114053402767SRobert Watson 		if (CPU_ABSENT(cpuid))
114153402767SRobert Watson 			continue;
114253402767SRobert Watson 		nwsp = DPCPU_ID_PTR(cpuid, nws);
1143d4b5cae4SRobert Watson 		if (nwsp->nws_intr_event == NULL)
1144d4b5cae4SRobert Watson 			continue;
1145d4b5cae4SRobert Watson 		first = 1;
1146d4b5cae4SRobert Watson 		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
1147d4b5cae4SRobert Watson 			if (np[proto].np_handler == NULL)
1148d4b5cae4SRobert Watson 				continue;
1149d4b5cae4SRobert Watson 			nwp = &nwsp->nws_work[proto];
1150d4b5cae4SRobert Watson 			if (first) {
115153402767SRobert Watson 				db_printf("%3d ", cpuid);
1152d4b5cae4SRobert Watson 				first = 0;
1153d4b5cae4SRobert Watson 			} else
1154d4b5cae4SRobert Watson 				db_printf("%3s ", "");
1155d4b5cae4SRobert Watson 			db_printf(
1156d4b5cae4SRobert Watson 			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
1157d4b5cae4SRobert Watson 			    np[proto].np_name, nwp->nw_len,
1158d4b5cae4SRobert Watson 			    nwp->nw_watermark, nwp->nw_qlimit,
1159d4b5cae4SRobert Watson 			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
1160d4b5cae4SRobert Watson 			    nwp->nw_qdrops, nwp->nw_queued);
1161d4b5cae4SRobert Watson 		}
1162d4b5cae4SRobert Watson 	}
1163d4b5cae4SRobert Watson }
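
/*
 * Editor's note (not part of the original file): from DDB this command is
 * invoked as "show netisr"; each row reports one CPU/protocol pair's
 * queue length, high watermark, and queue limit, followed by the
 * dispatched, hybrid-dispatched, dropped, and queued packet counters.
 */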
1164d4b5cae4SRobert Watson #endif
1165