/*-
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * Copyright (c) 2010 Juniper Networks, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other based on:
 *
 * - Whether directly dispatching a netisr handler would lead to code
 *   reentrance or lock recursion, such as entering the socket code from the
 *   socket code.
 * - Whether directly dispatching a netisr handler would lead to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * netisr.h.
 */
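
/*
 * For illustration only, a hypothetical protocol "foo" might register with
 * netisr and hand packets off roughly as sketched below; NETISR_FOO and
 * foo_input() are assumptions for the example, not definitions from this
 * file:
 *
 *	static void foo_input(struct mbuf *m);
 *
 *	static const struct netisr_handler foo_nh = {
 *		.nh_name = "foo",
 *		.nh_handler = foo_input,
 *		.nh_proto = NETISR_FOO,
 *		.nh_qlimit = 256,
 *		.nh_policy = NETISR_POLICY_FLOW,
 *	};
 *
 *	netisr_register(&foo_nh);
 *
 * and then, per packet, either request direct dispatch where the calling
 * context permits it, or force deferral to a SWI thread:
 *
 *	(void)netisr_dispatch(NETISR_FOO, m);	(direct if policy allows)
 *	(void)netisr_queue(NETISR_FOO, m);	(always deferred)
 */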

#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/vnet.h>

/*-
 * Synchronize use and modification of the registered netisr data structures;
 * a write lock is acquired while modifying the set of registered protocols,
 * and a read lock while using it, to prevent partially registered or
 * unregistered protocols from being run.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The np array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_worker.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration is extremely rare at
 * runtime.  If it becomes more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */
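
/*
 * When NETISR_LOCKING is enabled, packet paths bracket their use of the
 * registration data as in this sketch, mirroring the #ifdef NETISR_LOCKING
 * blocks later in this file:
 *
 *	struct rm_priotracker tracker;
 *
 *	NETISR_RLOCK(&tracker);
 *	(look up np[proto]; dispatch or queue the packet)
 *	NETISR_RUNLOCK(&tracker);
 */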

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");

/*-
 * Three direct dispatch policies are supported:
 *
 * - Always defer: all work is scheduled for a netisr, regardless of context.
 *   (!direct)
 *
 * - Hybrid: if the executing context allows direct dispatch, and we're
 *   running on the CPU the work would be done on, then direct dispatch if it
 *   wouldn't violate ordering constraints on the workstream.
 *   (direct && !direct_force)
 *
 * - Always direct: if the executing context allows direct dispatch, always
 *   direct dispatch.  (direct && direct_force)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.
 */
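
/*
 * In terms of the tunables below (both also writable at runtime via
 * sysctl), the three policies correspond to, as a sketch:
 *
 *	net.isr.direct=0				Always defer
 *	net.isr.direct=1, net.isr.direct_force=0	Hybrid
 *	net.isr.direct=1, net.isr.direct_force=1	Always direct (default)
 */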
static int	netisr_direct_force = 1;	/* Always direct dispatch. */
TUNABLE_INT("net.isr.direct_force", &netisr_direct_force);
SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RW,
    &netisr_direct_force, 0, "Force direct dispatch");

static int	netisr_direct = 1;	/* Enable direct dispatch. */
TUNABLE_INT("net.isr.direct", &netisr_direct);
SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RW,
    &netisr_direct, 0, "Enable direct dispatch");

/*
 * Allow the administrator to limit the number of threads (CPUs) to use for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0, so in practice we ignore values <= 1.  This must be set at boot.
 * We will create at most one thread per CPU.
 */
static int	netisr_maxthreads = -1;		/* Max number of threads. */
TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads);
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads);
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");

/*
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit, both
 * for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit);
SYSCTL_INT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");

/*
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit);
SYSCTL_INT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");
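
/*
 * The CTLFLAG_RDTUN variables above are boot-time tunables; a sketch of
 * loader.conf(5) entries (values purely illustrative):
 *
 *	net.isr.maxthreads=4
 *	net.isr.bindthreads=1
 *	net.isr.maxqlimit=10240
 *	net.isr.defaultqlimit=512
 */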

/*
 * Each protocol is described by a struct netisr_proto, which holds all
 * global per-protocol information.  This data structure is set up by
 * netisr_register(), and derived from the public struct netisr_handler.
 */
struct netisr_proto {
	const char	*np_name;	/* Character string protocol name. */
	netisr_handler_t *np_handler;	/* Protocol handler. */
	netisr_m2flow_t	*np_m2flow;	/* Query flow for untagged packet. */
	netisr_m2cpuid_t *np_m2cpuid;	/* Query CPU to process packet on. */
	netisr_drainedcpu_t *np_drainedcpu; /* Callback when drained a queue. */
	u_int		 np_qlimit;	/* Maximum per-CPU queue depth. */
	u_int		 np_policy;	/* Work placement policy. */
};

#define	NETISR_MAXPROT		16		/* Compile-time limit. */

/*
 * The np array describes all registered protocols, indexed by protocol
 * number.
 */
static struct netisr_proto	np[NETISR_MAXPROT];

/*
 * Protocol-specific work for each workstream is described by struct
 * netisr_work.  Each work descriptor consists of an mbuf queue and
 * statistics.
 */
struct netisr_work {
	/*
	 * Packet queue, linked by m_nextpkt.
	 */
	struct mbuf	*nw_head;
	struct mbuf	*nw_tail;
	u_int		 nw_len;
	u_int		 nw_qlimit;
	u_int		 nw_watermark;

	/*
	 * Statistics -- written unlocked, but mostly from curcpu.
	 */
	u_int64_t	 nw_dispatched; /* Number of direct dispatches. */
	u_int64_t	 nw_hybrid_dispatched; /* "" hybrid dispatches. */
	u_int64_t	 nw_qdrops;	/* "" drops. */
	u_int64_t	 nw_queued;	/* "" enqueues. */
	u_int64_t	 nw_handled;	/* "" handled in worker. */
};

/*
 * Workstreams hold a queue of ordered work across each protocol, and are
 * described by netisr_workstream.  Each workstream is associated with a
 * worker thread, which in turn is pinned to a CPU.  Work associated with a
 * workstream can be processed in other threads during direct dispatch;
 * concurrent processing is prevented by the NWS_RUNNING flag, which
 * indicates that a thread is already processing the work queue.  It is
 * important to prevent a directly dispatched packet from "skipping ahead" of
 * work already in the workstream queue.
 */
struct netisr_workstream {
	struct intr_event *nws_intr_event;	/* Handler for stream. */
	void		*nws_swi_cookie;	/* swi(9) cookie for stream. */
	struct mtx	 nws_mtx;		/* Synchronize work. */
	u_int		 nws_cpu;		/* CPU pinning. */
	u_int		 nws_flags;		/* Wakeup flags. */
	u_int		 nws_pendingbits;	/* Scheduled protocols. */

	/*
	 * Each protocol has per-workstream data.
	 */
	struct netisr_work	nws_work[NETISR_MAXPROT];
} __aligned(CACHE_LINE_SIZE);

/*
 * Per-CPU workstream data.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);

/*
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int				 nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int				 nws_count;
SYSCTL_INT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Per-workstream flags.
 */
#define	NWS_RUNNING	0x00000001	/* Currently running in a thread. */
#define	NWS_DISPATCHING	0x00000002	/* Currently being direct-dispatched. */
#define	NWS_SCHEDULED	0x00000004	/* Signal issued. */

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)

/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	KASSERT(cpunumber < nws_count, ("%s: %u >= %u", __func__, cpunumber,
	    nws_count));

	return (nws_array[cpunumber]);
}

/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}
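
/*
 * A protocol maintaining its own per-CPU state consistently with netisr
 * might use these utilities as in the following sketch; foo_percpu_init()
 * is a hypothetical consumer, not part of this file:
 *
 *	u_int i;
 *
 *	for (i = 0; i < netisr_get_cpucount(); i++)
 *		foo_percpu_init(netisr_get_cpuid(i));
 */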

/*
 * Register a new netisr handler, which requires initializing per-protocol
 * fields for each workstream.  All netisr work is briefly suspended while
 * the protocol is installed.
 */
void
netisr_register(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
	const char *name;
	u_int i, proto;

	proto = nhp->nh_proto;
	name = nhp->nh_name;

	/*
	 * Test that the requested registration is valid.
	 */
	KASSERT(nhp->nh_name != NULL,
	    ("%s: nh_name NULL for %u", __func__, proto));
	KASSERT(nhp->nh_handler != NULL,
	    ("%s: nh_handler NULL for %s", __func__, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
	    nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_policy == NETISR_POLICY_CPU,
	    ("%s: unsupported nh_policy %u for %s", __func__,
	    nhp->nh_policy, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_m2flow == NULL,
	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
	    name));
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u, %s): protocol too big", __func__, proto, name));

	/*
	 * Test that no existing registration exists for this protocol.
	 */
	NETISR_WLOCK();
	KASSERT(np[proto].np_name == NULL,
	    ("%s(%u, %s): name present", __func__, proto, name));
	KASSERT(np[proto].np_handler == NULL,
	    ("%s(%u, %s): handler present", __func__, proto, name));

	np[proto].np_name = name;
	np[proto].np_handler = nhp->nh_handler;
	np[proto].np_m2flow = nhp->nh_m2flow;
	np[proto].np_m2cpuid = nhp->nh_m2cpuid;
	np[proto].np_drainedcpu = nhp->nh_drainedcpu;
	if (nhp->nh_qlimit == 0)
		np[proto].np_qlimit = netisr_defaultqlimit;
	else if (nhp->nh_qlimit > netisr_maxqlimit) {
		printf("%s: %s requested queue limit %u capped to "
		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
		    netisr_maxqlimit);
		np[proto].np_qlimit = netisr_maxqlimit;
	} else
		np[proto].np_qlimit = nhp->nh_qlimit;
	np[proto].np_policy = nhp->nh_policy;
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		bzero(npwp, sizeof(*npwp));
		npwp->nw_qlimit = np[proto].np_qlimit;
	}
	NETISR_WUNLOCK();
}

/*
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qdrops = 0;
	}
	NETISR_WUNLOCK();
}

/*
 * Query current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
	struct netisr_work *npwp;
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	*qdropp = 0;
	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		*qdropp += npwp->nw_qdrops;
	}
	NETISR_RUNLOCK(&tracker);
}

/*
 * Query current per-workstream queue limit for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));
	*qlimitp = np[proto].np_qlimit;
	NETISR_RUNLOCK(&tracker);
}

/*
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	if (qlimit > netisr_maxqlimit)
		return (EINVAL);

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	np[proto].np_qlimit = qlimit;
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qlimit = qlimit;
	}
	NETISR_WUNLOCK();
	return (0);
}
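
/*
 * For example, a protocol that registered foo_nh (hypothetical, as above)
 * could raise its per-workstream queue limit at runtime along these lines;
 * EINVAL indicates the request exceeded net.isr.maxqlimit:
 *
 *	error = netisr_setqlimit(&foo_nh, 1024);
 */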

/*
 * Drain all packets currently held in a particular protocol work queue.
 */
static void
netisr_drain_proto(struct netisr_work *npwp)
{
	struct mbuf *m;

	/*
	 * We would assert the lock on the workstream but it's not passed in.
	 */
	while ((m = npwp->nw_head) != NULL) {
		npwp->nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL)
			npwp->nw_tail = NULL;
		npwp->nw_len--;
		m_freem(m);
	}
	KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
	KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
}

/*
 * Remove the registration of a network protocol, which requires clearing
 * per-protocol fields across all workstreams, including freeing all mbufs in
 * the queues at time of unregister.  All work in netisr is briefly suspended
 * while this takes place.
 */
void
netisr_unregister(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	np[proto].np_name = NULL;
	np[proto].np_handler = NULL;
	np[proto].np_m2flow = NULL;
	np[proto].np_m2cpuid = NULL;
	np[proto].np_qlimit = 0;
	np[proto].np_policy = 0;
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		netisr_drain_proto(npwp);
		bzero(npwp, sizeof(*npwp));
	}
	NETISR_WUNLOCK();
}

/*
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, uintptr_t source,
    struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;

	NETISR_LOCK_ASSERT();

	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	switch (npp->np_policy) {
	case NETISR_POLICY_CPU:
		return (npp->np_m2cpuid(m, source, cpuidp));

	case NETISR_POLICY_FLOW:
		if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (m->m_flags & M_FLOWID) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		/* FALLTHROUGH */

	case NETISR_POLICY_SOURCE:
		ifp = m->m_pkthdr.rcvif;
		if (ifp != NULL)
			*cpuidp = nws_array[(ifp->if_index + source) %
			    nws_count];
		else
			*cpuidp = nws_array[source % nws_count];
		return (m);

	default:
		panic("%s: invalid policy %u for %s", __func__,
		    npp->np_policy, npp->np_name);
	}
}

/*
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
	struct netisr_work local_npw, *npwp;
	u_int handled;
	struct mbuf *m;

	NETISR_LOCK_ASSERT();
	NWS_LOCK_ASSERT(nwsp);

	KASSERT(nwsp->nws_flags & NWS_RUNNING,
	    ("%s(%u): not running", __func__, proto));
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): invalid proto", __func__, proto));

	npwp = &nwsp->nws_work[proto];
	if (npwp->nw_len == 0)
		return (0);

	/*
	 * Move the global work queue to a thread-local work queue.
	 *
	 * Notice that this means the effective maximum length of the queue
	 * is actually twice that of the maximum queue length specified in
	 * the protocol registration call: while the local copy is being
	 * drained, the now-empty global queue may fill again up to its
	 * limit.
	 */
	handled = npwp->nw_len;
	local_npw = *npwp;
	npwp->nw_head = NULL;
	npwp->nw_tail = NULL;
	npwp->nw_len = 0;
	nwsp->nws_pendingbits &= ~(1 << proto);
	NWS_UNLOCK(nwsp);
	while ((m = local_npw.nw_head) != NULL) {
		local_npw.nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (local_npw.nw_head == NULL)
			local_npw.nw_tail = NULL;
		local_npw.nw_len--;
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL);
		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
		np[proto].np_handler(m);
		CURVNET_RESTORE();
	}
	KASSERT(local_npw.nw_len == 0,
	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
	if (np[proto].np_drainedcpu)
		np[proto].np_drainedcpu(nwsp->nws_cpu);
	NWS_LOCK(nwsp);
	npwp->nw_handled += handled;
	return (handled);
}

/*
 * SWI handler for netisr -- processes packets in the workstream it owns,
 * woken up by calls to NWS_SIGNAL().  If this workstream is already being
 * direct dispatched, go back to sleep and wait for the dispatching thread
 * to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	nwsp = arg;

#ifdef DEVICE_POLLING
	KASSERT(nws_count == 1,
	    ("%s: device_polling but nws_count != 1", __func__));
	netisr_poll();
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	while ((bits = nwsp->nws_pendingbits) != 0) {
		while ((prot = ffs(bits)) != 0) {
			prot--;
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	netisr_pollmore();
#endif
}

static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

	NWS_LOCK_ASSERT(nwsp);

	*dosignalp = 0;
	if (npwp->nw_len < npwp->nw_qlimit) {
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL) {
			npwp->nw_head = m;
			npwp->nw_tail = m;
		} else {
			npwp->nw_tail->m_nextpkt = m;
			npwp->nw_tail = m;
		}
		npwp->nw_len++;
		if (npwp->nw_len > npwp->nw_watermark)
			npwp->nw_watermark = npwp->nw_len;

		/*
		 * We must set the bit regardless of NWS_RUNNING, so that
		 * swi_net() keeps calling netisr_process_workstream_proto().
		 */
		nwsp->nws_pendingbits |= (1 << proto);
		if (!(nwsp->nws_flags &
		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
			nwsp->nws_flags |= NWS_SCHEDULED;
			*dosignalp = 1;	/* Defer until unlocked. */
		}
		npwp->nw_queued++;
		return (0);
	} else {
		m_freem(m);
		npwp->nw_qdrops++;
		return (ENOBUFS);
	}
}

static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, mp_maxid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	dosignal = 0;
	error = 0;
	nwsp = DPCPU_ID_PTR(cpuid, nws);
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}

int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(np[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	m = netisr_select_cpuid(&np[proto], source, m, &cpuid);
	if (m != NULL) {
		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
		    cpuid));
		error = netisr_queue_internal(proto, m, cpuid);
	} else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}

/*
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid;

	/*
	 * If direct dispatch is entirely disabled, fall back on queueing.
	 */
	if (!netisr_direct)
		return (netisr_queue_src(proto, source, m));

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(np[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (netisr_direct_force) {
		nwsp = DPCPU_PTR(nws);
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		np[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
	m = netisr_select_cpuid(&np[proto], source, m, &cpuid);
	if (m == NULL) {
		error = ENOBUFS;
		goto out_unlock;
	}
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
	sched_pin();
	if (cpuid != curcpu)
		goto queue_fallback;
	nwsp = DPCPU_PTR(nws);
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	np[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
	sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}
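
/*
 * A sketch of the intended division of labor for callers: prefer
 * netisr_dispatch() where the calling context tolerates direct dispatch,
 * and netisr_queue() where it would cause reentrance or unbounded
 * recursion.  foo_tunnel_input() and NETISR_FOO are hypothetical:
 *
 *	static void
 *	foo_tunnel_input(struct mbuf *m)
 *	{
 *		(decapsulate one layer of m)
 *
 *		(queue rather than redispatch to bound recursion depth; on
 *		 ENOBUFS the mbuf has already been freed)
 *		(void)netisr_queue(NETISR_FOO, m);
 *	}
 */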

#ifdef DEVICE_POLLING
/*
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
	struct netisr_workstream *nwsp;

	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
	NWS_SIGNAL(nwsp);
}
#endif

static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
	char swiname[12];
	struct netisr_workstream *nwsp;
	int error;

	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	nwsp = DPCPU_ID_PTR(cpuid, nws);
	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
	nwsp->nws_cpu = cpuid;
	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);
	pc->pc_netisr = nwsp->nws_intr_event;
	if (netisr_bindthreads) {
		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
		if (error != 0)
			printf("%s: cpu %u: intr_event_bind: %d\n", __func__,
			    cpuid, error);
	}
	NETISR_WLOCK();
	nws_array[nws_count] = nwsp->nws_cpu;
	nws_count++;
	NETISR_WUNLOCK();
}

/*
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{

	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));

	NETISR_LOCK_INIT();
	if (netisr_maxthreads < 1)
		netisr_maxthreads = 1;
	if (netisr_maxthreads > mp_ncpus) {
		printf("netisr_init: forcing maxthreads from %d to %d\n",
		    netisr_maxthreads, mp_ncpus);
		netisr_maxthreads = mp_ncpus;
	}
	if (netisr_defaultqlimit > netisr_maxqlimit) {
		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
		    netisr_defaultqlimit, netisr_maxqlimit);
		netisr_defaultqlimit = netisr_maxqlimit;
	}
#ifdef DEVICE_POLLING
	/*
	 * The device polling code is not yet aware of how to deal with
	 * multiple netisr threads, so for the time being compiling in device
	 * polling disables parallel netisr workers.
	 */
	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
		printf("netisr_init: forcing maxthreads to 1 and "
		    "bindthreads to 0 for device polling\n");
		netisr_maxthreads = 1;
		netisr_bindthreads = 0;
	}
#endif

	netisr_start_swi(curcpu, pcpu_find(curcpu));
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);

/*
 * Start worker threads for additional CPUs.  No attempt is made to
 * gracefully handle work reassignment, as we don't yet support dynamic
 * reconfiguration.
 */
static void
netisr_start(void *arg)
{
	struct pcpu *pc;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		/* XXXRW: Is skipping absent CPUs still required here? */
		if (CPU_ABSENT(pc->pc_cpuid))
			continue;
		/* Worker will already be present for boot CPU. */
		if (pc->pc_netisr != NULL)
			continue;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);

/*
 * Sysctl monitoring for netisr: query a list of registered protocols.
 */
static int
sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_proto *snpp, *snp_array;
	struct netisr_proto *npp;
	u_int counter, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
		npp = &np[proto];
		if (npp->np_name == NULL)
			continue;
		snpp = &snp_array[counter];
		snpp->snp_version = sizeof(*snpp);
		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
		snpp->snp_proto = proto;
		snpp->snp_qlimit = npp->np_qlimit;
		snpp->snp_policy = npp->np_policy;
		if (npp->np_m2flow != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
		if (npp->np_m2cpuid != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
		if (npp->np_drainedcpu != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= NETISR_MAXPROT,
	    ("sysctl_netisr_proto: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
	free(snp_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, proto,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
    "S,sysctl_netisr_proto",
    "Return list of protocols registered with netisr");
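
/*
 * Userspace can walk this list via sysctl(3); a minimal sketch with error
 * handling omitted, assuming the struct sysctl_netisr_proto definition
 * from net/netisr.h used above:
 *
 *	struct sysctl_netisr_proto *snpp;
 *	size_t i, len;
 *
 *	sysctlbyname("net.isr.proto", NULL, &len, NULL, 0);
 *	snpp = malloc(len);
 *	sysctlbyname("net.isr.proto", snpp, &len, NULL, 0);
 *	for (i = 0; i < len / sizeof(*snpp); i++)
 *		printf("%s qlimit %u\n", snpp[i].snp_name,
 *		    snpp[i].snp_qlimit);
 */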

/*
 * Sysctl monitoring for netisr: query a list of workstreams.
 */
static int
sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_workstream *snwsp, *snws_array;
	struct netisr_workstream *nwsp;
	u_int counter, cpuid;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (cpuid = 0; cpuid < MAXCPU; cpuid++) {
		if (CPU_ABSENT(cpuid))
			continue;
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		snwsp = &snws_array[counter];
		snwsp->snws_version = sizeof(*snwsp);

		/*
		 * For now, we equate workstream IDs and CPU IDs in the
		 * kernel, but expose them independently to userspace in case
		 * that assumption changes in the future.
		 */
		snwsp->snws_wsid = cpuid;
		snwsp->snws_cpu = cpuid;
		if (nwsp->nws_intr_event != NULL)
			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
		NWS_UNLOCK(nwsp);
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= MAXCPU,
	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
	free(snws_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
    "S,sysctl_netisr_workstream",
    "Return list of workstreams implemented by netisr");

/*
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_work *snwp, *snw_array;
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *nwp;
	u_int counter, cpuid, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
	    M_TEMP, M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (cpuid = 0; cpuid < MAXCPU; cpuid++) {
		if (CPU_ABSENT(cpuid))
			continue;
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			npp = &np[proto];
			if (npp->np_name == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			snwp = &snw_array[counter];
			snwp->snw_version = sizeof(*snwp);
			snwp->snw_wsid = cpuid;		/* See comment above. */
			snwp->snw_proto = proto;
			snwp->snw_len = nwp->nw_len;
			snwp->snw_watermark = nwp->nw_watermark;
			snwp->snw_dispatched = nwp->nw_dispatched;
			snwp->snw_hybrid_dispatched =
			    nwp->nw_hybrid_dispatched;
			snwp->snw_qdrops = nwp->nw_qdrops;
			snwp->snw_queued = nwp->nw_queued;
			snwp->snw_handled = nwp->nw_handled;
			counter++;
		}
		NWS_UNLOCK(nwsp);
	}
	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
	    ("sysctl_netisr_work: counter too big (%d)", counter));
	NETISR_RUNLOCK(&tracker);
	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
	free(snw_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
    "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");

#ifdef DDB
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *nwp;
	int first, proto;
	u_int cpuid;

	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
	for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
		if (CPU_ABSENT(cpuid))
			continue;
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		first = 1;
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			if (np[proto].np_handler == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			if (first) {
				db_printf("%3d ", cpuid);
				first = 0;
			} else
				db_printf("%3s ", "");
			db_printf(
			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
			    np[proto].np_name, nwp->nw_len,
			    nwp->nw_watermark, nwp->nw_qlimit,
			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
			    nwp->nw_qdrops, nwp->nw_queued);
		}
	}
}
#endif