xref: /freebsd/sys/net/netisr.c (revision 7aa383846770374466b1dcb2cefd71bde9acf463)
/*-
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * Copyright (c) 2010 Juniper Networks, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other based on:
 *
 * - Whether directly dispatching a netisr handler would lead to code
 *   reentrance or lock recursion, such as entering the socket code from the
 *   socket code.
 * - Whether directly dispatching a netisr handler would lead to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * netisr.h.
 */
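
/*
 * Illustrative sketch (not part of this file): a protocol typically
 * registers a handler with netisr and then feeds it packets.  The protocol
 * number NETISR_FOO and the handler foo_input() are hypothetical.
 *
 *	static const struct netisr_handler foo_nh = {
 *		.nh_name = "foo",
 *		.nh_handler = foo_input,
 *		.nh_proto = NETISR_FOO,
 *		.nh_qlimit = 512,
 *		.nh_policy = NETISR_POLICY_FLOW,
 *	};
 *
 *	netisr_register(&foo_nh);
 *	...
 *	error = netisr_dispatch(NETISR_FOO, m);	(direct when permitted)
 *	error = netisr_queue(NETISR_FOO, m);	(always deferred)
 */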

#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	_WANT_NETISR_INTERNAL	/* Enable definitions from netisr_internal.h */
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/netisr_internal.h>
#include <net/vnet.h>

/*-
 * Synchronize use and modification of the registered netisr data structures;
 * acquire a write lock while modifying the set of registered protocols to
 * prevent partially registered or unregistered protocols from being run.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The netisr_proto array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_worker.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration are extremely rare at
 * runtime.  If they become more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");

/*-
 * Three direct dispatch policies are supported:
 *
 * - Always defer: all work is scheduled for a netisr, regardless of context.
 *   (!direct)
 *
 * - Hybrid: if the executing context allows direct dispatch, and we're
 *   running on the CPU the work would be done on, then direct dispatch if it
 *   wouldn't violate ordering constraints on the workstream.
 *   (direct && !direct_force)
 *
 * - Always direct: if the executing context allows direct dispatch, always
 *   direct dispatch.  (direct && direct_force)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.
 */
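
/*
 * For example, using the tunables defined below, an administrator could
 * select each policy from loader.conf(5) or with sysctl(8):
 *
 *	net.isr.direct=0	(always defer)
 *	net.isr.direct=1	(hybrid, with net.isr.direct_force=0)
 *	net.isr.direct_force=1	(always direct, with net.isr.direct=1)
 */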
static int	netisr_direct_force = 1;	/* Always direct dispatch. */
TUNABLE_INT("net.isr.direct_force", &netisr_direct_force);
SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RW,
    &netisr_direct_force, 0, "Force direct dispatch");

static int	netisr_direct = 1;	/* Enable direct dispatch. */
TUNABLE_INT("net.isr.direct", &netisr_direct);
SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RW,
    &netisr_direct, 0, "Enable direct dispatch");

/*
 * Allow the administrator to limit the number of threads (CPUs) to use for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0, so in practice we ignore values <= 1.  This must be set at boot.
 * We will create at most one thread per CPU.
 */
static int	netisr_maxthreads = -1;		/* Max number of threads. */
TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads);
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads);
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");
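
/*
 * E.g., to run four worker threads, each pinned to its CPU, one might set
 * in loader.conf(5) (boot-time only, as both are CTLFLAG_RDTUN):
 *
 *	net.isr.maxthreads=4
 *	net.isr.bindthreads=1
 */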

/*
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit,
 * both for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit);
SYSCTL_INT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");

/*
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit);
SYSCTL_INT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");
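
/*
 * Both limits are likewise boot-time tunables; e.g., in loader.conf(5)
 * (values here are arbitrary illustrations):
 *
 *	net.isr.maxqlimit=20480
 *	net.isr.defaultqlimit=512
 */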

/*
 * Store and export the compile-time constant NETISR_MAXPROT limit on the
 * number of protocols that can register with netisr at a time.  This is
 * required for crashdump analysis, as it sizes netisr_proto[].
 */
static u_int	netisr_maxprot = NETISR_MAXPROT;
SYSCTL_INT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
    &netisr_maxprot, 0,
    "Compile-time limit on the number of protocols supported by netisr.");

/*
 * The netisr_proto array describes all registered protocols, indexed by
 * protocol number.  See netisr_internal.h for more details.
 */
static struct netisr_proto	netisr_proto[NETISR_MAXPROT];

/*
 * Per-CPU workstream data.  See netisr_internal.h for more details.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);

/*
 * Map contiguous values between 0 and nws_count - 1 into CPU IDs appropriate
 * for accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int				 nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int				 nws_count;
SYSCTL_INT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)

/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	KASSERT(cpunumber < nws_count, ("%s: %u >= %u", __func__, cpunumber,
	    nws_count));

	return (nws_array[cpunumber]);
}

/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}
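
/*
 * Illustrative sketch: a NETISR_POLICY_CPU protocol might implement its
 * nh_m2cpuid callback in terms of the utility routines above.  The function
 * foo_m2cpuid() and its foo_hash() helper are hypothetical.
 *
 *	static struct mbuf *
 *	foo_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuidp)
 *	{
 *
 *		*cpuidp = netisr_get_cpuid(foo_hash(m) %
 *		    netisr_get_cpucount());
 *		return (m);
 *	}
 */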

/*
 * Register a new netisr handler, which requires initializing per-protocol
 * fields for each workstream.  All netisr work is briefly suspended while
 * the protocol is installed.
 */
void
netisr_register(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
	const char *name;
	u_int i, proto;

	proto = nhp->nh_proto;
	name = nhp->nh_name;

	/*
	 * Test that the requested registration is valid.
	 */
	KASSERT(nhp->nh_name != NULL,
	    ("%s: nh_name NULL for %u", __func__, proto));
	KASSERT(nhp->nh_handler != NULL,
	    ("%s: nh_handler NULL for %s", __func__, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
	    nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_policy == NETISR_POLICY_CPU,
	    ("%s: unsupported nh_policy %u for %s", __func__,
	    nhp->nh_policy, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_m2flow == NULL,
	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
	    name));
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u, %s): protocol too big", __func__, proto, name));

	/*
	 * Test that no existing registration exists for this protocol.
	 */
	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_name == NULL,
	    ("%s(%u, %s): name present", __func__, proto, name));
	KASSERT(netisr_proto[proto].np_handler == NULL,
	    ("%s(%u, %s): handler present", __func__, proto, name));

	netisr_proto[proto].np_name = name;
	netisr_proto[proto].np_handler = nhp->nh_handler;
	netisr_proto[proto].np_m2flow = nhp->nh_m2flow;
	netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid;
	netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu;
	if (nhp->nh_qlimit == 0)
		netisr_proto[proto].np_qlimit = netisr_defaultqlimit;
	else if (nhp->nh_qlimit > netisr_maxqlimit) {
		printf("%s: %s requested queue limit %u capped to "
		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
		    netisr_maxqlimit);
		netisr_proto[proto].np_qlimit = netisr_maxqlimit;
	} else
		netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
	netisr_proto[proto].np_policy = nhp->nh_policy;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		bzero(npwp, sizeof(*npwp));
		npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
	}
	NETISR_WUNLOCK();
}

/*
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qdrops = 0;
	}
	NETISR_WUNLOCK();
}

/*
 * Query current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
	struct netisr_work *npwp;
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	*qdropp = 0;
	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		*qdropp += npwp->nw_qdrops;
	}
	NETISR_RUNLOCK(&tracker);
}

/*
 * Query current per-workstream queue limit for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));
	*qlimitp = netisr_proto[proto].np_qlimit;
	NETISR_RUNLOCK(&tracker);
}

/*
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	if (qlimit > netisr_maxqlimit)
		return (EINVAL);

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_qlimit = qlimit;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qlimit = qlimit;
	}
	NETISR_WUNLOCK();
	return (0);
}
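
/*
 * For instance, a protocol might raise its per-CPU queue depth at runtime,
 * bounded above by net.isr.maxqlimit (foo_nh is hypothetical):
 *
 *	error = netisr_setqlimit(&foo_nh, 1024);
 */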

/*
 * Drain all packets currently held in a particular protocol work queue.
 */
static void
netisr_drain_proto(struct netisr_work *npwp)
{
	struct mbuf *m;

	/*
	 * We would assert the lock on the workstream but it's not passed in.
	 */
	while ((m = npwp->nw_head) != NULL) {
		npwp->nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL)
			npwp->nw_tail = NULL;
		npwp->nw_len--;
		m_freem(m);
	}
	KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
	KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
}

/*
 * Remove the registration of a network protocol, which requires clearing
 * per-protocol fields across all workstreams, including freeing all mbufs in
 * the queues at time of unregister.  All work in netisr is briefly suspended
 * while this takes place.
 */
void
netisr_unregister(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_name = NULL;
	netisr_proto[proto].np_handler = NULL;
	netisr_proto[proto].np_m2flow = NULL;
	netisr_proto[proto].np_m2cpuid = NULL;
	netisr_proto[proto].np_qlimit = 0;
	netisr_proto[proto].np_policy = 0;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		netisr_drain_proto(npwp);
		bzero(npwp, sizeof(*npwp));
	}
	NETISR_WUNLOCK();
}
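
/*
 * A loadable protocol module would mirror its registration at unload time,
 * e.g. (foo_nh hypothetical):
 *
 *	netisr_unregister(&foo_nh);
 */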

/*
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, uintptr_t source,
    struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;

	NETISR_LOCK_ASSERT();

	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	switch (npp->np_policy) {
	case NETISR_POLICY_CPU:
		return (npp->np_m2cpuid(m, source, cpuidp));

	case NETISR_POLICY_FLOW:
		if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (m->m_flags & M_FLOWID) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		/* FALLTHROUGH */

	case NETISR_POLICY_SOURCE:
		ifp = m->m_pkthdr.rcvif;
		if (ifp != NULL)
			*cpuidp = nws_array[(ifp->if_index + source) %
			    nws_count];
		else
			*cpuidp = nws_array[source % nws_count];
		return (m);

	default:
		panic("%s: invalid policy %u for %s", __func__,
		    npp->np_policy, npp->np_name);
	}
}

/*
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
	struct netisr_work local_npw, *npwp;
	u_int handled;
	struct mbuf *m;

	NETISR_LOCK_ASSERT();
	NWS_LOCK_ASSERT(nwsp);

	KASSERT(nwsp->nws_flags & NWS_RUNNING,
	    ("%s(%u): not running", __func__, proto));
	KASSERT(proto >= 0 && proto < NETISR_MAXPROT,
	    ("%s(%u): invalid proto\n", __func__, proto));

	npwp = &nwsp->nws_work[proto];
	if (npwp->nw_len == 0)
		return (0);

	/*
	 * Move the global work queue to a thread-local work queue.
	 *
	 * Notice that this means the effective maximum length of the queue
	 * is actually twice that of the maximum queue length specified in
	 * the protocol registration call.
	 */
	handled = npwp->nw_len;
	local_npw = *npwp;
	npwp->nw_head = NULL;
	npwp->nw_tail = NULL;
	npwp->nw_len = 0;
	nwsp->nws_pendingbits &= ~(1 << proto);
	NWS_UNLOCK(nwsp);
	while ((m = local_npw.nw_head) != NULL) {
		local_npw.nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (local_npw.nw_head == NULL)
			local_npw.nw_tail = NULL;
		local_npw.nw_len--;
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL);
		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
		netisr_proto[proto].np_handler(m);
		CURVNET_RESTORE();
	}
	KASSERT(local_npw.nw_len == 0,
	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
	if (netisr_proto[proto].np_drainedcpu)
		netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu);
	NWS_LOCK(nwsp);
	npwp->nw_handled += handled;
	return (handled);
}

/*
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	nwsp = arg;

#ifdef DEVICE_POLLING
	KASSERT(nws_count == 1,
	    ("%s: device_polling but nws_count != 1", __func__));
	netisr_poll();
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	while ((bits = nwsp->nws_pendingbits) != 0) {
		while ((prot = ffs(bits)) != 0) {
			prot--;
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	netisr_pollmore();
#endif
}

static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

	NWS_LOCK_ASSERT(nwsp);

	*dosignalp = 0;
	if (npwp->nw_len < npwp->nw_qlimit) {
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL) {
			npwp->nw_head = m;
			npwp->nw_tail = m;
		} else {
			npwp->nw_tail->m_nextpkt = m;
			npwp->nw_tail = m;
		}
		npwp->nw_len++;
		if (npwp->nw_len > npwp->nw_watermark)
			npwp->nw_watermark = npwp->nw_len;

		/*
		 * We must set the bit regardless of NWS_RUNNING, so that
		 * swi_net() keeps calling netisr_process_workstream_proto().
		 */
		nwsp->nws_pendingbits |= (1 << proto);
		if (!(nwsp->nws_flags &
		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
			nwsp->nws_flags |= NWS_SCHEDULED;
			*dosignalp = 1;	/* Defer until unlocked. */
		}
		npwp->nw_queued++;
		return (0);
	} else {
		m_freem(m);
		npwp->nw_qdrops++;
		return (ENOBUFS);
	}
}

static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, mp_maxid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	dosignal = 0;
	error = 0;
	nwsp = DPCPU_ID_PTR(cpuid, nws);
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}

int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	m = netisr_select_cpuid(&netisr_proto[proto], source, m, &cpuid);
	if (m != NULL) {
		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
		    cpuid));
		error = netisr_queue_internal(proto, m, cpuid);
	} else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}

/*
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid;

	/*
	 * If direct dispatch is entirely disabled, fall back on queueing.
	 */
	if (!netisr_direct)
		return (netisr_queue_src(proto, source, m));

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (netisr_direct_force) {
		nwsp = DPCPU_PTR(nws);
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		netisr_proto[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
	m = netisr_select_cpuid(&netisr_proto[proto], source, m, &cpuid);
	if (m == NULL) {
		error = ENOBUFS;
		goto out_unlock;
	}
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
	sched_pin();
	if (cpuid != curcpu)
		goto queue_fallback;
	nwsp = DPCPU_PTR(nws);
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	netisr_proto[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
	sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}

#ifdef DEVICE_POLLING
/*
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
	struct netisr_workstream *nwsp;

	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
	NWS_SIGNAL(nwsp);
}
#endif

static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
	char swiname[12];
	struct netisr_workstream *nwsp;
	int error;

	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	nwsp = DPCPU_ID_PTR(cpuid, nws);
	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
	nwsp->nws_cpu = cpuid;
	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);
	pc->pc_netisr = nwsp->nws_intr_event;
	if (netisr_bindthreads) {
		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
		if (error != 0)
			printf("%s: cpu %u: intr_event_bind: %d", __func__,
			    cpuid, error);
	}
	NETISR_WLOCK();
	nws_array[nws_count] = nwsp->nws_cpu;
	nws_count++;
	NETISR_WUNLOCK();
}

/*
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{

	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));

	NETISR_LOCK_INIT();
	if (netisr_maxthreads < 1)
		netisr_maxthreads = 1;
	if (netisr_maxthreads > mp_ncpus) {
		printf("netisr_init: forcing maxthreads from %d to %d\n",
		    netisr_maxthreads, mp_ncpus);
		netisr_maxthreads = mp_ncpus;
	}
	if (netisr_defaultqlimit > netisr_maxqlimit) {
		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
		    netisr_defaultqlimit, netisr_maxqlimit);
		netisr_defaultqlimit = netisr_maxqlimit;
	}
#ifdef DEVICE_POLLING
	/*
	 * The device polling code is not yet aware of how to deal with
	 * multiple netisr threads, so for the time being compiling in device
	 * polling disables parallel netisr workers.
	 */
	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
		printf("netisr_init: forcing maxthreads to 1 and "
		    "bindthreads to 0 for device polling\n");
		netisr_maxthreads = 1;
		netisr_bindthreads = 0;
	}
#endif

	netisr_start_swi(curcpu, pcpu_find(curcpu));
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);

/*
 * Start worker threads for additional CPUs.  No attempt is made to handle
 * work reassignment gracefully; we don't yet support dynamic reconfiguration.
 */
static void
netisr_start(void *arg)
{
	struct pcpu *pc;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		/* XXXRW: Is skipping absent CPUs still required here? */
		if (CPU_ABSENT(pc->pc_cpuid))
			continue;
		/* Worker will already be present for boot CPU. */
		if (pc->pc_netisr != NULL)
			continue;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);

/*
 * Sysctl monitoring for netisr: query a list of registered protocols.
 */
static int
sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_proto *snpp, *snp_array;
	struct netisr_proto *npp;
	u_int counter, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
		npp = &netisr_proto[proto];
		if (npp->np_name == NULL)
			continue;
		snpp = &snp_array[counter];
		snpp->snp_version = sizeof(*snpp);
		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
		snpp->snp_proto = proto;
		snpp->snp_qlimit = npp->np_qlimit;
		snpp->snp_policy = npp->np_policy;
		if (npp->np_m2flow != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
		if (npp->np_m2cpuid != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
		if (npp->np_drainedcpu != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= NETISR_MAXPROT,
	    ("sysctl_netisr_proto: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
	free(snp_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, proto,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
    "S,sysctl_netisr_proto",
    "Return list of protocols registered with netisr");

/*
 * Sysctl monitoring for netisr: query a list of workstreams.
 */
static int
sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_workstream *snwsp, *snws_array;
	struct netisr_workstream *nwsp;
	u_int counter, cpuid;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		snwsp = &snws_array[counter];
		snwsp->snws_version = sizeof(*snwsp);

		/*
		 * For now, we equate workstream IDs and CPU IDs in the
		 * kernel, but expose them independently to userspace in case
		 * that assumption changes in the future.
		 */
		snwsp->snws_wsid = cpuid;
		snwsp->snws_cpu = cpuid;
		if (nwsp->nws_intr_event != NULL)
			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
		NWS_UNLOCK(nwsp);
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= MAXCPU,
	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
	free(snws_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
    "S,sysctl_netisr_workstream",
    "Return list of workstreams implemented by netisr");

/*
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_work *snwp, *snw_array;
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *nwp;
	u_int counter, cpuid, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
	    M_TEMP, M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			npp = &netisr_proto[proto];
			if (npp->np_name == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			snwp = &snw_array[counter];
			snwp->snw_version = sizeof(*snwp);
			snwp->snw_wsid = cpuid;		/* See comment above. */
			snwp->snw_proto = proto;
			snwp->snw_len = nwp->nw_len;
			snwp->snw_watermark = nwp->nw_watermark;
			snwp->snw_dispatched = nwp->nw_dispatched;
			snwp->snw_hybrid_dispatched =
			    nwp->nw_hybrid_dispatched;
			snwp->snw_qdrops = nwp->nw_qdrops;
			snwp->snw_queued = nwp->nw_queued;
			snwp->snw_handled = nwp->nw_handled;
			counter++;
		}
		NWS_UNLOCK(nwsp);
	}
	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
	    ("sysctl_netisr_work: counter too big (%d)", counter));
	NETISR_RUNLOCK(&tracker);
	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
	free(snw_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
    "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");
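
/*
 * These read-only OIDs export binary structures intended for monitoring
 * tools; userland consumers (e.g., netstat's -Q display) retrieve them via
 * sysctl(3) rather than sysctl(8).
 */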

#ifdef DDB
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *nwp;
	int first, proto;
	u_int cpuid;

	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		first = 1;
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			if (netisr_proto[proto].np_handler == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			if (first) {
				db_printf("%3d ", cpuid);
				first = 0;
			} else
				db_printf("%3s ", "");
			db_printf(
			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
			    netisr_proto[proto].np_name, nwp->nw_len,
			    nwp->nw_watermark, nwp->nw_qlimit,
			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
			    nwp->nw_qdrops, nwp->nw_queued);
		}
	}
}
#endif