/*-
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed with the registered handler, or passed to a
 * kernel software interrupt (SWI) thread for deferred dispatch.  Callers
 * will generally select one or the other based on:
 *
 * - Might directly dispatching a netisr handler lead to code reentrance or
 *   lock recursion, such as entering the socket code from the socket code.
 * - Might directly dispatching a netisr handler lead to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play a varying role in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * detail in netisr.h.
 */
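
/*
 * A minimal usage sketch (hypothetical; "NETISR_FOO" and foo_input() are
 * illustrative, not defined in the tree): a protocol fills out a struct
 * netisr_handler, registers it, and then hands packets to netisr:
 *
 *	static void foo_input(struct mbuf *m);
 *
 *	static struct netisr_handler foo_nh = {
 *		.nh_name = "foo",
 *		.nh_handler = foo_input,
 *		.nh_proto = NETISR_FOO,
 *		.nh_policy = NETISR_POLICY_FLOW,
 *	};
 *
 * After netisr_register(&foo_nh), callers pass packets in with
 * netisr_dispatch(NETISR_FOO, m) when direct dispatch is permitted, or
 * netisr_queue(NETISR_FOO, m) for deferred dispatch; fields left zero, such
 * as nh_qlimit, are filled in with defaults by netisr_register().
 */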

#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vimage.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>

/*-
 * Synchronize use and modification of the registered netisr data structures;
 * acquire a write lock while modifying the set of registered protocols to
 * prevent partially registered or unregistered protocols from being run.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The np array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_worker.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to a measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration are extremely rare at
 * runtime.  If they become more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");

/*-
 * Three direct dispatch policies are supported:
 *
 * - Always defer: all work is scheduled for a netisr, regardless of context.
 *   (!direct)
 *
 * - Hybrid: if the executing context allows direct dispatch, and we're
 *   running on the CPU the work would be done on, then direct dispatch if it
 *   wouldn't violate ordering constraints on the workstream.
 *   (direct && !direct_force)
 *
 * - Always direct: if the executing context allows direct dispatch, always
 *   direct dispatch.  (direct && direct_force)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.
 */
static int	netisr_direct_force = 1;	/* Always direct dispatch. */
TUNABLE_INT("net.isr.direct_force", &netisr_direct_force);
SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RW,
    &netisr_direct_force, 0, "Force direct dispatch");

static int	netisr_direct = 1;	/* Enable direct dispatch. */
TUNABLE_INT("net.isr.direct", &netisr_direct);
SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RW,
    &netisr_direct, 0, "Enable direct dispatch");
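
/*
 * For example, an administrator can select the "always defer" policy at
 * runtime with "sysctl net.isr.direct=0", or the hybrid policy with
 * "sysctl net.isr.direct=1 net.isr.direct_force=0"; both variables can also
 * be set as tunables from loader.conf(5).
 */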

/*
 * Allow the administrator to limit the number of threads (CPUs) to use for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0, so in practice we ignore values <= 1.  This must be set at boot.
 * We will create at most one thread per CPU.
 */
static int	netisr_maxthreads = 1;		/* Max number of threads. */
TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads);
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RD,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads);
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RD,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");
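
/*
 * Because these are read-only sysctls (CTLFLAG_RD), they can only be set as
 * boot-time tunables, e.g. in loader.conf(5):
 *
 *	net.isr.maxthreads=4
 *	net.isr.bindthreads=1
 */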

/*
 * Limit per-workstream queues to at most net.isr.maxqlimit, both for initial
 * configuration and later modification using netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit);
SYSCTL_INT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RD,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");

/*
 * The default per-workstream queue limit for protocols that don't initialize
 * the nh_qlimit field of their struct netisr_handler.  If this is set above
 * netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit);
SYSCTL_INT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RD,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");

/*
 * Each protocol is described by a struct netisr_proto, which holds all
 * global per-protocol information.  This data structure is set up by
 * netisr_register(), and derived from the public struct netisr_handler.
 */
struct netisr_proto {
	const char	*np_name;	/* Character string protocol name. */
	netisr_handler_t *np_handler;	/* Protocol handler. */
	netisr_m2flow_t	*np_m2flow;	/* Query flow for untagged packet. */
	netisr_m2cpuid_t *np_m2cpuid;	/* Query CPU to process packet on. */
	u_int		 np_qlimit;	/* Maximum per-CPU queue depth. */
	u_int		 np_policy;	/* Work placement policy. */
};

#define	NETISR_MAXPROT		16		/* Compile-time limit. */

/*
 * The np array describes all registered protocols, indexed by protocol
 * number.
 */
static struct netisr_proto	np[NETISR_MAXPROT];

/*
 * Protocol-specific work for each workstream is described by struct
 * netisr_work.  Each work descriptor consists of an mbuf queue and
 * statistics.
 */
struct netisr_work {
	/*
	 * Packet queue, linked by m_nextpkt.
	 */
	struct mbuf	*nw_head;
	struct mbuf	*nw_tail;
	u_int		 nw_len;
	u_int		 nw_qlimit;
	u_int		 nw_watermark;

	/*
	 * Statistics -- written unlocked, but mostly from curcpu.
	 */
	u_int64_t	 nw_dispatched; /* Number of direct dispatches. */
	u_int64_t	 nw_hybrid_dispatched; /* "" hybrid dispatches. */
	u_int64_t	 nw_qdrops;	/* "" drops. */
	u_int64_t	 nw_queued;	/* "" enqueues. */
	u_int64_t	 nw_handled;	/* "" handled in worker. */
};

/*
 * Workstreams hold a set of ordered work across each protocol, and are
 * described by netisr_workstream.  Each workstream is associated with a
 * worker thread, which in turn is pinned to a CPU.  Work associated with a
 * workstream can be processed in other threads during direct dispatch;
 * concurrent processing is prevented by the NWS_RUNNING flag, which
 * indicates that a thread is already processing the work queue.
 */
struct netisr_workstream {
	struct intr_event *nws_intr_event;	/* Handler for stream. */
	void		*nws_swi_cookie;	/* swi(9) cookie for stream. */
	struct mtx	 nws_mtx;		/* Synchronize work. */
	u_int		 nws_cpu;		/* CPU pinning. */
	u_int		 nws_flags;		/* Wakeup flags. */
	u_int		 nws_pendingbits;	/* Scheduled protocols. */

	/*
	 * Each protocol has per-workstream data.
	 */
	struct netisr_work	nws_work[NETISR_MAXPROT];
} __aligned(CACHE_LINE_SIZE);

/*
 * Per-CPU workstream data, indexed by CPU ID.
 */
static struct netisr_workstream		 nws[MAXCPU];

/*
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * indexing the nws[] array.  This allows constructions of the form
 * nws[nws_array[arbitraryvalue % nws_count]].
 */
static u_int				 nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int				 nws_count;
SYSCTL_INT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Per-workstream flags.
 */
#define	NWS_RUNNING	0x00000001	/* Currently running in a thread. */
#define	NWS_DISPATCHING	0x00000002	/* Currently being direct-dispatched. */
#define	NWS_SCHEDULED	0x00000004	/* Signal issued. */

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)

/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	KASSERT(cpunumber < nws_count, ("%s: %u >= %u", __func__, cpunumber,
	    nws_count));

	return (nws_array[cpunumber]);
}

/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}
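
/*
 * For instance (a sketch, not code from the tree), a protocol maintaining
 * per-CPU caches of its own can place a flow's data on the CPU netisr would
 * pick for it under NETISR_POLICY_FLOW:
 *
 *	cpuid = netisr_default_flow2cpu(m->m_pkthdr.flowid);
 *
 * with netisr_get_cpucount() bounding how many such CPUs are in use.
 */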

/*
 * Register a new netisr handler, which requires initializing per-protocol
 * fields for each workstream.  All netisr work is briefly suspended while
 * the protocol is installed.
 */
void
netisr_register(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
	const char *name;
	u_int i, proto;

	proto = nhp->nh_proto;
	name = nhp->nh_name;

	/*
	 * Test that the requested registration is valid.
	 */
	KASSERT(nhp->nh_name != NULL,
	    ("%s: nh_name NULL for %u", __func__, proto));
	KASSERT(nhp->nh_handler != NULL,
	    ("%s: nh_handler NULL for %s", __func__, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
	    nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_policy == NETISR_POLICY_CPU,
	    ("%s: unsupported nh_policy %u for %s", __func__,
	    nhp->nh_policy, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_m2flow == NULL,
	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
	    name));
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u, %s): protocol too big", __func__, proto, name));

	/*
	 * Test that no existing registration exists for this protocol.
	 */
	NETISR_WLOCK();
	KASSERT(np[proto].np_name == NULL,
	    ("%s(%u, %s): name present", __func__, proto, name));
	KASSERT(np[proto].np_handler == NULL,
	    ("%s(%u, %s): handler present", __func__, proto, name));

	np[proto].np_name = name;
	np[proto].np_handler = nhp->nh_handler;
	np[proto].np_m2flow = nhp->nh_m2flow;
	np[proto].np_m2cpuid = nhp->nh_m2cpuid;
	if (nhp->nh_qlimit == 0)
		np[proto].np_qlimit = netisr_defaultqlimit;
	else if (nhp->nh_qlimit > netisr_maxqlimit) {
		printf("%s: %s requested queue limit %u capped to "
		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
		    netisr_maxqlimit);
		np[proto].np_qlimit = netisr_maxqlimit;
	} else
		np[proto].np_qlimit = nhp->nh_qlimit;
	np[proto].np_policy = nhp->nh_policy;
	for (i = 0; i < MAXCPU; i++) {
		npwp = &nws[i].nws_work[proto];
		bzero(npwp, sizeof(*npwp));
		npwp->nw_qlimit = np[proto].np_qlimit;
	}
	NETISR_WUNLOCK();
}

/*
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	for (i = 0; i < MAXCPU; i++) {
		npwp = &nws[i].nws_work[proto];
		npwp->nw_qdrops = 0;
	}
	NETISR_WUNLOCK();
}

/*
 * Query the current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
	struct netisr_work *npwp;
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	*qdropp = 0;
	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	for (i = 0; i < MAXCPU; i++) {
		npwp = &nws[i].nws_work[proto];
		*qdropp += npwp->nw_qdrops;
	}
	NETISR_RUNLOCK(&tracker);
}

/*
 * Query the current queue limit for per-workstream queues for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));
	*qlimitp = np[proto].np_qlimit;
	NETISR_RUNLOCK(&tracker);
}

/*
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	if (qlimit > netisr_maxqlimit)
		return (EINVAL);

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	np[proto].np_qlimit = qlimit;
	for (i = 0; i < MAXCPU; i++) {
		npwp = &nws[i].nws_work[proto];
		npwp->nw_qlimit = qlimit;
	}
	NETISR_WUNLOCK();
	return (0);
}
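
/*
 * For example (hypothetical handler "foo_nh" from the sketch near the top
 * of this file), a protocol expecting bursty input might raise its limit at
 * runtime:
 *
 *	error = netisr_setqlimit(&foo_nh, 2048);
 *
 * Unlike netisr_register(), which caps oversized nh_qlimit requests,
 * netisr_setqlimit() rejects values above net.isr.maxqlimit with EINVAL.
 */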

/*
 * Drain all packets currently held in a particular protocol work queue.
 */
static void
netisr_drain_proto(struct netisr_work *npwp)
{
	struct mbuf *m;

	/*
	 * We would assert the lock on the workstream but it's not passed in.
	 */
	while ((m = npwp->nw_head) != NULL) {
		npwp->nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL)
			npwp->nw_tail = NULL;
		npwp->nw_len--;
		m_freem(m);
	}
	KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
	KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
}

/*
 * Remove the registration of a network protocol, which requires clearing
 * per-protocol fields across all workstreams, including freeing all mbufs in
 * the queues at time of unregister.  All work in netisr is briefly suspended
 * while this takes place.
 */
void
netisr_unregister(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(np[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	np[proto].np_name = NULL;
	np[proto].np_handler = NULL;
	np[proto].np_m2flow = NULL;
	np[proto].np_m2cpuid = NULL;
	np[proto].np_qlimit = 0;
	np[proto].np_policy = 0;
	for (i = 0; i < MAXCPU; i++) {
		npwp = &nws[i].nws_work[proto];
		netisr_drain_proto(npwp);
		bzero(npwp, sizeof(*npwp));
	}
	NETISR_WUNLOCK();
}

/*
 * Look up the workstream to use for a packet and source identifier.  Do
 * this by checking the protocol's policy, and optionally calling out to the
 * protocol for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, uintptr_t source,
    struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;

	NETISR_LOCK_ASSERT();

	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	switch (npp->np_policy) {
	case NETISR_POLICY_CPU:
		return (npp->np_m2cpuid(m, source, cpuidp));

	case NETISR_POLICY_FLOW:
		if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (m->m_flags & M_FLOWID) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		/* FALLTHROUGH */

	case NETISR_POLICY_SOURCE:
		ifp = m->m_pkthdr.rcvif;
		if (ifp != NULL)
			*cpuidp = nws_array[(ifp->if_index + source) %
			    nws_count];
		else
			*cpuidp = nws_array[source % nws_count];
		return (m);

	default:
		panic("%s: invalid policy %u for %s", __func__,
		    npp->np_policy, npp->np_name);
	}
}
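
/*
 * For NETISR_POLICY_FLOW, a protocol whose packets arrive without a
 * hardware-assigned flow ID can supply an nh_m2flow callback to compute
 * one.  A sketch (foo_hash_headers() is illustrative, not a real function):
 *
 *	static struct mbuf *
 *	foo_m2flow(struct mbuf *m, uintptr_t source)
 *	{
 *
 *		m->m_pkthdr.flowid = foo_hash_headers(m);
 *		m->m_flags |= M_FLOWID;
 *		return (m);
 *	}
 *
 * Once M_FLOWID is set, netisr_select_cpuid() maps the flow ID to a CPU
 * with netisr_default_flow2cpu(), preserving per-flow ordering and
 * affinity.
 */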

/*
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
	struct netisr_work local_npw, *npwp;
	u_int handled;
	struct mbuf *m;

	NETISR_LOCK_ASSERT();
	NWS_LOCK_ASSERT(nwsp);

	KASSERT(nwsp->nws_flags & NWS_RUNNING,
	    ("%s(%u): not running", __func__, proto));
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): invalid proto", __func__, proto));

	npwp = &nwsp->nws_work[proto];
	if (npwp->nw_len == 0)
		return (0);

	/*
	 * Move the global work queue to a thread-local work queue.
	 *
	 * Notice that this means the effective maximum length of the queue
	 * is actually twice that of the maximum queue length specified in
	 * the protocol registration call.
	 */
	handled = npwp->nw_len;
	local_npw = *npwp;
	npwp->nw_head = NULL;
	npwp->nw_tail = NULL;
	npwp->nw_len = 0;
	nwsp->nws_pendingbits &= ~(1 << proto);
	NWS_UNLOCK(nwsp);
	while ((m = local_npw.nw_head) != NULL) {
		local_npw.nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (local_npw.nw_head == NULL)
			local_npw.nw_tail = NULL;
		local_npw.nw_len--;
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL);
		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
		np[proto].np_handler(m);
		CURVNET_RESTORE();
	}
	KASSERT(local_npw.nw_len == 0,
	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
	NWS_LOCK(nwsp);
	npwp->nw_handled += handled;
	return (handled);
}

/*
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	nwsp = arg;

#ifdef DEVICE_POLLING
	KASSERT(nws_count == 1,
	    ("%s: device_polling but nws_count != 1", __func__));
	netisr_poll();
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	while ((bits = nwsp->nws_pendingbits) != 0) {
		while ((prot = ffs(bits)) != 0) {
			prot--;
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	netisr_pollmore();
#endif
}

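/*
 * Enqueue a packet on a protocol's per-workstream queue.  The workstream
 * lock must be held.  If the workstream needs waking, *dosignalp is set so
 * that the caller can issue NWS_SIGNAL() after dropping the lock; returns
 * ENOBUFS if the queue is already at its limit.
 */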
static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

	NWS_LOCK_ASSERT(nwsp);

	*dosignalp = 0;
	if (npwp->nw_len < npwp->nw_qlimit) {
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL) {
			npwp->nw_head = m;
			npwp->nw_tail = m;
		} else {
			npwp->nw_tail->m_nextpkt = m;
			npwp->nw_tail = m;
		}
		npwp->nw_len++;
		if (npwp->nw_len > npwp->nw_watermark)
			npwp->nw_watermark = npwp->nw_len;
		nwsp->nws_pendingbits |= (1 << proto);
		if (!(nwsp->nws_flags &
		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
			nwsp->nws_flags |= NWS_SCHEDULED;
			*dosignalp = 1;	/* Defer until unlocked. */
		}
		npwp->nw_queued++;
		return (0);
	} else {
		npwp->nw_qdrops++;
		return (ENOBUFS);
	}
}

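/*
 * Queue a packet to a workstream on a known CPU: take the workstream lock,
 * enqueue, and deliver any deferred signal once the lock has been dropped.
 */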
static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid < MAXCPU, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, MAXCPU));

	dosignal = 0;
	error = 0;
	nwsp = &nws[cpuid];
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}

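/*
 * Queue a packet for deferred dispatch: select a CPU based on the
 * protocol's policy and the source identifier, then enqueue the packet to
 * that CPU's workstream.
 */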
int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(np[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	m = netisr_select_cpuid(&np[proto], source, m, &cpuid);
	if (m != NULL)
		error = netisr_queue_internal(proto, m, cpuid);
	else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}

/*
 * Dispatch a packet for netisr processing, direct dispatch permitted by
 * calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid;

	/*
	 * If direct dispatch is entirely disabled, fall back on queueing.
	 */
	if (!netisr_direct)
		return (netisr_queue_src(proto, source, m));

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(np[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (netisr_direct_force) {
		nwsp = &nws[curcpu];
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		np[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
	m = netisr_select_cpuid(&np[proto], source, m, &cpuid);
	if (m == NULL) {
		error = ENOBUFS;
		goto out_unlock;
	}
	sched_pin();
	if (cpuid != curcpu)
		goto queue_fallback;
	nwsp = &nws[cpuid];
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	np[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
	sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}

#ifdef DEVICE_POLLING
/*
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
	struct netisr_workstream *nwsp;

	nwsp = &nws[nws_array[0]];
	NWS_SIGNAL(nwsp);
}
#endif

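/*
 * Set up the workstream for a CPU: initialize its mutex, create the SWI
 * thread, optionally bind it to the CPU, and publish the CPU ID in
 * nws_array so that work can be directed to it.
 */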
static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
	char swiname[12];
	struct netisr_workstream *nwsp;
	int error;

	nwsp = &nws[cpuid];
	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
	nwsp->nws_cpu = cpuid;
	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);
	pc->pc_netisr = nwsp->nws_intr_event;
	if (netisr_bindthreads) {
		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
		if (error != 0)
			printf("%s: cpu %u: intr_event_bind: %d\n", __func__,
			    cpuid, error);
	}
	NETISR_WLOCK();
	nws_array[nws_count] = nwsp->nws_cpu;
	nws_count++;
	NETISR_WUNLOCK();
}

/*
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{

	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));

	NETISR_LOCK_INIT();
	if (netisr_maxthreads < 1) {
		printf("netisr: forcing maxthreads to 1\n");
		netisr_maxthreads = 1;
	}
	if (netisr_maxthreads > MAXCPU) {
		printf("netisr: forcing maxthreads to %d\n", MAXCPU);
		netisr_maxthreads = MAXCPU;
	}
	if (netisr_defaultqlimit > netisr_maxqlimit) {
		printf("netisr: forcing defaultqlimit to %u\n",
		    netisr_maxqlimit);
		netisr_defaultqlimit = netisr_maxqlimit;
	}
#ifdef DEVICE_POLLING
	/*
	 * The device polling code is not yet aware of how to deal with
	 * multiple netisr threads, so for the time being compiling in device
	 * polling disables parallel netisr workers.
	 */
	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
		printf("netisr: forcing maxthreads to 1 and bindthreads to "
		    "0 for device polling\n");
		netisr_maxthreads = 1;
		netisr_bindthreads = 0;
	}
#endif

	netisr_start_swi(curcpu, pcpu_find(curcpu));
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);

/*
 * Start worker threads for additional CPUs.  We make no attempt to
 * gracefully handle work reassignment, as we don't yet support dynamic
 * reconfiguration.
 */
static void
netisr_start(void *arg)
{
	struct pcpu *pc;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		/* XXXRW: Is skipping absent CPUs still required here? */
		if (CPU_ABSENT(pc->pc_cpuid))
			continue;
		/* Worker will already be present for boot CPU. */
		if (pc->pc_netisr != NULL)
			continue;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);

#ifdef DDB
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *nwp;
	int first, proto;
	u_int cpu;

	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
	for (cpu = 0; cpu < MAXCPU; cpu++) {
		nwsp = &nws[cpu];
		if (nwsp->nws_intr_event == NULL)
			continue;
		first = 1;
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			if (np[proto].np_handler == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			if (first) {
				db_printf("%3d ", cpu);
				first = 0;
			} else
				db_printf("%3s ", "");
			db_printf(
			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
			    np[proto].np_name, nwp->nw_len,
			    nwp->nw_watermark, nwp->nw_qlimit,
			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
			    nwp->nw_qdrops, nwp->nw_queued);
		}
	}
}
#endif