/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other based on:
 *
 * - Whether directly dispatching a netisr handler would lead to code
 *   reentrance or lock recursion, such as entering the socket code from the
 *   socket code.
 * - Whether directly dispatching a netisr handler would lead to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * netisr.h.
 */

#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	_WANT_NETISR_INTERNAL	/* Enable definitions from netisr_internal.h */
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/netisr_internal.h>
#include <net/vnet.h>

/*-
 * Synchronize use and modification of the registered netisr data structures;
 * packet processing paths may acquire a read lock, while modifying the set
 * of registered protocols requires the write lock, preventing partially
 * registered or unregistered protocols from being run.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The netisr_proto array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_worker.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration is extremely rare at
 * runtime.  If it becomes more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */

static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "netisr");

/*-
 * Three global direct dispatch policies are supported:
 *
 * NETISR_DISPATCH_DEFERRED: All work is deferred for a netisr, regardless of
 * context (may be overridden by protocols).
 *
 * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch,
 * and we're running on the CPU the work would be performed on, then direct
 * dispatch it if it wouldn't violate ordering constraints on the workstream.
 *
 * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch,
 * always direct dispatch.  (The default.)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.  Protocols can
 * override the global policy (when they're not doing that, they select
 * NETISR_DISPATCH_DEFAULT).
 */
#define	NETISR_DISPATCH_POLICY_DEFAULT	NETISR_DISPATCH_DIRECT
#define	NETISR_DISPATCH_POLICY_MAXSTR	20 /* Used for temporary buffers. */
static u_int	netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT;
static int	sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_net_isr, OID_AUTO, dispatch,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_netisr_dispatch_policy, "A",
    "netisr dispatch policy");

/*
 * Allow the administrator to limit the number of threads (CPUs) to use for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0.  This must be set at boot.  We will create at most one thread per
 * CPU.  By default this is set to 1, which assigns only CPU 0 and therefore
 * a single workstream.  If set to -1, netisr uses all CPUs (mp_ncpus), with
 * one workstream per thread (CPU).
 */
static int	netisr_maxthreads = 1;		/* Max number of threads. */
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");

/*
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit, both
 * for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");

/*
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");

/*
 * Store and export the compile-time constant NETISR_MAXPROT limit on the
 * number of protocols that can register with netisr at a time.  This is
 * required for crashdump analysis, as it sizes netisr_proto[].
 */
static u_int	netisr_maxprot = NETISR_MAXPROT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
    &netisr_maxprot, 0,
    "Compile-time limit on the number of protocols supported by netisr.");

/*
 * The netisr_proto array describes all registered protocols, indexed by
 * protocol number.  See netisr_internal.h for more details.
 */
static struct netisr_proto	netisr_proto[NETISR_MAXPROT];

#ifdef VIMAGE
/*
 * The netisr_enable array describes a per-VNET flag for registered
 * protocols on whether this netisr is active in this VNET or not.
 * netisr_register() will automatically enable the netisr for the
 * default VNET and all currently active instances.
 * netisr_unregister() will disable all active VNETs, including vnet0.
 * Individual network stack instances can be enabled/disabled by the
 * netisr_(un)register_vnet() functions.
 * With this we keep the one netisr_proto per protocol but add a
 * mechanism to stop netisr processing for vnet teardown.
 * Apart from that we expect a VNET to always be enabled.
 */
VNET_DEFINE_STATIC(u_int,	netisr_enable[NETISR_MAXPROT]);
#define	V_netisr_enable		VNET(netisr_enable)
#endif

/*
 * Per-CPU workstream data.  See netisr_internal.h for more details.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);

/*
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int	nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int	nws_count;
SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)

/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	return (nws_array[cpunumber % nws_count]);
}

/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}
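/*
 * Hypothetical usage sketch (not compiled as part of this file): a protocol
 * that performs its own flow-to-CPU mapping might wrap the accessors above
 * so that its CPU choices stay consistent with netisr's set of workstreams.
 * The function name and flow-ID source below are illustrative assumptions
 * only.
 *
 *	static u_int
 *	example_proto_flow2cpu(uint32_t flowid)
 *	{
 *
 *		// Valid workstream CPU IDs live in nws_array[0..nws_count-1];
 *		// netisr_get_cpuid() performs the modulo mapping for us.
 *		return (netisr_get_cpuid(flowid));
 *	}
 */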
297 */ 298 struct netisr_dispatch_table_entry { 299 u_int ndte_policy; 300 const char *ndte_policy_str; 301 }; 302 static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = { 303 { NETISR_DISPATCH_DEFAULT, "default" }, 304 { NETISR_DISPATCH_DEFERRED, "deferred" }, 305 { NETISR_DISPATCH_HYBRID, "hybrid" }, 306 { NETISR_DISPATCH_DIRECT, "direct" }, 307 }; 308 309 static void 310 netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer, 311 u_int buflen) 312 { 313 const struct netisr_dispatch_table_entry *ndtep; 314 const char *str; 315 u_int i; 316 317 str = "unknown"; 318 for (i = 0; i < nitems(netisr_dispatch_table); i++) { 319 ndtep = &netisr_dispatch_table[i]; 320 if (ndtep->ndte_policy == dispatch_policy) { 321 str = ndtep->ndte_policy_str; 322 break; 323 } 324 } 325 snprintf(buffer, buflen, "%s", str); 326 } 327 328 static int 329 netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp) 330 { 331 const struct netisr_dispatch_table_entry *ndtep; 332 u_int i; 333 334 for (i = 0; i < nitems(netisr_dispatch_table); i++) { 335 ndtep = &netisr_dispatch_table[i]; 336 if (strcmp(ndtep->ndte_policy_str, str) == 0) { 337 *dispatch_policyp = ndtep->ndte_policy; 338 return (0); 339 } 340 } 341 return (EINVAL); 342 } 343 344 static int 345 sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS) 346 { 347 char tmp[NETISR_DISPATCH_POLICY_MAXSTR]; 348 size_t len; 349 u_int dispatch_policy; 350 int error; 351 352 netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp, 353 sizeof(tmp)); 354 /* 355 * netisr is initialised very early during the boot when malloc isn't 356 * available yet so we can't use sysctl_handle_string() to process 357 * any non-default value that was potentially set via loader. 358 */ 359 if (req->newptr != NULL) { 360 len = req->newlen - req->newidx; 361 if (len >= NETISR_DISPATCH_POLICY_MAXSTR) 362 return (EINVAL); 363 error = SYSCTL_IN(req, tmp, len); 364 if (error == 0) { 365 tmp[len] = '\0'; 366 error = netisr_dispatch_policy_from_str(tmp, 367 &dispatch_policy); 368 if (error == 0 && 369 dispatch_policy == NETISR_DISPATCH_DEFAULT) 370 error = EINVAL; 371 if (error == 0) 372 netisr_dispatch_policy = dispatch_policy; 373 } 374 } else { 375 error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req); 376 } 377 return (error); 378 } 379 380 /* 381 * Register a new netisr handler, which requires initializing per-protocol 382 * fields for each workstream. All netisr work is briefly suspended while 383 * the protocol is installed. 384 */ 385 void 386 netisr_register(const struct netisr_handler *nhp) 387 { 388 VNET_ITERATOR_DECL(vnet_iter); 389 struct netisr_work *npwp; 390 const char *name; 391 u_int i, proto; 392 393 proto = nhp->nh_proto; 394 name = nhp->nh_name; 395 396 /* 397 * Test that the requested registration is valid. 
/*
 * Register a new netisr handler, which requires initializing per-protocol
 * fields for each workstream.  All netisr work is briefly suspended while
 * the protocol is installed.
 */
void
netisr_register(const struct netisr_handler *nhp)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct netisr_work *npwp;
	const char *name;
	u_int i, proto;

	proto = nhp->nh_proto;
	name = nhp->nh_name;

	/*
	 * Test that the requested registration is valid.
	 */
	KASSERT(nhp->nh_name != NULL,
	    ("%s: nh_name NULL for %u", __func__, proto));
	KASSERT(nhp->nh_handler != NULL,
	    ("%s: nh_handler NULL for %s", __func__, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
	    nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_policy == NETISR_POLICY_CPU,
	    ("%s: unsupported nh_policy %u for %s", __func__,
	    nhp->nh_policy, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_m2flow == NULL,
	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT ||
	    nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED ||
	    nhp->nh_dispatch == NETISR_DISPATCH_HYBRID ||
	    nhp->nh_dispatch == NETISR_DISPATCH_DIRECT,
	    ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch));

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u, %s): protocol too big", __func__, proto, name));

	/*
	 * Test that no existing registration exists for this protocol.
	 */
	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_name == NULL,
	    ("%s(%u, %s): name present", __func__, proto, name));
	KASSERT(netisr_proto[proto].np_handler == NULL,
	    ("%s(%u, %s): handler present", __func__, proto, name));

	netisr_proto[proto].np_name = name;
	netisr_proto[proto].np_handler = nhp->nh_handler;
	netisr_proto[proto].np_m2flow = nhp->nh_m2flow;
	netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid;
	netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu;
	if (nhp->nh_qlimit == 0)
		netisr_proto[proto].np_qlimit = netisr_defaultqlimit;
	else if (nhp->nh_qlimit > netisr_maxqlimit) {
		printf("%s: %s requested queue limit %u capped to "
		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
		    netisr_maxqlimit);
		netisr_proto[proto].np_qlimit = netisr_maxqlimit;
	} else
		netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
	netisr_proto[proto].np_policy = nhp->nh_policy;
	netisr_proto[proto].np_dispatch = nhp->nh_dispatch;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		bzero(npwp, sizeof(*npwp));
		npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
	}

#ifdef VIMAGE
	/*
	 * Test that we are in vnet0 and have a curvnet set.
	 */
	KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__));
	KASSERT(IS_DEFAULT_VNET(curvnet), ("%s: curvnet %p is not vnet0 %p",
	    __func__, curvnet, vnet0));
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_netisr_enable[proto] = 1;
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
#endif
	NETISR_WUNLOCK();
}
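/*
 * Hypothetical registration sketch (not part of this file): a protocol
 * typically fills in a static struct netisr_handler and registers it from
 * its initialization path, much as ip_input.c does for NETISR_IP.  The
 * protocol slot, handler name, and init hook below are illustrative
 * assumptions only.
 *
 *	static const struct netisr_handler example_nh = {
 *		.nh_name = "example",
 *		.nh_handler = example_proto_input,	// void fn(struct mbuf *)
 *		.nh_proto = NETISR_EXAMPLE,		// slot < NETISR_MAXPROT
 *		.nh_qlimit = 512,			// 0 selects defaultqlimit
 *		.nh_policy = NETISR_POLICY_FLOW,
 *		.nh_dispatch = NETISR_DISPATCH_DEFAULT,	// follow net.isr.dispatch
 *	};
 *
 *	static void
 *	example_proto_init(void *arg __unused)
 *	{
 *
 *		netisr_register(&example_nh);
 *	}
 *	SYSINIT(example_proto, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY,
 *	    example_proto_init, NULL);
 */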
/*
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qdrops = 0;
	}
	NETISR_WUNLOCK();
}

/*
 * Query current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
	struct netisr_work *npwp;
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	*qdropp = 0;
	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		*qdropp += npwp->nw_qdrops;
	}
	NETISR_RUNLOCK(&tracker);
}

/*
 * Query current per-workstream queue limit for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));
	*qlimitp = netisr_proto[proto].np_qlimit;
	NETISR_RUNLOCK(&tracker);
}

/*
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	if (qlimit > netisr_maxqlimit)
		return (EINVAL);

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_qlimit = qlimit;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qlimit = qlimit;
	}
	NETISR_WUNLOCK();
	return (0);
}
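/*
 * Hypothetical usage sketch (not part of this file): protocols commonly wire
 * the accessors above to their own sysctls so that administrators can tune
 * queueing at runtime, in the style of net.inet.ip.intr_queue_maxlen.  The
 * handler name and the "example_nh" handler structure are illustrative
 * assumptions.
 *
 *	static int
 *	sysctl_example_qlimit(SYSCTL_HANDLER_ARGS)
 *	{
 *		u_int qlimit;
 *		int error;
 *
 *		netisr_getqlimit(&example_nh, &qlimit);
 *		error = sysctl_handle_int(oidp, &qlimit, 0, req);
 *		if (error || !req->newptr)
 *			return (error);
 *		return (netisr_setqlimit(&example_nh, qlimit));
 *	}
 */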
615 */ 616 while ((m = npwp->nw_head) != NULL) { 617 npwp->nw_head = m->m_nextpkt; 618 m->m_nextpkt = NULL; 619 if (npwp->nw_head == NULL) 620 npwp->nw_tail = NULL; 621 npwp->nw_len--; 622 m_freem(m); 623 } 624 KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__)); 625 KASSERT(npwp->nw_len == 0, ("%s: len", __func__)); 626 } 627 628 /* 629 * Remove the registration of a network protocol, which requires clearing 630 * per-protocol fields across all workstreams, including freeing all mbufs in 631 * the queues at time of unregister. All work in netisr is briefly suspended 632 * while this takes place. 633 */ 634 void 635 netisr_unregister(const struct netisr_handler *nhp) 636 { 637 VNET_ITERATOR_DECL(vnet_iter); 638 struct netisr_work *npwp; 639 #ifdef INVARIANTS 640 const char *name; 641 #endif 642 u_int i, proto; 643 644 proto = nhp->nh_proto; 645 #ifdef INVARIANTS 646 name = nhp->nh_name; 647 #endif 648 KASSERT(proto < NETISR_MAXPROT, 649 ("%s(%u): protocol too big for %s", __func__, proto, name)); 650 651 NETISR_WLOCK(); 652 KASSERT(netisr_proto[proto].np_handler != NULL, 653 ("%s(%u): protocol not registered for %s", __func__, proto, 654 name)); 655 656 #ifdef VIMAGE 657 VNET_LIST_RLOCK_NOSLEEP(); 658 VNET_FOREACH(vnet_iter) { 659 CURVNET_SET(vnet_iter); 660 V_netisr_enable[proto] = 0; 661 CURVNET_RESTORE(); 662 } 663 VNET_LIST_RUNLOCK_NOSLEEP(); 664 #endif 665 666 netisr_proto[proto].np_name = NULL; 667 netisr_proto[proto].np_handler = NULL; 668 netisr_proto[proto].np_m2flow = NULL; 669 netisr_proto[proto].np_m2cpuid = NULL; 670 netisr_proto[proto].np_qlimit = 0; 671 netisr_proto[proto].np_policy = 0; 672 CPU_FOREACH(i) { 673 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 674 netisr_drain_proto(npwp); 675 bzero(npwp, sizeof(*npwp)); 676 } 677 NETISR_WUNLOCK(); 678 } 679 680 #ifdef VIMAGE 681 void 682 netisr_register_vnet(const struct netisr_handler *nhp) 683 { 684 u_int proto; 685 686 proto = nhp->nh_proto; 687 688 KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__)); 689 KASSERT(proto < NETISR_MAXPROT, 690 ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name)); 691 NETISR_WLOCK(); 692 KASSERT(netisr_proto[proto].np_handler != NULL, 693 ("%s(%u): protocol not registered for %s", __func__, proto, 694 nhp->nh_name)); 695 696 V_netisr_enable[proto] = 1; 697 NETISR_WUNLOCK(); 698 } 699 700 static void 701 netisr_drain_proto_vnet(struct vnet *vnet, u_int proto) 702 { 703 struct netisr_workstream *nwsp; 704 struct netisr_work *npwp; 705 struct mbuf *m, *mp, *n, *ne; 706 u_int i; 707 708 KASSERT(vnet != NULL, ("%s: vnet is NULL", __func__)); 709 NETISR_LOCK_ASSERT(); 710 711 CPU_FOREACH(i) { 712 nwsp = DPCPU_ID_PTR(i, nws); 713 if (nwsp->nws_intr_event == NULL) 714 continue; 715 npwp = &nwsp->nws_work[proto]; 716 NWS_LOCK(nwsp); 717 718 /* 719 * Rather than dissecting and removing mbufs from the middle 720 * of the chain, we build a new chain if the packet stays and 721 * update the head and tail pointers at the end. All packets 722 * matching the given vnet are freed. 723 */ 724 m = npwp->nw_head; 725 n = ne = NULL; 726 while (m != NULL) { 727 mp = m; 728 m = m->m_nextpkt; 729 mp->m_nextpkt = NULL; 730 if (mp->m_pkthdr.rcvif->if_vnet != vnet) { 731 if (n == NULL) { 732 n = ne = mp; 733 } else { 734 ne->m_nextpkt = mp; 735 ne = mp; 736 } 737 continue; 738 } 739 /* This is a packet in the selected vnet. Free it. 
			npwp->nw_len--;
			m_freem(mp);
		}
		npwp->nw_head = n;
		npwp->nw_tail = ne;
		NWS_UNLOCK(nwsp);
	}
}

void
netisr_unregister_vnet(const struct netisr_handler *nhp)
{
	u_int proto;

	proto = nhp->nh_proto;

	KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__));
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name));
	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    nhp->nh_name));

	V_netisr_enable[proto] = 0;

	netisr_drain_proto_vnet(curvnet, proto);
	NETISR_WUNLOCK();
}
#endif

/*
 * Compose the global and per-protocol policies on dispatch, and return the
 * dispatch policy to use.
 */
static u_int
netisr_get_dispatch(struct netisr_proto *npp)
{

	/*
	 * Protocol-specific configuration overrides the global default.
	 */
	if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT)
		return (npp->np_dispatch);
	return (netisr_dispatch_policy);
}

/*
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy,
    uintptr_t source, struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;
	u_int policy;

	NETISR_LOCK_ASSERT();

	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	policy = npp->np_policy;
	if (policy == NETISR_POLICY_CPU) {
		m = npp->np_m2cpuid(m, source, cpuidp);
		if (m == NULL)
			return (NULL);

		/*
		 * It's possible for a protocol not to have a good idea about
		 * where to process a packet, in which case we fall back on
		 * the netisr code to decide.  In the hybrid case, return the
		 * current CPU ID, which will force an immediate direct
		 * dispatch.  In the queued case, fall back on the SOURCE
		 * policy.
		 */
		if (*cpuidp != NETISR_CPUID_NONE) {
			*cpuidp = netisr_get_cpuid(*cpuidp);
			return (m);
		}
		if (dispatch_policy == NETISR_DISPATCH_HYBRID) {
			*cpuidp = netisr_get_cpuid(curcpu);
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	if (policy == NETISR_POLICY_FLOW) {
		if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE &&
		    npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	KASSERT(policy == NETISR_POLICY_SOURCE,
	    ("%s: invalid policy %u for %s", __func__, npp->np_policy,
	    npp->np_name));

	MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
	ifp = m->m_pkthdr.rcvif;
	if (ifp != NULL)
		*cpuidp = nws_array[(ifp->if_index + source) % nws_count];
	else
		*cpuidp = nws_array[source % nws_count];
	return (m);
}

/*
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
	struct netisr_work local_npw, *npwp;
	u_int handled;
	struct mbuf *m;

	NETISR_LOCK_ASSERT();
	NWS_LOCK_ASSERT(nwsp);

	KASSERT(nwsp->nws_flags & NWS_RUNNING,
	    ("%s(%u): not running", __func__, proto));
	KASSERT(proto >= 0 && proto < NETISR_MAXPROT,
	    ("%s(%u): invalid proto\n", __func__, proto));

	npwp = &nwsp->nws_work[proto];
	if (npwp->nw_len == 0)
		return (0);

	/*
	 * Move the global work queue to a thread-local work queue.
	 *
	 * Notice that this means the effective maximum length of the queue
	 * is actually twice that of the maximum queue length specified in
	 * the protocol registration call.
	 */
	handled = npwp->nw_len;
	local_npw = *npwp;
	npwp->nw_head = NULL;
	npwp->nw_tail = NULL;
	npwp->nw_len = 0;
	nwsp->nws_pendingbits &= ~(1 << proto);
	NWS_UNLOCK(nwsp);
	while ((m = local_npw.nw_head) != NULL) {
		local_npw.nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (local_npw.nw_head == NULL)
			local_npw.nw_tail = NULL;
		local_npw.nw_len--;
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
		    ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
		netisr_proto[proto].np_handler(m);
		CURVNET_RESTORE();
	}
	KASSERT(local_npw.nw_len == 0,
	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
	if (netisr_proto[proto].np_drainedcpu)
		netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu);
	NWS_LOCK(nwsp);
	npwp->nw_handled += handled;
	return (handled);
}

/*
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	nwsp = arg;

#ifdef DEVICE_POLLING
	KASSERT(nws_count == 1,
	    ("%s: device_polling but nws_count != 1", __func__));
	netisr_poll();
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	while ((bits = nwsp->nws_pendingbits) != 0) {
		while ((prot = ffs(bits)) != 0) {
			prot--;
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	netisr_pollmore();
#endif
}

static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

	NWS_LOCK_ASSERT(nwsp);

	*dosignalp = 0;
	if (npwp->nw_len < npwp->nw_qlimit) {
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL) {
			npwp->nw_head = m;
			npwp->nw_tail = m;
		} else {
			npwp->nw_tail->m_nextpkt = m;
			npwp->nw_tail = m;
		}
		npwp->nw_len++;
		if (npwp->nw_len > npwp->nw_watermark)
			npwp->nw_watermark = npwp->nw_len;

		/*
		 * We must set the bit regardless of NWS_RUNNING, so that
		 * swi_net() keeps calling netisr_process_workstream_proto().
		 */
		nwsp->nws_pendingbits |= (1 << proto);
		if (!(nwsp->nws_flags &
		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
			nwsp->nws_flags |= NWS_SCHEDULED;
			*dosignalp = 1;	/* Defer until unlocked. */
		}
		npwp->nw_queued++;
		return (0);
	} else {
		m_freem(m);
		npwp->nw_qdrops++;
		return (ENOBUFS);
	}
}

static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, mp_maxid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	dosignal = 0;
	error = 0;
	nwsp = DPCPU_ID_PTR(cpuid, nws);
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}

int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef VIMAGE
	if (V_netisr_enable[proto] == 0) {
		m_freem(m);
		return (ENOPROTOOPT);
	}
#endif

	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED,
	    source, m, &cpuid);
	if (m != NULL) {
		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
		    cpuid));
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
		    ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
		error = netisr_queue_internal(proto, m, cpuid);
	} else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}

/*
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid, dispatch_policy;

	NET_EPOCH_ASSERT();
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	npp = &netisr_proto[proto];
	KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__,
	    proto));

#ifdef VIMAGE
	if (V_netisr_enable[proto] == 0) {
		m_freem(m);
		return (ENOPROTOOPT);
	}
#endif

	dispatch_policy = netisr_get_dispatch(npp);
	if (dispatch_policy == NETISR_DISPATCH_DEFERRED)
		return (netisr_queue_src(proto, source, m));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (dispatch_policy == NETISR_DISPATCH_DIRECT) {
		nwsp = DPCPU_PTR(nws);
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		netisr_proto[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID,
	    ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy));

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
	sched_pin();
	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID,
	    source, m, &cpuid);
	if (m == NULL) {
		error = ENOBUFS;
		goto out_unpin;
	}
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
	if (cpuid != curcpu)
		goto queue_fallback;
	nwsp = DPCPU_PTR(nws);
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	netisr_proto[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
	sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}
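/*
 * Hypothetical caller sketch (not part of this file): link-layer input paths
 * hand completed packets to netisr roughly as follows, inside the network
 * epoch, choosing netisr_dispatch() when direct dispatch is acceptable and
 * netisr_queue() when deferral is required.  The counter update and error
 * handling shown are illustrative assumptions only.
 *
 *	m->m_pkthdr.rcvif = ifp;	// required; used for vnet and SOURCE policy
 *	if (netisr_dispatch(NETISR_IP, m) != 0) {
 *		// On error (e.g. ENOBUFS) the mbuf has already been consumed.
 *		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 *	}
 */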
#ifdef DEVICE_POLLING
/*
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
	struct netisr_workstream *nwsp;

	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
	NWS_SIGNAL(nwsp);
}
#endif

static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
	char swiname[12];
	struct netisr_workstream *nwsp;
	int error;

	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	nwsp = DPCPU_ID_PTR(cpuid, nws);
	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
	nwsp->nws_cpu = cpuid;
	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
	    SWI_NET, INTR_TYPE_NET | INTR_MPSAFE, &nwsp->nws_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);
	pc->pc_netisr = nwsp->nws_intr_event;
	if (netisr_bindthreads) {
		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
		if (error != 0)
			printf("%s: cpu %u: intr_event_bind: %d", __func__,
			    cpuid, error);
	}
	NETISR_WLOCK();
	nws_array[nws_count] = nwsp->nws_cpu;
	nws_count++;
	NETISR_WUNLOCK();
}

/*
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{
	struct pcpu *pc;

	NETISR_LOCK_INIT();
	if (netisr_maxthreads == 0 || netisr_maxthreads < -1)
		netisr_maxthreads = 1;		/* default behavior */
	else if (netisr_maxthreads == -1)
		netisr_maxthreads = mp_ncpus;	/* use max cpus */
	if (netisr_maxthreads > mp_ncpus) {
		printf("netisr_init: forcing maxthreads from %d to %d\n",
		    netisr_maxthreads, mp_ncpus);
		netisr_maxthreads = mp_ncpus;
	}
	if (netisr_defaultqlimit > netisr_maxqlimit) {
		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
		    netisr_defaultqlimit, netisr_maxqlimit);
		netisr_defaultqlimit = netisr_maxqlimit;
	}
#ifdef DEVICE_POLLING
	/*
	 * The device polling code is not yet aware of how to deal with
	 * multiple netisr threads, so for the time being compiling in device
	 * polling disables parallel netisr workers.
	 */
	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
		printf("netisr_init: forcing maxthreads to 1 and "
		    "bindthreads to 0 for device polling\n");
		netisr_maxthreads = 1;
		netisr_bindthreads = 0;
	}
#endif

#ifdef EARLY_AP_STARTUP
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
#else
	pc = get_pcpu();
	netisr_start_swi(pc->pc_cpuid, pc);
#endif
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);

#ifndef EARLY_AP_STARTUP
/*
 * Start worker threads for additional CPUs.  No attempt to gracefully handle
 * work reassignment, we don't yet support dynamic reconfiguration.
 */
static void
netisr_start(void *arg)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		/* Worker will already be present for boot CPU. */
		if (pc->pc_netisr != NULL)
			continue;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
#endif

/*
 * Sysctl monitoring for netisr: query a list of registered protocols.
 */
static int
sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_proto *snpp, *snp_array;
	struct netisr_proto *npp;
	u_int counter, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
		npp = &netisr_proto[proto];
		if (npp->np_name == NULL)
			continue;
		snpp = &snp_array[counter];
		snpp->snp_version = sizeof(*snpp);
		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
		snpp->snp_proto = proto;
		snpp->snp_qlimit = npp->np_qlimit;
		snpp->snp_policy = npp->np_policy;
		snpp->snp_dispatch = npp->np_dispatch;
		if (npp->np_m2flow != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
		if (npp->np_m2cpuid != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
		if (npp->np_drainedcpu != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= NETISR_MAXPROT,
	    ("sysctl_netisr_proto: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
	free(snp_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, proto,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
    "S,sysctl_netisr_proto",
    "Return list of protocols registered with netisr");

/*
 * Sysctl monitoring for netisr: query a list of workstreams.
 */
static int
sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_workstream *snwsp, *snws_array;
	struct netisr_workstream *nwsp;
	u_int counter, cpuid;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		snwsp = &snws_array[counter];
		snwsp->snws_version = sizeof(*snwsp);

		/*
		 * For now, we equate workstream IDs and CPU IDs in the
		 * kernel, but expose them independently to userspace in case
		 * that assumption changes in the future.
		 */
		snwsp->snws_wsid = cpuid;
		snwsp->snws_cpu = cpuid;
		if (nwsp->nws_intr_event != NULL)
			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
		NWS_UNLOCK(nwsp);
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= MAXCPU,
	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
	free(snws_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
    "S,sysctl_netisr_workstream",
    "Return list of workstreams implemented by netisr");

/*
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_work *snwp, *snw_array;
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *nwp;
	u_int counter, cpuid, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
	    M_TEMP, M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			npp = &netisr_proto[proto];
			if (npp->np_name == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			snwp = &snw_array[counter];
			snwp->snw_version = sizeof(*snwp);
			snwp->snw_wsid = cpuid;		/* See comment above. */
			snwp->snw_proto = proto;
			snwp->snw_len = nwp->nw_len;
			snwp->snw_watermark = nwp->nw_watermark;
			snwp->snw_dispatched = nwp->nw_dispatched;
			snwp->snw_hybrid_dispatched =
			    nwp->nw_hybrid_dispatched;
			snwp->snw_qdrops = nwp->nw_qdrops;
			snwp->snw_queued = nwp->nw_queued;
			snwp->snw_handled = nwp->nw_handled;
			counter++;
		}
		NWS_UNLOCK(nwsp);
	}
	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
	    ("sysctl_netisr_work: counter too big (%d)", counter));
	NETISR_RUNLOCK(&tracker);
	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
	free(snw_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
    "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");
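/*
 * Illustrative sketch only (userland, not part of this file): monitoring
 * tools in the style of netstat -Q consume the three sysctls above.  A
 * minimal reader of net.isr.proto might look roughly like this; the include
 * set, buffer sizing, and program structure are simplified assumptions.
 *
 *	#define	_WANT_NETISR_INTERNAL
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <net/netisr.h>
 *	#include <net/netisr_internal.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct sysctl_netisr_proto *proto;
 *		size_t i, len;
 *
 *		if (sysctlbyname("net.isr.proto", NULL, &len, NULL, 0) != 0)
 *			return (1);
 *		proto = malloc(len);
 *		if (proto == NULL ||
 *		    sysctlbyname("net.isr.proto", proto, &len, NULL, 0) != 0)
 *			return (1);
 *		for (i = 0; i < len / sizeof(*proto); i++)
 *			printf("%u: %s qlimit %u\n", proto[i].snp_proto,
 *			    proto[i].snp_name, proto[i].snp_qlimit);
 *		free(proto);
 *		return (0);
 *	}
 */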
#ifdef DDB
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *nwp;
	int first, proto;
	u_int cpuid;

	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		first = 1;
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			if (netisr_proto[proto].np_handler == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			if (first) {
				db_printf("%3d ", cpuid);
				first = 0;
			} else
				db_printf("%3s ", "");
			db_printf(
			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
			    netisr_proto[proto].np_name, nwp->nw_len,
			    nwp->nw_watermark, nwp->nw_qlimit,
			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
			    nwp->nw_qdrops, nwp->nw_queued);
		}
	}
}
#endif