/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other based on:
 *
 * - Whether directly dispatching a netisr handler would lead to code
 *   reentrance or lock recursion, such as entering the socket code from the
 *   socket code.
 * - Whether directly dispatching a netisr handler would lead to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * netisr.h.
 */
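/*
 * As a caller-side illustration (a sketch only; the protocol number
 * NETISR_FOO and the surrounding input path are hypothetical), a driver or
 * protocol input routine chooses between the two modes like this:
 *
 *	To permit direct dispatch when the global/protocol policy and the
 *	calling context allow it:
 *
 *		error = netisr_dispatch(NETISR_FOO, m);
 *
 *	To always defer to the protocol's netisr worker, for example to
 *	avoid reentering the protocol or recursing on its locks:
 *
 *		error = netisr_queue(NETISR_FOO, m);
 *
 * In both cases netisr takes ownership of the mbuf; ENOBUFS is returned if
 * the packet had to be queued and the protocol's per-CPU queue was full.
 */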
#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	_WANT_NETISR_INTERNAL	/* Enable definitions from netisr_internal.h */
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/netisr_internal.h>
#include <net/vnet.h>

/*-
 * Synchronize use and modification of the registered netisr data structures;
 * acquire a write lock while modifying the set of registered protocols, so
 * that packet processing paths, which acquire a read lock, never run
 * partially registered or unregistered protocols.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The netisr_proto array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_worker.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration is extremely rare at
 * runtime.  If it becomes more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */

static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "netisr");

/*-
 * Three global direct dispatch policies are supported:
 *
 * NETISR_DISPATCH_DEFERRED: All work is deferred for a netisr, regardless of
 * context (may be overridden by protocols).
 *
 * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch,
 * and we're running on the CPU the work would be performed on, then direct
 * dispatch it if it wouldn't violate ordering constraints on the workstream.
 *
 * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch,
 * always direct dispatch.  (The default.)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.  Protocols can
 * override the global policy (when they're not doing that, they select
 * NETISR_DISPATCH_DEFAULT).
 */
#define	NETISR_DISPATCH_POLICY_DEFAULT	NETISR_DISPATCH_DIRECT
#define	NETISR_DISPATCH_POLICY_MAXSTR	20 /* Used for temporary buffers. */
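/*
 * The global policy is selected via the net.isr.dispatch tunable/sysctl
 * defined below, which accepts the strings "deferred", "hybrid", and
 * "direct" ("default" is only meaningful as a per-protocol setting).  For
 * example, an administrator could force all protocol processing into the
 * netisr threads at runtime with:
 *
 *	# sysctl net.isr.dispatch=deferred
 *
 * or persistently across reboots via loader.conf(5):
 *
 *	net.isr.dispatch="deferred"
 */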
static u_int	netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT;
static int	sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_net_isr, OID_AUTO, dispatch,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_netisr_dispatch_policy, "A",
    "netisr dispatch policy");

/*
 * Allow the administrator to limit the number of threads (CPUs) used for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0, so this must be set at boot.  We create at most one thread per CPU,
 * and one workstream per thread (CPU).  The default of 1 assigns a single
 * CPU (CPU 0) and therefore a single workstream; a value of -1 makes netisr
 * use all CPUs (mp_ncpus), with a corresponding number of workstreams.
 */
static int	netisr_maxthreads = 1;		/* Max number of threads. */
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");

/*
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit, both
 * for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");

/*
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");
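/*
 * The thread and queue-limit tunables above are read-only after boot
 * (CTLFLAG_RDTUN), so they are normally set from loader.conf(5).  A sketch
 * of a configuration that starts one workstream per CPU, binds each SWI
 * thread to its CPU, and raises the default per-protocol queue limit
 * (values are illustrative only):
 *
 *	net.isr.maxthreads="-1"
 *	net.isr.bindthreads="1"
 *	net.isr.defaultqlimit="1024"
 */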
/*
 * Store and export the compile-time constant NETISR_MAXPROT limit on the
 * number of protocols that can register with netisr at a time.  This is
 * required for crashdump analysis, as it sizes netisr_proto[].
 */
static u_int	netisr_maxprot = NETISR_MAXPROT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
    &netisr_maxprot, 0,
    "Compile-time limit on the number of protocols supported by netisr.");

/*
 * The netisr_proto array describes all registered protocols, indexed by
 * protocol number.  See netisr_internal.h for more details.
 */
static struct netisr_proto	netisr_proto[NETISR_MAXPROT];

#ifdef VIMAGE
/*
 * The netisr_enable array holds a per-VNET flag, for each registered
 * protocol, indicating whether that netisr is active in the VNET.
 * netisr_register() will automatically enable the netisr for the
 * default VNET and all currently active instances.
 * netisr_unregister() will disable all active VNETs, including vnet0.
 * Individual network stack instances can be enabled/disabled by the
 * netisr_(un)register_vnet() functions.
 * With this we keep the one netisr_proto per protocol but add a
 * mechanism to stop netisr processing for vnet teardown.
 * Apart from that we expect a VNET to always be enabled.
 */
VNET_DEFINE_STATIC(u_int,	netisr_enable[NETISR_MAXPROT]);
#define	V_netisr_enable		VNET(netisr_enable)
#endif

/*
 * Per-CPU workstream data.  See netisr_internal.h for more details.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);

/*
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int	nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int	nws_count;
SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)

/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	return (nws_array[cpunumber % nws_count]);
}

/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}

/*
 * Dispatch tunable and sysctl configuration.
297 */ 298 struct netisr_dispatch_table_entry { 299 u_int ndte_policy; 300 const char *ndte_policy_str; 301 }; 302 static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = { 303 { NETISR_DISPATCH_DEFAULT, "default" }, 304 { NETISR_DISPATCH_DEFERRED, "deferred" }, 305 { NETISR_DISPATCH_HYBRID, "hybrid" }, 306 { NETISR_DISPATCH_DIRECT, "direct" }, 307 }; 308 309 static void 310 netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer, 311 u_int buflen) 312 { 313 const struct netisr_dispatch_table_entry *ndtep; 314 const char *str; 315 u_int i; 316 317 str = "unknown"; 318 for (i = 0; i < nitems(netisr_dispatch_table); i++) { 319 ndtep = &netisr_dispatch_table[i]; 320 if (ndtep->ndte_policy == dispatch_policy) { 321 str = ndtep->ndte_policy_str; 322 break; 323 } 324 } 325 snprintf(buffer, buflen, "%s", str); 326 } 327 328 static int 329 netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp) 330 { 331 const struct netisr_dispatch_table_entry *ndtep; 332 u_int i; 333 334 for (i = 0; i < nitems(netisr_dispatch_table); i++) { 335 ndtep = &netisr_dispatch_table[i]; 336 if (strcmp(ndtep->ndte_policy_str, str) == 0) { 337 *dispatch_policyp = ndtep->ndte_policy; 338 return (0); 339 } 340 } 341 return (EINVAL); 342 } 343 344 static int 345 sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS) 346 { 347 char tmp[NETISR_DISPATCH_POLICY_MAXSTR]; 348 size_t len; 349 u_int dispatch_policy; 350 int error; 351 352 netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp, 353 sizeof(tmp)); 354 /* 355 * netisr is initialised very early during the boot when malloc isn't 356 * available yet so we can't use sysctl_handle_string() to process 357 * any non-default value that was potentially set via loader. 358 */ 359 if (req->newptr != NULL) { 360 len = req->newlen - req->newidx; 361 if (len >= NETISR_DISPATCH_POLICY_MAXSTR) 362 return (EINVAL); 363 error = SYSCTL_IN(req, tmp, len); 364 if (error == 0) { 365 tmp[len] = '\0'; 366 error = netisr_dispatch_policy_from_str(tmp, 367 &dispatch_policy); 368 if (error == 0 && 369 dispatch_policy == NETISR_DISPATCH_DEFAULT) 370 error = EINVAL; 371 if (error == 0) 372 netisr_dispatch_policy = dispatch_policy; 373 } 374 } else { 375 error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req); 376 } 377 return (error); 378 } 379 380 /* 381 * Register a new netisr handler, which requires initializing per-protocol 382 * fields for each workstream. All netisr work is briefly suspended while 383 * the protocol is installed. 384 */ 385 void 386 netisr_register(const struct netisr_handler *nhp) 387 { 388 VNET_ITERATOR_DECL(vnet_iter); 389 struct netisr_work *npwp; 390 const char *name; 391 u_int i, proto; 392 393 proto = nhp->nh_proto; 394 name = nhp->nh_name; 395 396 /* 397 * Test that the requested registration is valid. 
398 */ 399 KASSERT(nhp->nh_name != NULL, 400 ("%s: nh_name NULL for %u", __func__, proto)); 401 KASSERT(nhp->nh_handler != NULL, 402 ("%s: nh_handler NULL for %s", __func__, name)); 403 KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE || 404 nhp->nh_policy == NETISR_POLICY_FLOW || 405 nhp->nh_policy == NETISR_POLICY_CPU, 406 ("%s: unsupported nh_policy %u for %s", __func__, 407 nhp->nh_policy, name)); 408 KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW || 409 nhp->nh_m2flow == NULL, 410 ("%s: nh_policy != FLOW but m2flow defined for %s", __func__, 411 name)); 412 KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL, 413 ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__, 414 name)); 415 KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL, 416 ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__, 417 name)); 418 KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT || 419 nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED || 420 nhp->nh_dispatch == NETISR_DISPATCH_HYBRID || 421 nhp->nh_dispatch == NETISR_DISPATCH_DIRECT, 422 ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch)); 423 424 KASSERT(proto < NETISR_MAXPROT, 425 ("%s(%u, %s): protocol too big", __func__, proto, name)); 426 427 /* 428 * Test that no existing registration exists for this protocol. 429 */ 430 NETISR_WLOCK(); 431 KASSERT(netisr_proto[proto].np_name == NULL, 432 ("%s(%u, %s): name present", __func__, proto, name)); 433 KASSERT(netisr_proto[proto].np_handler == NULL, 434 ("%s(%u, %s): handler present", __func__, proto, name)); 435 436 netisr_proto[proto].np_name = name; 437 netisr_proto[proto].np_handler = nhp->nh_handler; 438 netisr_proto[proto].np_m2flow = nhp->nh_m2flow; 439 netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid; 440 netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu; 441 if (nhp->nh_qlimit == 0) 442 netisr_proto[proto].np_qlimit = netisr_defaultqlimit; 443 else if (nhp->nh_qlimit > netisr_maxqlimit) { 444 printf("%s: %s requested queue limit %u capped to " 445 "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit, 446 netisr_maxqlimit); 447 netisr_proto[proto].np_qlimit = netisr_maxqlimit; 448 } else 449 netisr_proto[proto].np_qlimit = nhp->nh_qlimit; 450 netisr_proto[proto].np_policy = nhp->nh_policy; 451 netisr_proto[proto].np_dispatch = nhp->nh_dispatch; 452 CPU_FOREACH(i) { 453 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 454 bzero(npwp, sizeof(*npwp)); 455 npwp->nw_qlimit = netisr_proto[proto].np_qlimit; 456 } 457 458 #ifdef VIMAGE 459 /* 460 * Test that we are in vnet0 and have a curvnet set. 461 */ 462 KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__)); 463 KASSERT(IS_DEFAULT_VNET(curvnet), ("%s: curvnet %p is not vnet0 %p", 464 __func__, curvnet, vnet0)); 465 VNET_LIST_RLOCK_NOSLEEP(); 466 VNET_FOREACH(vnet_iter) { 467 CURVNET_SET(vnet_iter); 468 V_netisr_enable[proto] = 1; 469 CURVNET_RESTORE(); 470 } 471 VNET_LIST_RUNLOCK_NOSLEEP(); 472 #endif 473 NETISR_WUNLOCK(); 474 } 475 476 /* 477 * Clear drop counters across all workstreams for a protocol. 
478 */ 479 void 480 netisr_clearqdrops(const struct netisr_handler *nhp) 481 { 482 struct netisr_work *npwp; 483 #ifdef INVARIANTS 484 const char *name; 485 #endif 486 u_int i, proto; 487 488 proto = nhp->nh_proto; 489 #ifdef INVARIANTS 490 name = nhp->nh_name; 491 #endif 492 KASSERT(proto < NETISR_MAXPROT, 493 ("%s(%u): protocol too big for %s", __func__, proto, name)); 494 495 NETISR_WLOCK(); 496 KASSERT(netisr_proto[proto].np_handler != NULL, 497 ("%s(%u): protocol not registered for %s", __func__, proto, 498 name)); 499 500 CPU_FOREACH(i) { 501 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 502 npwp->nw_qdrops = 0; 503 } 504 NETISR_WUNLOCK(); 505 } 506 507 /* 508 * Query current drop counters across all workstreams for a protocol. 509 */ 510 void 511 netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp) 512 { 513 struct netisr_work *npwp; 514 struct rm_priotracker tracker; 515 #ifdef INVARIANTS 516 const char *name; 517 #endif 518 u_int i, proto; 519 520 *qdropp = 0; 521 proto = nhp->nh_proto; 522 #ifdef INVARIANTS 523 name = nhp->nh_name; 524 #endif 525 KASSERT(proto < NETISR_MAXPROT, 526 ("%s(%u): protocol too big for %s", __func__, proto, name)); 527 528 NETISR_RLOCK(&tracker); 529 KASSERT(netisr_proto[proto].np_handler != NULL, 530 ("%s(%u): protocol not registered for %s", __func__, proto, 531 name)); 532 533 CPU_FOREACH(i) { 534 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 535 *qdropp += npwp->nw_qdrops; 536 } 537 NETISR_RUNLOCK(&tracker); 538 } 539 540 /* 541 * Query current per-workstream queue limit for a protocol. 542 */ 543 void 544 netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp) 545 { 546 struct rm_priotracker tracker; 547 #ifdef INVARIANTS 548 const char *name; 549 #endif 550 u_int proto; 551 552 proto = nhp->nh_proto; 553 #ifdef INVARIANTS 554 name = nhp->nh_name; 555 #endif 556 KASSERT(proto < NETISR_MAXPROT, 557 ("%s(%u): protocol too big for %s", __func__, proto, name)); 558 559 NETISR_RLOCK(&tracker); 560 KASSERT(netisr_proto[proto].np_handler != NULL, 561 ("%s(%u): protocol not registered for %s", __func__, proto, 562 name)); 563 *qlimitp = netisr_proto[proto].np_qlimit; 564 NETISR_RUNLOCK(&tracker); 565 } 566 567 /* 568 * Update the queue limit across per-workstream queues for a protocol. We 569 * simply change the limits, and don't drain overflowed packets as they will 570 * (hopefully) take care of themselves shortly. 571 */ 572 int 573 netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit) 574 { 575 struct netisr_work *npwp; 576 #ifdef INVARIANTS 577 const char *name; 578 #endif 579 u_int i, proto; 580 581 if (qlimit > netisr_maxqlimit) 582 return (EINVAL); 583 584 proto = nhp->nh_proto; 585 #ifdef INVARIANTS 586 name = nhp->nh_name; 587 #endif 588 KASSERT(proto < NETISR_MAXPROT, 589 ("%s(%u): protocol too big for %s", __func__, proto, name)); 590 591 NETISR_WLOCK(); 592 KASSERT(netisr_proto[proto].np_handler != NULL, 593 ("%s(%u): protocol not registered for %s", __func__, proto, 594 name)); 595 596 netisr_proto[proto].np_qlimit = qlimit; 597 CPU_FOREACH(i) { 598 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 599 npwp->nw_qlimit = qlimit; 600 } 601 NETISR_WUNLOCK(); 602 return (0); 603 } 604 605 /* 606 * Drain all packets currently held in a particular protocol work queue. 607 */ 608 static void 609 netisr_drain_proto(struct netisr_work *npwp) 610 { 611 struct mbuf *m; 612 613 /* 614 * We would assert the lock on the workstream but it's not passed in. 
615 */ 616 while ((m = npwp->nw_head) != NULL) { 617 npwp->nw_head = m->m_nextpkt; 618 m->m_nextpkt = NULL; 619 if (npwp->nw_head == NULL) 620 npwp->nw_tail = NULL; 621 npwp->nw_len--; 622 m_freem(m); 623 } 624 KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__)); 625 KASSERT(npwp->nw_len == 0, ("%s: len", __func__)); 626 } 627 628 /* 629 * Remove the registration of a network protocol, which requires clearing 630 * per-protocol fields across all workstreams, including freeing all mbufs in 631 * the queues at time of unregister. All work in netisr is briefly suspended 632 * while this takes place. 633 */ 634 void 635 netisr_unregister(const struct netisr_handler *nhp) 636 { 637 VNET_ITERATOR_DECL(vnet_iter); 638 struct netisr_work *npwp; 639 #ifdef INVARIANTS 640 const char *name; 641 #endif 642 u_int i, proto; 643 644 proto = nhp->nh_proto; 645 #ifdef INVARIANTS 646 name = nhp->nh_name; 647 #endif 648 KASSERT(proto < NETISR_MAXPROT, 649 ("%s(%u): protocol too big for %s", __func__, proto, name)); 650 651 NETISR_WLOCK(); 652 KASSERT(netisr_proto[proto].np_handler != NULL, 653 ("%s(%u): protocol not registered for %s", __func__, proto, 654 name)); 655 656 #ifdef VIMAGE 657 VNET_LIST_RLOCK_NOSLEEP(); 658 VNET_FOREACH(vnet_iter) { 659 CURVNET_SET(vnet_iter); 660 V_netisr_enable[proto] = 0; 661 CURVNET_RESTORE(); 662 } 663 VNET_LIST_RUNLOCK_NOSLEEP(); 664 #endif 665 666 netisr_proto[proto].np_name = NULL; 667 netisr_proto[proto].np_handler = NULL; 668 netisr_proto[proto].np_m2flow = NULL; 669 netisr_proto[proto].np_m2cpuid = NULL; 670 netisr_proto[proto].np_qlimit = 0; 671 netisr_proto[proto].np_policy = 0; 672 CPU_FOREACH(i) { 673 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 674 netisr_drain_proto(npwp); 675 bzero(npwp, sizeof(*npwp)); 676 } 677 NETISR_WUNLOCK(); 678 } 679 680 #ifdef VIMAGE 681 void 682 netisr_register_vnet(const struct netisr_handler *nhp) 683 { 684 u_int proto; 685 686 proto = nhp->nh_proto; 687 688 KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__)); 689 KASSERT(proto < NETISR_MAXPROT, 690 ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name)); 691 NETISR_WLOCK(); 692 KASSERT(netisr_proto[proto].np_handler != NULL, 693 ("%s(%u): protocol not registered for %s", __func__, proto, 694 nhp->nh_name)); 695 696 V_netisr_enable[proto] = 1; 697 NETISR_WUNLOCK(); 698 } 699 700 static void 701 netisr_drain_proto_vnet(struct vnet *vnet, u_int proto) 702 { 703 struct epoch_tracker et; 704 struct netisr_workstream *nwsp; 705 struct netisr_work *npwp; 706 struct mbuf *m, *mp, *n, *ne; 707 struct ifnet *ifp; 708 u_int i; 709 710 KASSERT(vnet != NULL, ("%s: vnet is NULL", __func__)); 711 NETISR_LOCK_ASSERT(); 712 713 CPU_FOREACH(i) { 714 nwsp = DPCPU_ID_PTR(i, nws); 715 if (nwsp->nws_intr_event == NULL) 716 continue; 717 npwp = &nwsp->nws_work[proto]; 718 NWS_LOCK(nwsp); 719 720 /* 721 * Rather than dissecting and removing mbufs from the middle 722 * of the chain, we build a new chain if the packet stays and 723 * update the head and tail pointers at the end. All packets 724 * matching the given vnet are freed. 
725 */ 726 m = npwp->nw_head; 727 n = ne = NULL; 728 NET_EPOCH_ENTER(et); 729 while (m != NULL) { 730 mp = m; 731 m = m->m_nextpkt; 732 mp->m_nextpkt = NULL; 733 if ((ifp = ifnet_byindexgen(mp->m_pkthdr.rcvidx, 734 mp->m_pkthdr.rcvgen)) != NULL && 735 ifp->if_vnet != vnet) { 736 if (n == NULL) { 737 n = ne = mp; 738 } else { 739 ne->m_nextpkt = mp; 740 ne = mp; 741 } 742 continue; 743 } 744 /* This is a packet in the selected vnet, or belongs 745 to destroyed interface. Free it. */ 746 npwp->nw_len--; 747 m_freem(mp); 748 } 749 NET_EPOCH_EXIT(et); 750 npwp->nw_head = n; 751 npwp->nw_tail = ne; 752 NWS_UNLOCK(nwsp); 753 } 754 } 755 756 void 757 netisr_unregister_vnet(const struct netisr_handler *nhp) 758 { 759 u_int proto; 760 761 proto = nhp->nh_proto; 762 763 KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__)); 764 KASSERT(proto < NETISR_MAXPROT, 765 ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name)); 766 NETISR_WLOCK(); 767 KASSERT(netisr_proto[proto].np_handler != NULL, 768 ("%s(%u): protocol not registered for %s", __func__, proto, 769 nhp->nh_name)); 770 771 V_netisr_enable[proto] = 0; 772 773 netisr_drain_proto_vnet(curvnet, proto); 774 NETISR_WUNLOCK(); 775 } 776 #endif 777 778 /* 779 * Compose the global and per-protocol policies on dispatch, and return the 780 * dispatch policy to use. 781 */ 782 static u_int 783 netisr_get_dispatch(struct netisr_proto *npp) 784 { 785 786 /* 787 * Protocol-specific configuration overrides the global default. 788 */ 789 if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT) 790 return (npp->np_dispatch); 791 return (netisr_dispatch_policy); 792 } 793 794 /* 795 * Look up the workstream given a packet and source identifier. Do this by 796 * checking the protocol's policy, and optionally call out to the protocol 797 * for assistance if required. 798 */ 799 static struct mbuf * 800 netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy, 801 uintptr_t source, struct mbuf *m, u_int *cpuidp) 802 { 803 struct ifnet *ifp; 804 u_int policy; 805 806 NETISR_LOCK_ASSERT(); 807 808 /* 809 * In the event we have only one worker, shortcut and deliver to it 810 * without further ado. 811 */ 812 if (nws_count == 1) { 813 *cpuidp = nws_array[0]; 814 return (m); 815 } 816 817 /* 818 * What happens next depends on the policy selected by the protocol. 819 * If we want to support per-interface policies, we should do that 820 * here first. 821 */ 822 policy = npp->np_policy; 823 if (policy == NETISR_POLICY_CPU) { 824 m = npp->np_m2cpuid(m, source, cpuidp); 825 if (m == NULL) 826 return (NULL); 827 828 /* 829 * It's possible for a protocol not to have a good idea about 830 * where to process a packet, in which case we fall back on 831 * the netisr code to decide. In the hybrid case, return the 832 * current CPU ID, which will force an immediate direct 833 * dispatch. In the queued case, fall back on the SOURCE 834 * policy. 
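		 *
		 * As an illustrative sketch (foo_m2cpuid() and its helpers
		 * are hypothetical), a NETISR_POLICY_CPU protocol's
		 * nh_m2cpuid callback is expected to follow this contract:
		 *
		 *	static struct mbuf *
		 *	foo_m2cpuid(struct mbuf *m, uintptr_t source,
		 *	    u_int *cpuid)
		 *	{
		 *
		 *		if (foo_has_affinity(m))
		 *			*cpuid = foo_flow_to_cpu(m);
		 *		else
		 *			*cpuid = NETISR_CPUID_NONE;
		 *		return (m);
		 *	}
		 *
		 * The callback may also consume the mbuf and return NULL; a
		 * returned CPU number is mapped onto an active workstream
		 * via netisr_get_cpuid() below.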
835 */ 836 if (*cpuidp != NETISR_CPUID_NONE) { 837 *cpuidp = netisr_get_cpuid(*cpuidp); 838 return (m); 839 } 840 if (dispatch_policy == NETISR_DISPATCH_HYBRID) { 841 *cpuidp = netisr_get_cpuid(curcpu); 842 return (m); 843 } 844 policy = NETISR_POLICY_SOURCE; 845 } 846 847 if (policy == NETISR_POLICY_FLOW) { 848 if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE && 849 npp->np_m2flow != NULL) { 850 m = npp->np_m2flow(m, source); 851 if (m == NULL) 852 return (NULL); 853 } 854 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { 855 *cpuidp = 856 netisr_default_flow2cpu(m->m_pkthdr.flowid); 857 return (m); 858 } 859 policy = NETISR_POLICY_SOURCE; 860 } 861 862 KASSERT(policy == NETISR_POLICY_SOURCE, 863 ("%s: invalid policy %u for %s", __func__, npp->np_policy, 864 npp->np_name)); 865 866 MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); 867 ifp = m->m_pkthdr.rcvif; 868 if (ifp != NULL) 869 *cpuidp = nws_array[(ifp->if_index + source) % nws_count]; 870 else 871 *cpuidp = nws_array[source % nws_count]; 872 return (m); 873 } 874 875 /* 876 * Process packets associated with a workstream and protocol. For reasons of 877 * fairness, we process up to one complete netisr queue at a time, moving the 878 * queue to a stack-local queue for processing, but do not loop refreshing 879 * from the global queue. The caller is responsible for deciding whether to 880 * loop, and for setting the NWS_RUNNING flag. The passed workstream will be 881 * locked on entry and relocked before return, but will be released while 882 * processing. The number of packets processed is returned. 883 */ 884 static u_int 885 netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto) 886 { 887 struct netisr_work local_npw, *npwp; 888 u_int handled; 889 struct mbuf *m; 890 891 NETISR_LOCK_ASSERT(); 892 NWS_LOCK_ASSERT(nwsp); 893 894 KASSERT(nwsp->nws_flags & NWS_RUNNING, 895 ("%s(%u): not running", __func__, proto)); 896 KASSERT(proto >= 0 && proto < NETISR_MAXPROT, 897 ("%s(%u): invalid proto\n", __func__, proto)); 898 899 npwp = &nwsp->nws_work[proto]; 900 if (npwp->nw_len == 0) 901 return (0); 902 903 /* 904 * Move the global work queue to a thread-local work queue. 905 * 906 * Notice that this means the effective maximum length of the queue 907 * is actually twice that of the maximum queue length specified in 908 * the protocol registration call. 909 */ 910 handled = npwp->nw_len; 911 local_npw = *npwp; 912 npwp->nw_head = NULL; 913 npwp->nw_tail = NULL; 914 npwp->nw_len = 0; 915 nwsp->nws_pendingbits &= ~(1 << proto); 916 NWS_UNLOCK(nwsp); 917 while ((m = local_npw.nw_head) != NULL) { 918 local_npw.nw_head = m->m_nextpkt; 919 m->m_nextpkt = NULL; 920 if (local_npw.nw_head == NULL) 921 local_npw.nw_tail = NULL; 922 local_npw.nw_len--; 923 if (__predict_false(m_rcvif_restore(m) == NULL)) { 924 m_freem(m); 925 continue; 926 } 927 CURVNET_SET(m->m_pkthdr.rcvif->if_vnet); 928 netisr_proto[proto].np_handler(m); 929 CURVNET_RESTORE(); 930 } 931 KASSERT(local_npw.nw_len == 0, 932 ("%s(%u): len %u", __func__, proto, local_npw.nw_len)); 933 if (netisr_proto[proto].np_drainedcpu) 934 netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu); 935 NWS_LOCK(nwsp); 936 npwp->nw_handled += handled; 937 return (handled); 938 } 939 940 /* 941 * SWI handler for netisr -- processes packets in a set of workstreams that 942 * it owns, woken up by calls to NWS_SIGNAL(). If this workstream is already 943 * being direct dispatched, go back to sleep and wait for the dispatching 944 * thread to wake us up again. 
945 */ 946 static void 947 swi_net(void *arg) 948 { 949 #ifdef NETISR_LOCKING 950 struct rm_priotracker tracker; 951 #endif 952 struct netisr_workstream *nwsp; 953 u_int bits, prot; 954 955 nwsp = arg; 956 957 #ifdef DEVICE_POLLING 958 KASSERT(nws_count == 1, 959 ("%s: device_polling but nws_count != 1", __func__)); 960 netisr_poll(); 961 #endif 962 #ifdef NETISR_LOCKING 963 NETISR_RLOCK(&tracker); 964 #endif 965 NWS_LOCK(nwsp); 966 KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running")); 967 if (nwsp->nws_flags & NWS_DISPATCHING) 968 goto out; 969 nwsp->nws_flags |= NWS_RUNNING; 970 nwsp->nws_flags &= ~NWS_SCHEDULED; 971 while ((bits = nwsp->nws_pendingbits) != 0) { 972 while ((prot = ffs(bits)) != 0) { 973 prot--; 974 bits &= ~(1 << prot); 975 (void)netisr_process_workstream_proto(nwsp, prot); 976 } 977 } 978 nwsp->nws_flags &= ~NWS_RUNNING; 979 out: 980 NWS_UNLOCK(nwsp); 981 #ifdef NETISR_LOCKING 982 NETISR_RUNLOCK(&tracker); 983 #endif 984 #ifdef DEVICE_POLLING 985 netisr_pollmore(); 986 #endif 987 } 988 989 static int 990 netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto, 991 struct netisr_work *npwp, struct mbuf *m, int *dosignalp) 992 { 993 994 NWS_LOCK_ASSERT(nwsp); 995 996 *dosignalp = 0; 997 if (npwp->nw_len < npwp->nw_qlimit) { 998 m_rcvif_serialize(m); 999 m->m_nextpkt = NULL; 1000 if (npwp->nw_head == NULL) { 1001 npwp->nw_head = m; 1002 npwp->nw_tail = m; 1003 } else { 1004 npwp->nw_tail->m_nextpkt = m; 1005 npwp->nw_tail = m; 1006 } 1007 npwp->nw_len++; 1008 if (npwp->nw_len > npwp->nw_watermark) 1009 npwp->nw_watermark = npwp->nw_len; 1010 1011 /* 1012 * We must set the bit regardless of NWS_RUNNING, so that 1013 * swi_net() keeps calling netisr_process_workstream_proto(). 1014 */ 1015 nwsp->nws_pendingbits |= (1 << proto); 1016 if (!(nwsp->nws_flags & 1017 (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) { 1018 nwsp->nws_flags |= NWS_SCHEDULED; 1019 *dosignalp = 1; /* Defer until unlocked. 
*/ 1020 } 1021 npwp->nw_queued++; 1022 return (0); 1023 } else { 1024 m_freem(m); 1025 npwp->nw_qdrops++; 1026 return (ENOBUFS); 1027 } 1028 } 1029 1030 static int 1031 netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid) 1032 { 1033 struct netisr_workstream *nwsp; 1034 struct netisr_work *npwp; 1035 int dosignal, error; 1036 1037 #ifdef NETISR_LOCKING 1038 NETISR_LOCK_ASSERT(); 1039 #endif 1040 KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__, 1041 cpuid, mp_maxid)); 1042 KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid)); 1043 1044 dosignal = 0; 1045 error = 0; 1046 nwsp = DPCPU_ID_PTR(cpuid, nws); 1047 npwp = &nwsp->nws_work[proto]; 1048 NWS_LOCK(nwsp); 1049 error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal); 1050 NWS_UNLOCK(nwsp); 1051 if (dosignal) 1052 NWS_SIGNAL(nwsp); 1053 return (error); 1054 } 1055 1056 int 1057 netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m) 1058 { 1059 #ifdef NETISR_LOCKING 1060 struct rm_priotracker tracker; 1061 #endif 1062 u_int cpuid; 1063 int error; 1064 1065 KASSERT(proto < NETISR_MAXPROT, 1066 ("%s: invalid proto %u", __func__, proto)); 1067 1068 #ifdef NETISR_LOCKING 1069 NETISR_RLOCK(&tracker); 1070 #endif 1071 KASSERT(netisr_proto[proto].np_handler != NULL, 1072 ("%s: invalid proto %u", __func__, proto)); 1073 1074 #ifdef VIMAGE 1075 if (V_netisr_enable[proto] == 0) { 1076 m_freem(m); 1077 return (ENOPROTOOPT); 1078 } 1079 #endif 1080 1081 m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED, 1082 source, m, &cpuid); 1083 if (m != NULL) { 1084 KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, 1085 cpuid)); 1086 VNET_ASSERT(m->m_pkthdr.rcvif != NULL, 1087 ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m)); 1088 error = netisr_queue_internal(proto, m, cpuid); 1089 } else 1090 error = ENOBUFS; 1091 #ifdef NETISR_LOCKING 1092 NETISR_RUNLOCK(&tracker); 1093 #endif 1094 return (error); 1095 } 1096 1097 int 1098 netisr_queue(u_int proto, struct mbuf *m) 1099 { 1100 1101 return (netisr_queue_src(proto, 0, m)); 1102 } 1103 1104 /* 1105 * Dispatch a packet for netisr processing; direct dispatch is permitted by 1106 * calling context. 1107 */ 1108 int 1109 netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m) 1110 { 1111 #ifdef NETISR_LOCKING 1112 struct rm_priotracker tracker; 1113 #endif 1114 struct netisr_workstream *nwsp; 1115 struct netisr_proto *npp; 1116 struct netisr_work *npwp; 1117 int dosignal, error; 1118 u_int cpuid, dispatch_policy; 1119 1120 NET_EPOCH_ASSERT(); 1121 KASSERT(proto < NETISR_MAXPROT, 1122 ("%s: invalid proto %u", __func__, proto)); 1123 #ifdef NETISR_LOCKING 1124 NETISR_RLOCK(&tracker); 1125 #endif 1126 npp = &netisr_proto[proto]; 1127 KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__, 1128 proto)); 1129 1130 #ifdef VIMAGE 1131 if (V_netisr_enable[proto] == 0) { 1132 m_freem(m); 1133 return (ENOPROTOOPT); 1134 } 1135 #endif 1136 1137 dispatch_policy = netisr_get_dispatch(npp); 1138 if (dispatch_policy == NETISR_DISPATCH_DEFERRED) 1139 return (netisr_queue_src(proto, source, m)); 1140 1141 /* 1142 * If direct dispatch is forced, then unconditionally dispatch 1143 * without a formal CPU selection. Borrow the current CPU's stats, 1144 * even if there's no worker on it. In this case we don't update 1145 * nws_flags because all netisr processing will be source ordered due 1146 * to always being forced to directly dispatch. 
1147 */ 1148 if (dispatch_policy == NETISR_DISPATCH_DIRECT) { 1149 nwsp = DPCPU_PTR(nws); 1150 npwp = &nwsp->nws_work[proto]; 1151 npwp->nw_dispatched++; 1152 npwp->nw_handled++; 1153 netisr_proto[proto].np_handler(m); 1154 error = 0; 1155 goto out_unlock; 1156 } 1157 1158 KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID, 1159 ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy)); 1160 1161 /* 1162 * Otherwise, we execute in a hybrid mode where we will try to direct 1163 * dispatch if we're on the right CPU and the netisr worker isn't 1164 * already running. 1165 */ 1166 sched_pin(); 1167 m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID, 1168 source, m, &cpuid); 1169 if (m == NULL) { 1170 error = ENOBUFS; 1171 goto out_unpin; 1172 } 1173 KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid)); 1174 if (cpuid != curcpu) 1175 goto queue_fallback; 1176 nwsp = DPCPU_PTR(nws); 1177 npwp = &nwsp->nws_work[proto]; 1178 1179 /*- 1180 * We are willing to direct dispatch only if three conditions hold: 1181 * 1182 * (1) The netisr worker isn't already running, 1183 * (2) Another thread isn't already directly dispatching, and 1184 * (3) The netisr hasn't already been woken up. 1185 */ 1186 NWS_LOCK(nwsp); 1187 if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) { 1188 error = netisr_queue_workstream(nwsp, proto, npwp, m, 1189 &dosignal); 1190 NWS_UNLOCK(nwsp); 1191 if (dosignal) 1192 NWS_SIGNAL(nwsp); 1193 goto out_unpin; 1194 } 1195 1196 /* 1197 * The current thread is now effectively the netisr worker, so set 1198 * the dispatching flag to prevent concurrent processing of the 1199 * stream from another thread (even the netisr worker), which could 1200 * otherwise lead to effective misordering of the stream. 1201 */ 1202 nwsp->nws_flags |= NWS_DISPATCHING; 1203 NWS_UNLOCK(nwsp); 1204 netisr_proto[proto].np_handler(m); 1205 NWS_LOCK(nwsp); 1206 nwsp->nws_flags &= ~NWS_DISPATCHING; 1207 npwp->nw_handled++; 1208 npwp->nw_hybrid_dispatched++; 1209 1210 /* 1211 * If other work was enqueued by another thread while we were direct 1212 * dispatching, we need to signal the netisr worker to do that work. 1213 * In the future, we might want to do some of that work in the 1214 * current thread, rather than trigger further context switches. If 1215 * so, we'll want to establish a reasonable bound on the work done in 1216 * the "borrowed" context. 1217 */ 1218 if (nwsp->nws_pendingbits != 0) { 1219 nwsp->nws_flags |= NWS_SCHEDULED; 1220 dosignal = 1; 1221 } else 1222 dosignal = 0; 1223 NWS_UNLOCK(nwsp); 1224 if (dosignal) 1225 NWS_SIGNAL(nwsp); 1226 error = 0; 1227 goto out_unpin; 1228 1229 queue_fallback: 1230 error = netisr_queue_internal(proto, m, cpuid); 1231 out_unpin: 1232 sched_unpin(); 1233 out_unlock: 1234 #ifdef NETISR_LOCKING 1235 NETISR_RUNLOCK(&tracker); 1236 #endif 1237 return (error); 1238 } 1239 1240 int 1241 netisr_dispatch(u_int proto, struct mbuf *m) 1242 { 1243 1244 return (netisr_dispatch_src(proto, 0, m)); 1245 } 1246 1247 #ifdef DEVICE_POLLING 1248 /* 1249 * Kernel polling borrows a netisr thread to run interface polling in; this 1250 * function allows kernel polling to request that the netisr thread be 1251 * scheduled even if no packets are pending for protocols. 
1252 */ 1253 void 1254 netisr_sched_poll(void) 1255 { 1256 struct netisr_workstream *nwsp; 1257 1258 nwsp = DPCPU_ID_PTR(nws_array[0], nws); 1259 NWS_SIGNAL(nwsp); 1260 } 1261 #endif 1262 1263 static void 1264 netisr_start_swi(u_int cpuid, struct pcpu *pc) 1265 { 1266 char swiname[12]; 1267 struct netisr_workstream *nwsp; 1268 int error; 1269 1270 KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid)); 1271 1272 nwsp = DPCPU_ID_PTR(cpuid, nws); 1273 mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF); 1274 nwsp->nws_cpu = cpuid; 1275 snprintf(swiname, sizeof(swiname), "netisr %u", cpuid); 1276 error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp, 1277 SWI_NET, INTR_TYPE_NET | INTR_MPSAFE, &nwsp->nws_swi_cookie); 1278 if (error) 1279 panic("%s: swi_add %d", __func__, error); 1280 pc->pc_netisr = nwsp->nws_intr_event; 1281 if (netisr_bindthreads) { 1282 error = intr_event_bind(nwsp->nws_intr_event, cpuid); 1283 if (error != 0) 1284 printf("%s: cpu %u: intr_event_bind: %d", __func__, 1285 cpuid, error); 1286 } 1287 NETISR_WLOCK(); 1288 nws_array[nws_count] = nwsp->nws_cpu; 1289 nws_count++; 1290 NETISR_WUNLOCK(); 1291 } 1292 1293 /* 1294 * Initialize the netisr subsystem. We rely on BSS and static initialization 1295 * of most fields in global data structures. 1296 * 1297 * Start a worker thread for the boot CPU so that we can support network 1298 * traffic immediately in case the network stack is used before additional 1299 * CPUs are started (for example, diskless boot). 1300 */ 1301 static void 1302 netisr_init(void *arg) 1303 { 1304 struct pcpu *pc; 1305 1306 NETISR_LOCK_INIT(); 1307 if (netisr_maxthreads == 0 || netisr_maxthreads < -1 ) 1308 netisr_maxthreads = 1; /* default behavior */ 1309 else if (netisr_maxthreads == -1) 1310 netisr_maxthreads = mp_ncpus; /* use max cpus */ 1311 if (netisr_maxthreads > mp_ncpus) { 1312 printf("netisr_init: forcing maxthreads from %d to %d\n", 1313 netisr_maxthreads, mp_ncpus); 1314 netisr_maxthreads = mp_ncpus; 1315 } 1316 if (netisr_defaultqlimit > netisr_maxqlimit) { 1317 printf("netisr_init: forcing defaultqlimit from %d to %d\n", 1318 netisr_defaultqlimit, netisr_maxqlimit); 1319 netisr_defaultqlimit = netisr_maxqlimit; 1320 } 1321 #ifdef DEVICE_POLLING 1322 /* 1323 * The device polling code is not yet aware of how to deal with 1324 * multiple netisr threads, so for the time being compiling in device 1325 * polling disables parallel netisr workers. 1326 */ 1327 if (netisr_maxthreads != 1 || netisr_bindthreads != 0) { 1328 printf("netisr_init: forcing maxthreads to 1 and " 1329 "bindthreads to 0 for device polling\n"); 1330 netisr_maxthreads = 1; 1331 netisr_bindthreads = 0; 1332 } 1333 #endif 1334 1335 #ifdef EARLY_AP_STARTUP 1336 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 1337 if (nws_count >= netisr_maxthreads) 1338 break; 1339 netisr_start_swi(pc->pc_cpuid, pc); 1340 } 1341 #else 1342 pc = get_pcpu(); 1343 netisr_start_swi(pc->pc_cpuid, pc); 1344 #endif 1345 } 1346 SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL); 1347 1348 #ifndef EARLY_AP_STARTUP 1349 /* 1350 * Start worker threads for additional CPUs. No attempt to gracefully handle 1351 * work reassignment, we don't yet support dynamic reconfiguration. 1352 */ 1353 static void 1354 netisr_start(void *arg) 1355 { 1356 struct pcpu *pc; 1357 1358 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 1359 if (nws_count >= netisr_maxthreads) 1360 break; 1361 /* Worker will already be present for boot CPU. 
*/ 1362 if (pc->pc_netisr != NULL) 1363 continue; 1364 netisr_start_swi(pc->pc_cpuid, pc); 1365 } 1366 } 1367 SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL); 1368 #endif 1369 1370 /* 1371 * Sysctl monitoring for netisr: query a list of registered protocols. 1372 */ 1373 static int 1374 sysctl_netisr_proto(SYSCTL_HANDLER_ARGS) 1375 { 1376 struct rm_priotracker tracker; 1377 struct sysctl_netisr_proto *snpp, *snp_array; 1378 struct netisr_proto *npp; 1379 u_int counter, proto; 1380 int error; 1381 1382 if (req->newptr != NULL) 1383 return (EINVAL); 1384 snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP, 1385 M_ZERO | M_WAITOK); 1386 counter = 0; 1387 NETISR_RLOCK(&tracker); 1388 for (proto = 0; proto < NETISR_MAXPROT; proto++) { 1389 npp = &netisr_proto[proto]; 1390 if (npp->np_name == NULL) 1391 continue; 1392 snpp = &snp_array[counter]; 1393 snpp->snp_version = sizeof(*snpp); 1394 strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN); 1395 snpp->snp_proto = proto; 1396 snpp->snp_qlimit = npp->np_qlimit; 1397 snpp->snp_policy = npp->np_policy; 1398 snpp->snp_dispatch = npp->np_dispatch; 1399 if (npp->np_m2flow != NULL) 1400 snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW; 1401 if (npp->np_m2cpuid != NULL) 1402 snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID; 1403 if (npp->np_drainedcpu != NULL) 1404 snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU; 1405 counter++; 1406 } 1407 NETISR_RUNLOCK(&tracker); 1408 KASSERT(counter <= NETISR_MAXPROT, 1409 ("sysctl_netisr_proto: counter too big (%d)", counter)); 1410 error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter); 1411 free(snp_array, M_TEMP); 1412 return (error); 1413 } 1414 1415 SYSCTL_PROC(_net_isr, OID_AUTO, proto, 1416 CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto, 1417 "S,sysctl_netisr_proto", 1418 "Return list of protocols registered with netisr"); 1419 1420 /* 1421 * Sysctl monitoring for netisr: query a list of workstreams. 1422 */ 1423 static int 1424 sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS) 1425 { 1426 struct rm_priotracker tracker; 1427 struct sysctl_netisr_workstream *snwsp, *snws_array; 1428 struct netisr_workstream *nwsp; 1429 u_int counter, cpuid; 1430 int error; 1431 1432 if (req->newptr != NULL) 1433 return (EINVAL); 1434 snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP, 1435 M_ZERO | M_WAITOK); 1436 counter = 0; 1437 NETISR_RLOCK(&tracker); 1438 CPU_FOREACH(cpuid) { 1439 nwsp = DPCPU_ID_PTR(cpuid, nws); 1440 if (nwsp->nws_intr_event == NULL) 1441 continue; 1442 NWS_LOCK(nwsp); 1443 snwsp = &snws_array[counter]; 1444 snwsp->snws_version = sizeof(*snwsp); 1445 1446 /* 1447 * For now, we equate workstream IDs and CPU IDs in the 1448 * kernel, but expose them independently to userspace in case 1449 * that assumption changes in the future. 
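		 *
		 * These sysctls (net.isr.proto, net.isr.workstream, and
		 * net.isr.work) are what netstat(1) reads; "netstat -Q" is
		 * the usual way to inspect them from userspace.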
1450 */ 1451 snwsp->snws_wsid = cpuid; 1452 snwsp->snws_cpu = cpuid; 1453 if (nwsp->nws_intr_event != NULL) 1454 snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR; 1455 NWS_UNLOCK(nwsp); 1456 counter++; 1457 } 1458 NETISR_RUNLOCK(&tracker); 1459 KASSERT(counter <= MAXCPU, 1460 ("sysctl_netisr_workstream: counter too big (%d)", counter)); 1461 error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter); 1462 free(snws_array, M_TEMP); 1463 return (error); 1464 } 1465 1466 SYSCTL_PROC(_net_isr, OID_AUTO, workstream, 1467 CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream, 1468 "S,sysctl_netisr_workstream", 1469 "Return list of workstreams implemented by netisr"); 1470 1471 /* 1472 * Sysctl monitoring for netisr: query per-protocol data across all 1473 * workstreams. 1474 */ 1475 static int 1476 sysctl_netisr_work(SYSCTL_HANDLER_ARGS) 1477 { 1478 struct rm_priotracker tracker; 1479 struct sysctl_netisr_work *snwp, *snw_array; 1480 struct netisr_workstream *nwsp; 1481 struct netisr_proto *npp; 1482 struct netisr_work *nwp; 1483 u_int counter, cpuid, proto; 1484 int error; 1485 1486 if (req->newptr != NULL) 1487 return (EINVAL); 1488 snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT, 1489 M_TEMP, M_ZERO | M_WAITOK); 1490 counter = 0; 1491 NETISR_RLOCK(&tracker); 1492 CPU_FOREACH(cpuid) { 1493 nwsp = DPCPU_ID_PTR(cpuid, nws); 1494 if (nwsp->nws_intr_event == NULL) 1495 continue; 1496 NWS_LOCK(nwsp); 1497 for (proto = 0; proto < NETISR_MAXPROT; proto++) { 1498 npp = &netisr_proto[proto]; 1499 if (npp->np_name == NULL) 1500 continue; 1501 nwp = &nwsp->nws_work[proto]; 1502 snwp = &snw_array[counter]; 1503 snwp->snw_version = sizeof(*snwp); 1504 snwp->snw_wsid = cpuid; /* See comment above. */ 1505 snwp->snw_proto = proto; 1506 snwp->snw_len = nwp->nw_len; 1507 snwp->snw_watermark = nwp->nw_watermark; 1508 snwp->snw_dispatched = nwp->nw_dispatched; 1509 snwp->snw_hybrid_dispatched = 1510 nwp->nw_hybrid_dispatched; 1511 snwp->snw_qdrops = nwp->nw_qdrops; 1512 snwp->snw_queued = nwp->nw_queued; 1513 snwp->snw_handled = nwp->nw_handled; 1514 counter++; 1515 } 1516 NWS_UNLOCK(nwsp); 1517 } 1518 KASSERT(counter <= MAXCPU * NETISR_MAXPROT, 1519 ("sysctl_netisr_work: counter too big (%d)", counter)); 1520 NETISR_RUNLOCK(&tracker); 1521 error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter); 1522 free(snw_array, M_TEMP); 1523 return (error); 1524 } 1525 1526 SYSCTL_PROC(_net_isr, OID_AUTO, work, 1527 CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work, 1528 "S,sysctl_netisr_work", 1529 "Return list of per-workstream, per-protocol work in netisr"); 1530 1531 #ifdef DDB 1532 DB_SHOW_COMMAND(netisr, db_show_netisr) 1533 { 1534 struct netisr_workstream *nwsp; 1535 struct netisr_work *nwp; 1536 int first, proto; 1537 u_int cpuid; 1538 1539 db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto", 1540 "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue"); 1541 CPU_FOREACH(cpuid) { 1542 nwsp = DPCPU_ID_PTR(cpuid, nws); 1543 if (nwsp->nws_intr_event == NULL) 1544 continue; 1545 first = 1; 1546 for (proto = 0; proto < NETISR_MAXPROT; proto++) { 1547 if (netisr_proto[proto].np_handler == NULL) 1548 continue; 1549 nwp = &nwsp->nws_work[proto]; 1550 if (first) { 1551 db_printf("%3d ", cpuid); 1552 first = 0; 1553 } else 1554 db_printf("%3s ", ""); 1555 db_printf( 1556 "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n", 1557 netisr_proto[proto].np_name, nwp->nw_len, 1558 nwp->nw_watermark, nwp->nw_qlimit, 1559 nwp->nw_dispatched, nwp->nw_hybrid_dispatched, 
1560 nwp->nw_qdrops, nwp->nw_queued); 1561 } 1562 } 1563 } 1564 #endif 1565
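/*
 * For reference, a minimal protocol registration follows the pattern
 * sketched below.  This is illustrative only and not part of this file:
 * NETISR_FOO, foo_input(), and the surrounding module glue are hypothetical.
 *
 *	static void
 *	foo_input(struct mbuf *m)
 *	{
 *		... per-packet protocol processing; consumes m ...
 *	}
 *
 *	static struct netisr_handler foo_nh = {
 *		.nh_name = "foo",
 *		.nh_handler = foo_input,
 *		.nh_proto = NETISR_FOO,
 *		.nh_qlimit = 0,			(0 selects net.isr.defaultqlimit)
 *		.nh_policy = NETISR_POLICY_FLOW,
 *		.nh_dispatch = NETISR_DISPATCH_DEFAULT,
 *	};
 *
 *	netisr_register(&foo_nh);		(at initialization time)
 *	...
 *	(void)netisr_dispatch(NETISR_FOO, m);	(for each received packet)
 *	...
 *	netisr_unregister(&foo_nh);		(at teardown)
 *
 * With NETISR_POLICY_FLOW, packets that carry no flow ID and for which no
 * nh_m2flow callback is provided fall back on the SOURCE policy;
 * NETISR_POLICY_CPU additionally requires an nh_m2cpuid callback (see
 * netisr_select_cpuid() above).  Under VIMAGE, per-VNET enabling and
 * disabling is handled by netisr_register_vnet() and
 * netisr_unregister_vnet().
 */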