/*-
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other based on:
 *
 * - Whether directly dispatching a netisr handler leads to code reentrance
 *   or lock recursion, such as entering the socket code from the socket
 *   code.
 * - Whether directly dispatching a netisr handler leads to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * netisr.h.
 */
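
/*
 * Illustrative sketch (not part of the original file): a caller that holds
 * no locks the handler might reacquire can ask for direct dispatch, while
 * one that must avoid reentrance defers the work instead.  The surrounding
 * logic, the drop counter and the use of NETISR_IP here are assumptions made
 * only for the example; both entry points consume the mbuf on error:
 *
 *	if (can_dispatch_directly)
 *		error = netisr_dispatch(NETISR_IP, m);
 *	else
 *		error = netisr_queue(NETISR_IP, m);
 *	if (error != 0)
 *		input_drops++;		(m has already been freed)
 */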

#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	_WANT_NETISR_INTERNAL	/* Enable definitions from netisr_internal.h */
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/netisr_internal.h>
#include <net/vnet.h>

/*-
 * Synchronize use and modification of the registered netisr data structures;
 * a write lock is acquired while modifying the set of registered protocols,
 * so that partially registered or unregistered protocols are never run.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The netisr_proto array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_worker.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration is extremely rare at
 * runtime.  If it becomes more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */
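
/*
 * Sketch of the read-side pattern used by the packet paths later in this
 * file when NETISR_LOCKING is defined (shown for reference only; the real
 * uses are in swi_net(), netisr_queue_src() and netisr_dispatch_src()):
 *
 *	struct rm_priotracker tracker;
 *
 *	NETISR_RLOCK(&tracker);
 *	... consult netisr_proto[] and queue or dispatch the packet ...
 *	NETISR_RUNLOCK(&tracker);
 *
 * Registration and unregistration take NETISR_WLOCK()/NETISR_WUNLOCK()
 * instead.
 */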

static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");

/*-
 * Three global direct dispatch policies are supported:
 *
 * NETISR_DISPATCH_DEFERRED: All work is deferred for a netisr, regardless of
 * context (may be overridden by protocols).
 *
 * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch,
 * and we're running on the CPU the work would be performed on, then direct
 * dispatch it if it wouldn't violate ordering constraints on the workstream.
 *
 * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch,
 * always direct dispatch.  (The default.)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.  Protocols can
 * override the global policy (when they're not doing that, they select
 * NETISR_DISPATCH_DEFAULT).
 */
#define	NETISR_DISPATCH_POLICY_DEFAULT	NETISR_DISPATCH_DIRECT
#define	NETISR_DISPATCH_POLICY_MAXSTR	20 /* Used for temporary buffers. */
static u_int	netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT;
static int	sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_net_isr, OID_AUTO, dispatch, CTLTYPE_STRING | CTLFLAG_RWTUN,
    0, 0, sysctl_netisr_dispatch_policy, "A",
    "netisr dispatch policy");

/*
 * Allow the administrator to limit the number of threads (CPUs) used for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0; this must be set at boot, and we will create at most one thread per
 * CPU.  By default it is initialized to 1, which assigns only CPU 0 and
 * therefore a single workstream.  If set to -1, netisr uses all CPUs
 * (mp_ncpus), with one workstream per thread (CPU).
 */
static int netisr_maxthreads = 1;		/* Max number of threads. */
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int netisr_bindthreads = 0;		/* Bind threads to CPUs. */
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");

/*
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit, both
 * for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");

/*
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");

/*
 * Store and export the compile-time constant NETISR_MAXPROT limit on the
 * number of protocols that can register with netisr at a time.  This is
 * required for crashdump analysis, as it sizes netisr_proto[].
 */
static u_int	netisr_maxprot = NETISR_MAXPROT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
    &netisr_maxprot, 0,
    "Compile-time limit on the number of protocols supported by netisr.");

/*
 * The netisr_proto array describes all registered protocols, indexed by
 * protocol number.  See netisr_internal.h for more details.
 */
static struct netisr_proto	netisr_proto[NETISR_MAXPROT];

#ifdef VIMAGE
/*
 * The netisr_enable array holds a per-VNET flag indicating, for each
 * registered protocol, whether the netisr is active in that VNET.
 * netisr_register() will automatically enable the netisr for the default
 * VNET and all currently active instances, and netisr_unregister() will
 * disable it in all active VNETs, including vnet0.  Individual network stack
 * instances can be enabled/disabled by the netisr_(un)register_vnet()
 * functions.  With this we keep the single netisr_proto entry per protocol
 * but add a mechanism to stop netisr processing during vnet teardown; apart
 * from teardown, a VNET is expected to always be enabled.
 */
static VNET_DEFINE(u_int,	netisr_enable[NETISR_MAXPROT]);
#define	V_netisr_enable		VNET(netisr_enable)
#endif

/*
 * Per-CPU workstream data.  See netisr_internal.h for more details.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);

/*
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int				 nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int				 nws_count;
SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)

/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	return (nws_array[cpunumber % nws_count]);
}

/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}
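
/*
 * Illustrative sketch (hypothetical protocol "foo", not compiled here): a
 * protocol registered with NETISR_POLICY_CPU supplies an nh_m2cpuid callback
 * and can use the helpers above to stay within the range of CPUs that
 * actually have workstreams; foo_hash() is an assumed hashing function:
 *
 *	static struct mbuf *
 *	foo_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuid)
 *	{
 *
 *		*cpuid = foo_hash(m) % netisr_get_cpucount();
 *		return (m);
 *	}
 *
 * netisr_select_cpuid() later passes the returned value through
 * netisr_get_cpuid(); setting *cpuid to NETISR_CPUID_NONE instead asks
 * netisr to fall back on its own selection.
 */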
293 */ 294 struct netisr_dispatch_table_entry { 295 u_int ndte_policy; 296 const char *ndte_policy_str; 297 }; 298 static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = { 299 { NETISR_DISPATCH_DEFAULT, "default" }, 300 { NETISR_DISPATCH_DEFERRED, "deferred" }, 301 { NETISR_DISPATCH_HYBRID, "hybrid" }, 302 { NETISR_DISPATCH_DIRECT, "direct" }, 303 }; 304 305 static void 306 netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer, 307 u_int buflen) 308 { 309 const struct netisr_dispatch_table_entry *ndtep; 310 const char *str; 311 u_int i; 312 313 str = "unknown"; 314 for (i = 0; i < nitems(netisr_dispatch_table); i++) { 315 ndtep = &netisr_dispatch_table[i]; 316 if (ndtep->ndte_policy == dispatch_policy) { 317 str = ndtep->ndte_policy_str; 318 break; 319 } 320 } 321 snprintf(buffer, buflen, "%s", str); 322 } 323 324 static int 325 netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp) 326 { 327 const struct netisr_dispatch_table_entry *ndtep; 328 u_int i; 329 330 for (i = 0; i < nitems(netisr_dispatch_table); i++) { 331 ndtep = &netisr_dispatch_table[i]; 332 if (strcmp(ndtep->ndte_policy_str, str) == 0) { 333 *dispatch_policyp = ndtep->ndte_policy; 334 return (0); 335 } 336 } 337 return (EINVAL); 338 } 339 340 static int 341 sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS) 342 { 343 char tmp[NETISR_DISPATCH_POLICY_MAXSTR]; 344 u_int dispatch_policy; 345 int error; 346 347 netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp, 348 sizeof(tmp)); 349 error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req); 350 if (error == 0 && req->newptr != NULL) { 351 error = netisr_dispatch_policy_from_str(tmp, 352 &dispatch_policy); 353 if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT) 354 error = EINVAL; 355 if (error == 0) 356 netisr_dispatch_policy = dispatch_policy; 357 } 358 return (error); 359 } 360 361 /* 362 * Register a new netisr handler, which requires initializing per-protocol 363 * fields for each workstream. All netisr work is briefly suspended while 364 * the protocol is installed. 365 */ 366 void 367 netisr_register(const struct netisr_handler *nhp) 368 { 369 VNET_ITERATOR_DECL(vnet_iter); 370 struct netisr_work *npwp; 371 const char *name; 372 u_int i, proto; 373 374 proto = nhp->nh_proto; 375 name = nhp->nh_name; 376 377 /* 378 * Test that the requested registration is valid. 
379 */ 380 KASSERT(nhp->nh_name != NULL, 381 ("%s: nh_name NULL for %u", __func__, proto)); 382 KASSERT(nhp->nh_handler != NULL, 383 ("%s: nh_handler NULL for %s", __func__, name)); 384 KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE || 385 nhp->nh_policy == NETISR_POLICY_FLOW || 386 nhp->nh_policy == NETISR_POLICY_CPU, 387 ("%s: unsupported nh_policy %u for %s", __func__, 388 nhp->nh_policy, name)); 389 KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW || 390 nhp->nh_m2flow == NULL, 391 ("%s: nh_policy != FLOW but m2flow defined for %s", __func__, 392 name)); 393 KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL, 394 ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__, 395 name)); 396 KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL, 397 ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__, 398 name)); 399 KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT || 400 nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED || 401 nhp->nh_dispatch == NETISR_DISPATCH_HYBRID || 402 nhp->nh_dispatch == NETISR_DISPATCH_DIRECT, 403 ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch)); 404 405 KASSERT(proto < NETISR_MAXPROT, 406 ("%s(%u, %s): protocol too big", __func__, proto, name)); 407 408 /* 409 * Test that no existing registration exists for this protocol. 410 */ 411 NETISR_WLOCK(); 412 KASSERT(netisr_proto[proto].np_name == NULL, 413 ("%s(%u, %s): name present", __func__, proto, name)); 414 KASSERT(netisr_proto[proto].np_handler == NULL, 415 ("%s(%u, %s): handler present", __func__, proto, name)); 416 417 netisr_proto[proto].np_name = name; 418 netisr_proto[proto].np_handler = nhp->nh_handler; 419 netisr_proto[proto].np_m2flow = nhp->nh_m2flow; 420 netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid; 421 netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu; 422 if (nhp->nh_qlimit == 0) 423 netisr_proto[proto].np_qlimit = netisr_defaultqlimit; 424 else if (nhp->nh_qlimit > netisr_maxqlimit) { 425 printf("%s: %s requested queue limit %u capped to " 426 "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit, 427 netisr_maxqlimit); 428 netisr_proto[proto].np_qlimit = netisr_maxqlimit; 429 } else 430 netisr_proto[proto].np_qlimit = nhp->nh_qlimit; 431 netisr_proto[proto].np_policy = nhp->nh_policy; 432 netisr_proto[proto].np_dispatch = nhp->nh_dispatch; 433 CPU_FOREACH(i) { 434 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 435 bzero(npwp, sizeof(*npwp)); 436 npwp->nw_qlimit = netisr_proto[proto].np_qlimit; 437 } 438 439 #ifdef VIMAGE 440 /* 441 * Test that we are in vnet0 and have a curvnet set. 442 */ 443 KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__)); 444 KASSERT(IS_DEFAULT_VNET(curvnet), ("%s: curvnet %p is not vnet0 %p", 445 __func__, curvnet, vnet0)); 446 VNET_LIST_RLOCK_NOSLEEP(); 447 VNET_FOREACH(vnet_iter) { 448 CURVNET_SET(vnet_iter); 449 V_netisr_enable[proto] = 1; 450 CURVNET_RESTORE(); 451 } 452 VNET_LIST_RUNLOCK_NOSLEEP(); 453 #endif 454 NETISR_WUNLOCK(); 455 } 456 457 /* 458 * Clear drop counters across all workstreams for a protocol. 

/*
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qdrops = 0;
	}
	NETISR_WUNLOCK();
}

/*
 * Query current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
	struct netisr_work *npwp;
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	*qdropp = 0;
	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		*qdropp += npwp->nw_qdrops;
	}
	NETISR_RUNLOCK(&tracker);
}

/*
 * Query current per-workstream queue limit for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));
	*qlimitp = netisr_proto[proto].np_qlimit;
	NETISR_RUNLOCK(&tracker);
}

/*
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	if (qlimit > netisr_maxqlimit)
		return (EINVAL);

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_qlimit = qlimit;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qlimit = qlimit;
	}
	NETISR_WUNLOCK();
	return (0);
}
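
/*
 * Illustrative sketch (hypothetical protocol "foo"): protocols typically
 * wrap the two routines above in their own sysctl handler so administrators
 * can resize the protocol's queues at runtime:
 *
 *	static int
 *	sysctl_foo_intrqmaxlen(SYSCTL_HANDLER_ARGS)
 *	{
 *		u_int qlimit;
 *		int error;
 *
 *		netisr_getqlimit(&foo_nh, &qlimit);
 *		error = sysctl_handle_int(oidp, &qlimit, 0, req);
 *		if (error == 0 && req->newptr != NULL)
 *			error = netisr_setqlimit(&foo_nh, qlimit);
 *		return (error);
 *	}
 *
 * netisr_setqlimit() rejects values above net.isr.maxqlimit with EINVAL.
 */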
596 */ 597 while ((m = npwp->nw_head) != NULL) { 598 npwp->nw_head = m->m_nextpkt; 599 m->m_nextpkt = NULL; 600 if (npwp->nw_head == NULL) 601 npwp->nw_tail = NULL; 602 npwp->nw_len--; 603 m_freem(m); 604 } 605 KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__)); 606 KASSERT(npwp->nw_len == 0, ("%s: len", __func__)); 607 } 608 609 /* 610 * Remove the registration of a network protocol, which requires clearing 611 * per-protocol fields across all workstreams, including freeing all mbufs in 612 * the queues at time of unregister. All work in netisr is briefly suspended 613 * while this takes place. 614 */ 615 void 616 netisr_unregister(const struct netisr_handler *nhp) 617 { 618 VNET_ITERATOR_DECL(vnet_iter); 619 struct netisr_work *npwp; 620 #ifdef INVARIANTS 621 const char *name; 622 #endif 623 u_int i, proto; 624 625 proto = nhp->nh_proto; 626 #ifdef INVARIANTS 627 name = nhp->nh_name; 628 #endif 629 KASSERT(proto < NETISR_MAXPROT, 630 ("%s(%u): protocol too big for %s", __func__, proto, name)); 631 632 NETISR_WLOCK(); 633 KASSERT(netisr_proto[proto].np_handler != NULL, 634 ("%s(%u): protocol not registered for %s", __func__, proto, 635 name)); 636 637 #ifdef VIMAGE 638 VNET_LIST_RLOCK_NOSLEEP(); 639 VNET_FOREACH(vnet_iter) { 640 CURVNET_SET(vnet_iter); 641 V_netisr_enable[proto] = 0; 642 CURVNET_RESTORE(); 643 } 644 VNET_LIST_RUNLOCK_NOSLEEP(); 645 #endif 646 647 netisr_proto[proto].np_name = NULL; 648 netisr_proto[proto].np_handler = NULL; 649 netisr_proto[proto].np_m2flow = NULL; 650 netisr_proto[proto].np_m2cpuid = NULL; 651 netisr_proto[proto].np_qlimit = 0; 652 netisr_proto[proto].np_policy = 0; 653 CPU_FOREACH(i) { 654 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; 655 netisr_drain_proto(npwp); 656 bzero(npwp, sizeof(*npwp)); 657 } 658 NETISR_WUNLOCK(); 659 } 660 661 #ifdef VIMAGE 662 void 663 netisr_register_vnet(const struct netisr_handler *nhp) 664 { 665 u_int proto; 666 667 proto = nhp->nh_proto; 668 669 KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__)); 670 KASSERT(proto < NETISR_MAXPROT, 671 ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name)); 672 NETISR_WLOCK(); 673 KASSERT(netisr_proto[proto].np_handler != NULL, 674 ("%s(%u): protocol not registered for %s", __func__, proto, 675 nhp->nh_name)); 676 677 V_netisr_enable[proto] = 1; 678 NETISR_WUNLOCK(); 679 } 680 681 static void 682 netisr_drain_proto_vnet(struct vnet *vnet, u_int proto) 683 { 684 struct netisr_workstream *nwsp; 685 struct netisr_work *npwp; 686 struct mbuf *m, *mp, *n, *ne; 687 u_int i; 688 689 KASSERT(vnet != NULL, ("%s: vnet is NULL", __func__)); 690 NETISR_LOCK_ASSERT(); 691 692 CPU_FOREACH(i) { 693 nwsp = DPCPU_ID_PTR(i, nws); 694 if (nwsp->nws_intr_event == NULL) 695 continue; 696 npwp = &nwsp->nws_work[proto]; 697 NWS_LOCK(nwsp); 698 699 /* 700 * Rather than dissecting and removing mbufs from the middle 701 * of the chain, we build a new chain if the packet stays and 702 * update the head and tail pointers at the end. All packets 703 * matching the given vnet are freed. 704 */ 705 m = npwp->nw_head; 706 n = ne = NULL; 707 while (m != NULL) { 708 mp = m; 709 m = m->m_nextpkt; 710 mp->m_nextpkt = NULL; 711 if (mp->m_pkthdr.rcvif->if_vnet != vnet) { 712 if (n == NULL) { 713 n = ne = mp; 714 } else { 715 ne->m_nextpkt = mp; 716 ne = mp; 717 } 718 continue; 719 } 720 /* This is a packet in the selected vnet. Free it. 
			npwp->nw_len--;
			m_freem(mp);
		}
		npwp->nw_head = n;
		npwp->nw_tail = ne;
		NWS_UNLOCK(nwsp);
	}
}

void
netisr_unregister_vnet(const struct netisr_handler *nhp)
{
	u_int proto;

	proto = nhp->nh_proto;

	KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__));
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name));
	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    nhp->nh_name));

	V_netisr_enable[proto] = 0;

	netisr_drain_proto_vnet(curvnet, proto);
	NETISR_WUNLOCK();
}
#endif

/*
 * Compose the global and per-protocol policies on dispatch, and return the
 * dispatch policy to use.
 */
static u_int
netisr_get_dispatch(struct netisr_proto *npp)
{

	/*
	 * Protocol-specific configuration overrides the global default.
	 */
	if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT)
		return (npp->np_dispatch);
	return (netisr_dispatch_policy);
}

/*
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy,
    uintptr_t source, struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;
	u_int policy;

	NETISR_LOCK_ASSERT();

	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	policy = npp->np_policy;
	if (policy == NETISR_POLICY_CPU) {
		m = npp->np_m2cpuid(m, source, cpuidp);
		if (m == NULL)
			return (NULL);

		/*
		 * It's possible for a protocol not to have a good idea about
		 * where to process a packet, in which case we fall back on
		 * the netisr code to decide.  In the hybrid case, return the
		 * current CPU ID, which will force an immediate direct
		 * dispatch.  In the queued case, fall back on the SOURCE
		 * policy.
		 */
		if (*cpuidp != NETISR_CPUID_NONE) {
			*cpuidp = netisr_get_cpuid(*cpuidp);
			return (m);
		}
		if (dispatch_policy == NETISR_DISPATCH_HYBRID) {
			*cpuidp = netisr_get_cpuid(curcpu);
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	if (policy == NETISR_POLICY_FLOW) {
		if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE &&
		    npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	KASSERT(policy == NETISR_POLICY_SOURCE,
	    ("%s: invalid policy %u for %s", __func__, npp->np_policy,
	    npp->np_name));

	ifp = m->m_pkthdr.rcvif;
	if (ifp != NULL)
		*cpuidp = nws_array[(ifp->if_index + source) % nws_count];
	else
		*cpuidp = nws_array[source % nws_count];
	return (m);
}
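
/*
 * Illustrative sketch (hypothetical protocol "foo"): with NETISR_POLICY_FLOW
 * a protocol whose packets may arrive without a usable flow ID can supply an
 * nh_m2flow callback that computes one, after which netisr_select_cpuid()
 * above maps the flow ID to a CPU.  foo_hash_headers() and the choice of
 * M_HASHTYPE_OPAQUE are assumptions made for the example:
 *
 *	static struct mbuf *
 *	foo_m2flow(struct mbuf *m, uintptr_t source)
 *	{
 *
 *		m->m_pkthdr.flowid = foo_hash_headers(m);
 *		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
 *		return (m);
 *	}
 *
 * Returning NULL instead indicates that the mbuf was consumed, for example
 * because the headers could not be parsed.
 */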

/*
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
	struct netisr_work local_npw, *npwp;
	u_int handled;
	struct mbuf *m;

	NETISR_LOCK_ASSERT();
	NWS_LOCK_ASSERT(nwsp);

	KASSERT(nwsp->nws_flags & NWS_RUNNING,
	    ("%s(%u): not running", __func__, proto));
	KASSERT(proto >= 0 && proto < NETISR_MAXPROT,
	    ("%s(%u): invalid proto\n", __func__, proto));

	npwp = &nwsp->nws_work[proto];
	if (npwp->nw_len == 0)
		return (0);

	/*
	 * Move the global work queue to a thread-local work queue.
	 *
	 * Notice that this means the effective maximum length of the queue
	 * is actually twice that of the maximum queue length specified in
	 * the protocol registration call.
	 */
	handled = npwp->nw_len;
	local_npw = *npwp;
	npwp->nw_head = NULL;
	npwp->nw_tail = NULL;
	npwp->nw_len = 0;
	nwsp->nws_pendingbits &= ~(1 << proto);
	NWS_UNLOCK(nwsp);
	while ((m = local_npw.nw_head) != NULL) {
		local_npw.nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (local_npw.nw_head == NULL)
			local_npw.nw_tail = NULL;
		local_npw.nw_len--;
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
		    ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
		netisr_proto[proto].np_handler(m);
		CURVNET_RESTORE();
	}
	KASSERT(local_npw.nw_len == 0,
	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
	if (netisr_proto[proto].np_drainedcpu)
		netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu);
	NWS_LOCK(nwsp);
	npwp->nw_handled += handled;
	return (handled);
}

/*
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	nwsp = arg;

#ifdef DEVICE_POLLING
	KASSERT(nws_count == 1,
	    ("%s: device_polling but nws_count != 1", __func__));
	netisr_poll();
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	while ((bits = nwsp->nws_pendingbits) != 0) {
		while ((prot = ffs(bits)) != 0) {
			prot--;
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	netisr_pollmore();
#endif
}

static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

	NWS_LOCK_ASSERT(nwsp);

	*dosignalp = 0;
	if (npwp->nw_len < npwp->nw_qlimit) {
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL) {
			npwp->nw_head = m;
			npwp->nw_tail = m;
		} else {
			npwp->nw_tail->m_nextpkt = m;
			npwp->nw_tail = m;
		}
		npwp->nw_len++;
		if (npwp->nw_len > npwp->nw_watermark)
			npwp->nw_watermark = npwp->nw_len;

		/*
		 * We must set the bit regardless of NWS_RUNNING, so that
		 * swi_net() keeps calling netisr_process_workstream_proto().
		 */
		nwsp->nws_pendingbits |= (1 << proto);
		if (!(nwsp->nws_flags &
		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
			nwsp->nws_flags |= NWS_SCHEDULED;
			*dosignalp = 1;	/* Defer until unlocked. */
		}
		npwp->nw_queued++;
		return (0);
	} else {
		m_freem(m);
		npwp->nw_qdrops++;
		return (ENOBUFS);
	}
}

static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, mp_maxid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	dosignal = 0;
	error = 0;
	nwsp = DPCPU_ID_PTR(cpuid, nws);
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}

int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef VIMAGE
	if (V_netisr_enable[proto] == 0) {
		m_freem(m);
		return (ENOPROTOOPT);
	}
#endif

	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED,
	    source, m, &cpuid);
	if (m != NULL) {
		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
		    cpuid));
		error = netisr_queue_internal(proto, m, cpuid);
	} else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}

/*
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid, dispatch_policy;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	npp = &netisr_proto[proto];
	KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__,
	    proto));

#ifdef VIMAGE
	if (V_netisr_enable[proto] == 0) {
		m_freem(m);
		return (ENOPROTOOPT);
	}
#endif

	dispatch_policy = netisr_get_dispatch(npp);
	if (dispatch_policy == NETISR_DISPATCH_DEFERRED)
		return (netisr_queue_src(proto, source, m));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (dispatch_policy == NETISR_DISPATCH_DIRECT) {
		nwsp = DPCPU_PTR(nws);
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		netisr_proto[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID,
	    ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy));

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
	sched_pin();
	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID,
	    source, m, &cpuid);
	if (m == NULL) {
		error = ENOBUFS;
		goto out_unpin;
	}
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
	if (cpuid != curcpu)
		goto queue_fallback;
	nwsp = DPCPU_PTR(nws);
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	netisr_proto[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
	sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}

#ifdef DEVICE_POLLING
/*
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
	struct netisr_workstream *nwsp;

	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
	NWS_SIGNAL(nwsp);
}
#endif

static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
	char swiname[12];
	struct netisr_workstream *nwsp;
	int error;

	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	nwsp = DPCPU_ID_PTR(cpuid, nws);
	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
	nwsp->nws_cpu = cpuid;
	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);
	pc->pc_netisr = nwsp->nws_intr_event;
	if (netisr_bindthreads) {
		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
		if (error != 0)
			printf("%s: cpu %u: intr_event_bind: %d", __func__,
			    cpuid, error);
	}
	NETISR_WLOCK();
	nws_array[nws_count] = nwsp->nws_cpu;
	nws_count++;
	NETISR_WUNLOCK();
}

/*
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{
#ifdef EARLY_AP_STARTUP
	struct pcpu *pc;
#endif

	NETISR_LOCK_INIT();
	if (netisr_maxthreads == 0 || netisr_maxthreads < -1)
		netisr_maxthreads = 1;		/* default behavior */
	else if (netisr_maxthreads == -1)
		netisr_maxthreads = mp_ncpus;	/* use max cpus */
	if (netisr_maxthreads > mp_ncpus) {
		printf("netisr_init: forcing maxthreads from %d to %d\n",
		    netisr_maxthreads, mp_ncpus);
		netisr_maxthreads = mp_ncpus;
	}
	if (netisr_defaultqlimit > netisr_maxqlimit) {
		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
		    netisr_defaultqlimit, netisr_maxqlimit);
		netisr_defaultqlimit = netisr_maxqlimit;
	}
#ifdef DEVICE_POLLING
	/*
	 * The device polling code is not yet aware of how to deal with
	 * multiple netisr threads, so for the time being compiling in device
	 * polling disables parallel netisr workers.
	 */
	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
		printf("netisr_init: forcing maxthreads to 1 and "
		    "bindthreads to 0 for device polling\n");
		netisr_maxthreads = 1;
		netisr_bindthreads = 0;
	}
#endif

#ifdef EARLY_AP_STARTUP
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
#else
	netisr_start_swi(curcpu, pcpu_find(curcpu));
#endif
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
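
/*
 * Example boot-time configuration (illustrative only): the thread-related
 * knobs are read-only tunables and therefore take effect from loader.conf,
 * for instance
 *
 *	net.isr.maxthreads=-1		(one netisr thread per CPU)
 *	net.isr.bindthreads=1		(pin each thread to its CPU)
 *	net.isr.dispatch=deferred
 *
 * whereas net.isr.dispatch may also be changed at runtime with sysctl(8).
 */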

#ifndef EARLY_AP_STARTUP
/*
 * Start worker threads for additional CPUs.  No attempt to gracefully handle
 * work reassignment, as we don't yet support dynamic reconfiguration.
 */
static void
netisr_start(void *arg)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		/* Worker will already be present for boot CPU. */
		if (pc->pc_netisr != NULL)
			continue;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
#endif

/*
 * Sysctl monitoring for netisr: query a list of registered protocols.
 */
static int
sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_proto *snpp, *snp_array;
	struct netisr_proto *npp;
	u_int counter, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
		npp = &netisr_proto[proto];
		if (npp->np_name == NULL)
			continue;
		snpp = &snp_array[counter];
		snpp->snp_version = sizeof(*snpp);
		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
		snpp->snp_proto = proto;
		snpp->snp_qlimit = npp->np_qlimit;
		snpp->snp_policy = npp->np_policy;
		snpp->snp_dispatch = npp->np_dispatch;
		if (npp->np_m2flow != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
		if (npp->np_m2cpuid != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
		if (npp->np_drainedcpu != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= NETISR_MAXPROT,
	    ("sysctl_netisr_proto: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
	free(snp_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, proto,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
    "S,sysctl_netisr_proto",
    "Return list of protocols registered with netisr");

/*
 * Sysctl monitoring for netisr: query a list of workstreams.
 */
static int
sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_workstream *snwsp, *snws_array;
	struct netisr_workstream *nwsp;
	u_int counter, cpuid;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		snwsp = &snws_array[counter];
		snwsp->snws_version = sizeof(*snwsp);

		/*
		 * For now, we equate workstream IDs and CPU IDs in the
		 * kernel, but expose them independently to userspace in case
		 * that assumption changes in the future.
		 */
		snwsp->snws_wsid = cpuid;
		snwsp->snws_cpu = cpuid;
		if (nwsp->nws_intr_event != NULL)
			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
		NWS_UNLOCK(nwsp);
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= MAXCPU,
	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
	free(snws_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
    "S,sysctl_netisr_workstream",
    "Return list of workstreams implemented by netisr");

/*
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_work *snwp, *snw_array;
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *nwp;
	u_int counter, cpuid, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
	    M_TEMP, M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			npp = &netisr_proto[proto];
			if (npp->np_name == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			snwp = &snw_array[counter];
			snwp->snw_version = sizeof(*snwp);
			snwp->snw_wsid = cpuid;		/* See comment above. */
			snwp->snw_proto = proto;
			snwp->snw_len = nwp->nw_len;
			snwp->snw_watermark = nwp->nw_watermark;
			snwp->snw_dispatched = nwp->nw_dispatched;
			snwp->snw_hybrid_dispatched =
			    nwp->nw_hybrid_dispatched;
			snwp->snw_qdrops = nwp->nw_qdrops;
			snwp->snw_queued = nwp->nw_queued;
			snwp->snw_handled = nwp->nw_handled;
			counter++;
		}
		NWS_UNLOCK(nwsp);
	}
	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
	    ("sysctl_netisr_work: counter too big (%d)", counter));
	NETISR_RUNLOCK(&tracker);
	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
	free(snw_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
    "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");
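
/*
 * The three monitoring sysctls above (net.isr.proto, net.isr.workstream and
 * net.isr.work) are intended for consumers such as netstat(1)'s -Q output.
 * A minimal userspace reader might look like the following sketch (error
 * handling omitted; defining _WANT_NETISR_INTERNAL to obtain the structure
 * layouts mirrors what is done at the top of this file):
 *
 *	#define _WANT_NETISR_INTERNAL
 *	#include <net/netisr.h>
 *	#include <net/netisr_internal.h>
 *
 *	struct sysctl_netisr_proto *prot;
 *	size_t len, i;
 *
 *	sysctlbyname("net.isr.proto", NULL, &len, NULL, 0);
 *	prot = malloc(len);
 *	sysctlbyname("net.isr.proto", prot, &len, NULL, 0);
 *	for (i = 0; i < len / sizeof(*prot); i++)
 *		printf("%s qlimit %u\n", prot[i].snp_name,
 *		    prot[i].snp_qlimit);
 */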

#ifdef DDB
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *nwp;
	int first, proto;
	u_int cpuid;

	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		first = 1;
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			if (netisr_proto[proto].np_handler == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			if (first) {
				db_printf("%3d ", cpuid);
				first = 0;
			} else
				db_printf("%3s ", "");
			db_printf(
			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
			    netisr_proto[proto].np_name, nwp->nw_len,
			    nwp->nw_watermark, nwp->nw_qlimit,
			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
			    nwp->nw_qdrops, nwp->nw_queued);
		}
	}
}
#endif