/*-
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other based on:
 *
 * - Whether directly dispatching a netisr handler might lead to code
 *   reentrance or lock recursion, such as entering the socket code from the
 *   socket code.
 * - Whether directly dispatching a netisr handler might lead to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * netisr.h.
 */

#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	_WANT_NETISR_INTERNAL	/* Enable definitions from netisr_internal.h */
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/netisr_internal.h>
#include <net/vnet.h>

/*-
 * Synchronize use and modification of the registered netisr data structures;
 * a read lock is acquired while running protocol handlers, and the write
 * lock while modifying the set of registered protocols, so that partially
 * registered or unregistered protocols are never run.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The netisr_proto array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_worker.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration is extremely rare at
 * runtime.  If it becomes more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */

static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");

#ifdef DEVICE_POLLING
static int	netisr_polling = 0;	/* Enable Polling. */
TUNABLE_INT("net.isr.polling_enable", &netisr_polling);
SYSCTL_INT(_net_isr, OID_AUTO, polling_enable, CTLFLAG_RW,
    &netisr_polling, 0, "Enable polling");
#endif

/*-
 * Three global direct dispatch policies are supported:
 *
 * NETISR_DISPATCH_DEFERRED: All work is deferred for a netisr, regardless of
 * context (may be overridden by protocols).
 *
 * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch,
 * and we're running on the CPU the work would be performed on, then direct
 * dispatch it if it wouldn't violate ordering constraints on the workstream.
 *
 * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch,
 * always direct dispatch.  (The default.)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.  Protocols can
 * override the global policy (when they're not doing that, they select
 * NETISR_DISPATCH_DEFAULT).
 */
#define	NETISR_DISPATCH_POLICY_DEFAULT	NETISR_DISPATCH_DIRECT
#define	NETISR_DISPATCH_POLICY_MAXSTR	20 /* Used for temporary buffers. */
static u_int	netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT;
static int	sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_net_isr, OID_AUTO, dispatch, CTLTYPE_STRING | CTLFLAG_RWTUN,
    0, 0, sysctl_netisr_dispatch_policy, "A",
    "netisr dispatch policy");

/*
 * Allow the administrator to limit the number of threads (CPUs) to use for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0, so in practice we ignore values <= 1.  This must be set at boot.
 * We will create at most one thread per CPU.
 */
static int	netisr_maxthreads = -1;		/* Max number of threads. */
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");

/*
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit, both
 * for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");

/*
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");

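/*
 * Illustrative sketch, not part of the implementation: the read-only
 * tunables above are normally set from loader.conf at boot, while
 * net.isr.dispatch may also be changed at runtime with sysctl(8).  The
 * values shown here are arbitrary examples, not recommendations.
 *
 *	# /boot/loader.conf
 *	net.isr.maxthreads=4		# use up to four netisr worker threads
 *	net.isr.bindthreads=1		# bind each worker to its CPU
 *	net.isr.defaultqlimit=512	# default per-protocol, per-CPU queue limit
 *	net.isr.dispatch="deferred"	# global dispatch policy
 *
 *	# at runtime
 *	sysctl net.isr.dispatch=hybrid
 */
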
/*
 * Store and export the compile-time constant NETISR_MAXPROT limit on the
 * number of protocols that can register with netisr at a time.  This is
 * required for crashdump analysis, as it sizes netisr_proto[].
 */
static u_int	netisr_maxprot = NETISR_MAXPROT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
    &netisr_maxprot, 0,
    "Compile-time limit on the number of protocols supported by netisr.");

/*
 * The netisr_proto array describes all registered protocols, indexed by
 * protocol number.  See netisr_internal.h for more details.
 */
static struct netisr_proto	netisr_proto[NETISR_MAXPROT];

/*
 * Per-CPU workstream data.  See netisr_internal.h for more details.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);

/*
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int	nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int	nws_count;
SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)

/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	KASSERT(cpunumber < nws_count, ("%s: %u > %u", __func__, cpunumber,
	    nws_count));

	return (nws_array[cpunumber]);
}

/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}

/*
 * Dispatch tunable and sysctl configuration.
 */
struct netisr_dispatch_table_entry {
	u_int		 ndte_policy;
	const char	*ndte_policy_str;
};
static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = {
	{ NETISR_DISPATCH_DEFAULT, "default" },
	{ NETISR_DISPATCH_DEFERRED, "deferred" },
	{ NETISR_DISPATCH_HYBRID, "hybrid" },
	{ NETISR_DISPATCH_DIRECT, "direct" },
};
static const u_int netisr_dispatch_table_len =
    (sizeof(netisr_dispatch_table) / sizeof(netisr_dispatch_table[0]));

static void
netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer,
    u_int buflen)
{
	const struct netisr_dispatch_table_entry *ndtep;
	const char *str;
	u_int i;

	str = "unknown";
	for (i = 0; i < netisr_dispatch_table_len; i++) {
		ndtep = &netisr_dispatch_table[i];
		if (ndtep->ndte_policy == dispatch_policy) {
			str = ndtep->ndte_policy_str;
			break;
		}
	}
	snprintf(buffer, buflen, "%s", str);
}

static int
netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp)
{
	const struct netisr_dispatch_table_entry *ndtep;
	u_int i;

	for (i = 0; i < netisr_dispatch_table_len; i++) {
		ndtep = &netisr_dispatch_table[i];
		if (strcmp(ndtep->ndte_policy_str, str) == 0) {
			*dispatch_policyp = ndtep->ndte_policy;
			return (0);
		}
	}
	return (EINVAL);
}

static int
sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS)
{
	char tmp[NETISR_DISPATCH_POLICY_MAXSTR];
	u_int dispatch_policy;
	int error;

	netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp,
	    sizeof(tmp));
	error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req);
	if (error == 0 && req->newptr != NULL) {
		error = netisr_dispatch_policy_from_str(tmp,
		    &dispatch_policy);
		if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT)
			error = EINVAL;
		if (error == 0)
			netisr_dispatch_policy = dispatch_policy;
	}
	return (error);
}

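/*
 * Illustrative sketch, not part of the implementation: a protocol that
 * registers with NETISR_POLICY_CPU supplies an nh_m2cpuid callback that maps
 * each packet to a CPU ID.  One hedged way to do that with the utility
 * routines above is to hash a protocol-specific key over
 * netisr_get_cpucount() buckets and translate the bucket with
 * netisr_get_cpuid(); a protocol with no opinion about a given packet can
 * instead set *cpuidp to NETISR_CPUID_NONE and let netisr fall back to its
 * own selection.  "foo_hash_mbuf" is a hypothetical helper, not something
 * defined in this file.
 *
 *	static struct mbuf *
 *	foo_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuidp)
 *	{
 *		u_int bucket;
 *
 *		bucket = foo_hash_mbuf(m) % netisr_get_cpucount();
 *		*cpuidp = netisr_get_cpuid(bucket);
 *		return (m);
 *	}
 */
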
/*
 * Register a new netisr handler, which requires initializing per-protocol
 * fields for each workstream.  All netisr work is briefly suspended while
 * the protocol is installed.
 */
void
netisr_register(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
	const char *name;
	u_int i, proto;

	proto = nhp->nh_proto;
	name = nhp->nh_name;

	/*
	 * Test that the requested registration is valid.
	 */
	KASSERT(nhp->nh_name != NULL,
	    ("%s: nh_name NULL for %u", __func__, proto));
	KASSERT(nhp->nh_handler != NULL,
	    ("%s: nh_handler NULL for %s", __func__, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
	    nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_policy == NETISR_POLICY_CPU,
	    ("%s: unsupported nh_policy %u for %s", __func__,
	    nhp->nh_policy, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_m2flow == NULL,
	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT ||
	    nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED ||
	    nhp->nh_dispatch == NETISR_DISPATCH_HYBRID ||
	    nhp->nh_dispatch == NETISR_DISPATCH_DIRECT,
	    ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch));

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u, %s): protocol too big", __func__, proto, name));

	/*
	 * Test that no existing registration exists for this protocol.
	 */
	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_name == NULL,
	    ("%s(%u, %s): name present", __func__, proto, name));
	KASSERT(netisr_proto[proto].np_handler == NULL,
	    ("%s(%u, %s): handler present", __func__, proto, name));

	netisr_proto[proto].np_name = name;
	netisr_proto[proto].np_handler = nhp->nh_handler;
	netisr_proto[proto].np_m2flow = nhp->nh_m2flow;
	netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid;
	netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu;
	if (nhp->nh_qlimit == 0)
		netisr_proto[proto].np_qlimit = netisr_defaultqlimit;
	else if (nhp->nh_qlimit > netisr_maxqlimit) {
		printf("%s: %s requested queue limit %u capped to "
		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
		    netisr_maxqlimit);
		netisr_proto[proto].np_qlimit = netisr_maxqlimit;
	} else
		netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
	netisr_proto[proto].np_policy = nhp->nh_policy;
	netisr_proto[proto].np_dispatch = nhp->nh_dispatch;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		bzero(npwp, sizeof(*npwp));
		npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
	}
	NETISR_WUNLOCK();
}

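/*
 * Illustrative sketch, not part of the implementation: a typical protocol
 * registers from its initialization routine by filling out a struct
 * netisr_handler and calling netisr_register().  NETISR_FOO, foo_input(),
 * and foo_m2flow() are hypothetical names used only for illustration.
 *
 *	static struct netisr_handler foo_nh = {
 *		.nh_name = "foo",
 *		.nh_handler = foo_input,
 *		.nh_proto = NETISR_FOO,
 *		.nh_policy = NETISR_POLICY_FLOW,
 *		.nh_m2flow = foo_m2flow,
 *		.nh_qlimit = 0,		// 0 selects netisr_defaultqlimit
 *		.nh_dispatch = NETISR_DISPATCH_DEFAULT,
 *	};
 *
 *	static void
 *	foo_init(void)
 *	{
 *
 *		netisr_register(&foo_nh);
 *	}
 */
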
/*
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qdrops = 0;
	}
	NETISR_WUNLOCK();
}

/*
 * Query current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
	struct netisr_work *npwp;
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	*qdropp = 0;
	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		*qdropp += npwp->nw_qdrops;
	}
	NETISR_RUNLOCK(&tracker);
}

/*
 * Query current per-workstream queue limit for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));
	*qlimitp = netisr_proto[proto].np_qlimit;
	NETISR_RUNLOCK(&tracker);
}

/*
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	if (qlimit > netisr_maxqlimit)
		return (EINVAL);

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_qlimit = qlimit;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qlimit = qlimit;
	}
	NETISR_WUNLOCK();
	return (0);
}

/*
 * Drain all packets currently held in a particular protocol work queue.
 */
static void
netisr_drain_proto(struct netisr_work *npwp)
{
	struct mbuf *m;

	/*
	 * We would assert the lock on the workstream but it's not passed in.
	 */
	while ((m = npwp->nw_head) != NULL) {
		npwp->nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL)
			npwp->nw_tail = NULL;
		npwp->nw_len--;
		m_freem(m);
	}
	KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
	KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
}

/*
 * Remove the registration of a network protocol, which requires clearing
 * per-protocol fields across all workstreams, including freeing all mbufs in
 * the queues at time of unregister.  All work in netisr is briefly suspended
 * while this takes place.
 */
void
netisr_unregister(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_name = NULL;
	netisr_proto[proto].np_handler = NULL;
	netisr_proto[proto].np_m2flow = NULL;
	netisr_proto[proto].np_m2cpuid = NULL;
	netisr_proto[proto].np_qlimit = 0;
	netisr_proto[proto].np_policy = 0;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		netisr_drain_proto(npwp);
		bzero(npwp, sizeof(*npwp));
	}
	NETISR_WUNLOCK();
}

/*
 * Compose the global and per-protocol policies on dispatch, and return the
 * dispatch policy to use.
 */
static u_int
netisr_get_dispatch(struct netisr_proto *npp)
{

	/*
	 * Protocol-specific configuration overrides the global default.
	 */
	if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT)
		return (npp->np_dispatch);
	return (netisr_dispatch_policy);
}

/*
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy,
    uintptr_t source, struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;
	u_int policy;

	NETISR_LOCK_ASSERT();

	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	policy = npp->np_policy;
	if (policy == NETISR_POLICY_CPU) {
		m = npp->np_m2cpuid(m, source, cpuidp);
		if (m == NULL)
			return (NULL);

		/*
		 * It's possible for a protocol not to have a good idea about
		 * where to process a packet, in which case we fall back on
		 * the netisr code to decide.  In the hybrid case, return the
		 * current CPU ID, which will force an immediate direct
		 * dispatch.  In the queued case, fall back on the SOURCE
		 * policy.
		 */
		if (*cpuidp != NETISR_CPUID_NONE)
			return (m);
		if (dispatch_policy == NETISR_DISPATCH_HYBRID) {
			*cpuidp = curcpu;
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	if (policy == NETISR_POLICY_FLOW) {
		if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE &&
		    npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	KASSERT(policy == NETISR_POLICY_SOURCE,
	    ("%s: invalid policy %u for %s", __func__, npp->np_policy,
	    npp->np_name));

	ifp = m->m_pkthdr.rcvif;
	if (ifp != NULL)
		*cpuidp = nws_array[(ifp->if_index + source) % nws_count];
	else
		*cpuidp = nws_array[source % nws_count];
	return (m);
}

/*
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
	struct netisr_work local_npw, *npwp;
	u_int handled;
	struct mbuf *m;

	NETISR_LOCK_ASSERT();
	NWS_LOCK_ASSERT(nwsp);

	KASSERT(nwsp->nws_flags & NWS_RUNNING,
	    ("%s(%u): not running", __func__, proto));
	KASSERT(proto >= 0 && proto < NETISR_MAXPROT,
	    ("%s(%u): invalid proto\n", __func__, proto));

	npwp = &nwsp->nws_work[proto];
	if (npwp->nw_len == 0)
		return (0);

	/*
	 * Move the global work queue to a thread-local work queue.
	 *
	 * Notice that this means the effective maximum length of the queue
	 * is actually twice that of the maximum queue length specified in
	 * the protocol registration call.
	 */
	handled = npwp->nw_len;
	local_npw = *npwp;
	npwp->nw_head = NULL;
	npwp->nw_tail = NULL;
	npwp->nw_len = 0;
	nwsp->nws_pendingbits &= ~(1 << proto);
	NWS_UNLOCK(nwsp);
	while ((m = local_npw.nw_head) != NULL) {
		local_npw.nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (local_npw.nw_head == NULL)
			local_npw.nw_tail = NULL;
		local_npw.nw_len--;
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
		    ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
		netisr_proto[proto].np_handler(m);
		CURVNET_RESTORE();
	}
	KASSERT(local_npw.nw_len == 0,
	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
	if (netisr_proto[proto].np_drainedcpu)
		netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu);
	NWS_LOCK(nwsp);
	npwp->nw_handled += handled;
	return (handled);
}

/*
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	nwsp = arg;

#ifdef DEVICE_POLLING
	if (netisr_polling) {
		KASSERT(nws_count == 1,
		    ("%s: device_polling but nws_count != 1", __func__));
		netisr_poll();
	}
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	while ((bits = nwsp->nws_pendingbits) != 0) {
		while ((prot = ffs(bits)) != 0) {
			prot--;
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	if (netisr_polling)
		netisr_pollmore();
#endif
}

static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

	NWS_LOCK_ASSERT(nwsp);

	*dosignalp = 0;
	if (npwp->nw_len < npwp->nw_qlimit) {
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL) {
			npwp->nw_head = m;
			npwp->nw_tail = m;
		} else {
			npwp->nw_tail->m_nextpkt = m;
			npwp->nw_tail = m;
		}
		npwp->nw_len++;
		if (npwp->nw_len > npwp->nw_watermark)
			npwp->nw_watermark = npwp->nw_len;

		/*
		 * We must set the bit regardless of NWS_RUNNING, so that
		 * swi_net() keeps calling netisr_process_workstream_proto().
		 */
		nwsp->nws_pendingbits |= (1 << proto);
		if (!(nwsp->nws_flags &
		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
			nwsp->nws_flags |= NWS_SCHEDULED;
			*dosignalp = 1;	/* Defer until unlocked. */
		}
		npwp->nw_queued++;
		return (0);
	} else {
		m_freem(m);
		npwp->nw_qdrops++;
		return (ENOBUFS);
	}
}

static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, mp_maxid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	dosignal = 0;
	error = 0;
	nwsp = DPCPU_ID_PTR(cpuid, nws);
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}

int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED,
	    source, m, &cpuid);
	if (m != NULL) {
		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
		    cpuid));
		error = netisr_queue_internal(proto, m, cpuid);
	} else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}

/*
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid, dispatch_policy;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	npp = &netisr_proto[proto];
	KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__,
	    proto));

	dispatch_policy = netisr_get_dispatch(npp);
	if (dispatch_policy == NETISR_DISPATCH_DEFERRED)
		return (netisr_queue_src(proto, source, m));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (dispatch_policy == NETISR_DISPATCH_DIRECT) {
		nwsp = DPCPU_PTR(nws);
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		netisr_proto[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID,
	    ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy));

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
	sched_pin();
	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID,
	    source, m, &cpuid);
	if (m == NULL) {
		error = ENOBUFS;
		goto out_unpin;
	}
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
	if (cpuid != curcpu)
		goto queue_fallback;
	nwsp = DPCPU_PTR(nws);
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	netisr_proto[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
	sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}

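/*
 * Illustrative sketch, not part of the implementation: an input path, such
 * as an interface receive routine, hands an mbuf to netisr by protocol
 * number.  netisr_dispatch() may process the packet immediately, subject to
 * the dispatch policy above, while netisr_queue() always defers to the SWI
 * thread.  NETISR_FOO is the same hypothetical protocol number used in the
 * registration sketch earlier.
 *
 *	m->m_pkthdr.rcvif = ifp;	// needed if the packet is queued
 *	error = netisr_dispatch(NETISR_FOO, m);
 *	// on error (e.g. ENOBUFS) the mbuf has already been freed by netisr
 */
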
#ifdef DEVICE_POLLING
/*
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
	struct netisr_workstream *nwsp;

	if (!netisr_polling)
		return;

	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
	NWS_SIGNAL(nwsp);
}
#endif

static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
	char swiname[12];
	struct netisr_workstream *nwsp;
	int error;

	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	nwsp = DPCPU_ID_PTR(cpuid, nws);
	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
	nwsp->nws_cpu = cpuid;
	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);
	pc->pc_netisr = nwsp->nws_intr_event;
	if (netisr_bindthreads) {
		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
		if (error != 0)
			printf("%s: cpu %u: intr_event_bind: %d\n", __func__,
			    cpuid, error);
	}
	NETISR_WLOCK();
	nws_array[nws_count] = nwsp->nws_cpu;
	nws_count++;
	NETISR_WUNLOCK();
}

/*
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{

	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));

	NETISR_LOCK_INIT();
	if (netisr_maxthreads < 1)
		netisr_maxthreads = 1;
	if (netisr_maxthreads > mp_ncpus) {
		printf("netisr_init: forcing maxthreads from %d to %d\n",
		    netisr_maxthreads, mp_ncpus);
		netisr_maxthreads = mp_ncpus;
	}
	if (netisr_defaultqlimit > netisr_maxqlimit) {
		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
		    netisr_defaultqlimit, netisr_maxqlimit);
		netisr_defaultqlimit = netisr_maxqlimit;
	}
#ifdef DEVICE_POLLING
	/*
	 * The device polling code is not yet aware of how to deal with
	 * multiple netisr threads, so for the time being compiling in device
	 * polling disables parallel netisr workers.
	 */
	if (netisr_polling && (netisr_maxthreads != 1 ||
	    netisr_bindthreads != 0)) {
		printf("netisr_init: forcing maxthreads to 1 and "
		    "bindthreads to 0 for device polling\n");
		netisr_maxthreads = 1;
		netisr_bindthreads = 0;
	}
#endif
	netisr_start_swi(curcpu, pcpu_find(curcpu));
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);

/*
 * Start worker threads for additional CPUs.  No attempt to gracefully handle
 * work reassignment, we don't yet support dynamic reconfiguration.
 */
static void
netisr_start(void *arg)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		/* XXXRW: Is skipping absent CPUs still required here? */
		if (CPU_ABSENT(pc->pc_cpuid))
			continue;
		/* Worker will already be present for boot CPU. */
		if (pc->pc_netisr != NULL)
			continue;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);

/*
 * Sysctl monitoring for netisr: query a list of registered protocols.
 */
static int
sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_proto *snpp, *snp_array;
	struct netisr_proto *npp;
	u_int counter, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
		npp = &netisr_proto[proto];
		if (npp->np_name == NULL)
			continue;
		snpp = &snp_array[counter];
		snpp->snp_version = sizeof(*snpp);
		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
		snpp->snp_proto = proto;
		snpp->snp_qlimit = npp->np_qlimit;
		snpp->snp_policy = npp->np_policy;
		snpp->snp_dispatch = npp->np_dispatch;
		if (npp->np_m2flow != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
		if (npp->np_m2cpuid != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
		if (npp->np_drainedcpu != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= NETISR_MAXPROT,
	    ("sysctl_netisr_proto: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
	free(snp_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, proto,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
    "S,sysctl_netisr_proto",
    "Return list of protocols registered with netisr");

/*
 * Sysctl monitoring for netisr: query a list of workstreams.
 */
static int
sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_workstream *snwsp, *snws_array;
	struct netisr_workstream *nwsp;
	u_int counter, cpuid;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		snwsp = &snws_array[counter];
		snwsp->snws_version = sizeof(*snwsp);

		/*
		 * For now, we equate workstream IDs and CPU IDs in the
		 * kernel, but expose them independently to userspace in case
		 * that assumption changes in the future.
		 */
		snwsp->snws_wsid = cpuid;
		snwsp->snws_cpu = cpuid;
		if (nwsp->nws_intr_event != NULL)
			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
		NWS_UNLOCK(nwsp);
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= MAXCPU,
	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
	free(snws_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
    "S,sysctl_netisr_workstream",
    "Return list of workstreams implemented by netisr");

/*
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_work *snwp, *snw_array;
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *nwp;
	u_int counter, cpuid, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
	    M_TEMP, M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			npp = &netisr_proto[proto];
			if (npp->np_name == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			snwp = &snw_array[counter];
			snwp->snw_version = sizeof(*snwp);
			snwp->snw_wsid = cpuid;		/* See comment above. */
			snwp->snw_proto = proto;
			snwp->snw_len = nwp->nw_len;
			snwp->snw_watermark = nwp->nw_watermark;
			snwp->snw_dispatched = nwp->nw_dispatched;
			snwp->snw_hybrid_dispatched =
			    nwp->nw_hybrid_dispatched;
			snwp->snw_qdrops = nwp->nw_qdrops;
			snwp->snw_queued = nwp->nw_queued;
			snwp->snw_handled = nwp->nw_handled;
			counter++;
		}
		NWS_UNLOCK(nwsp);
	}
	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
	    ("sysctl_netisr_work: counter too big (%d)", counter));
	NETISR_RUNLOCK(&tracker);
	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
	free(snw_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
    "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");

#ifdef DDB
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *nwp;
	int first, proto;
	u_int cpuid;

	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		first = 1;
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			if (netisr_proto[proto].np_handler == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			if (first) {
				db_printf("%3d ", cpuid);
				first = 0;
			} else
				db_printf("%3s ", "");
			db_printf(
			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
			    netisr_proto[proto].np_name, nwp->nw_len,
			    nwp->nw_watermark, nwp->nw_qlimit,
			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
			    nwp->nw_qdrops, nwp->nw_queued);
		}
	}
}
#endif