/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>			/* needed by net/if.h */
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vimage.h>

#include <net/if.h>			/* for IFF_* flags */
#include <net/netisr.h>			/* for NETISR_POLL */
#include <net/vnet.h>

static void netisr_poll(void);		/* the two netisr handlers */
static void netisr_pollmore(void);
static int poll_switch(SYSCTL_HANDLER_ARGS);

void hardclock_device_poll(void);	/* hook from hardclock */
void ether_poll(int);			/* polling in idle loop */

static struct mtx	poll_mtx;

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations.  This command is issued
 *	periodically but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface.  The driver's ioctl handler should register the
 * interface with the polling code and, if registration succeeds, disable
 * interrupts.  An example is sketched below.
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 99, default 50) sets the share
 * of CPU allocated to user tasks.  CPU is allocated proportionally to
 * the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
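
/*
 * Example: a minimal sketch of the driver side.  The "foo" driver and
 * its helper routines are hypothetical and for illustration only; a
 * real handler must match poll_handler_t as defined in net/if_var.h.
 *
 *	static void
 *	foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		foo_rxeof(sc, count);		receive at most count packets
 *		foo_txeof(sc);			reclaim completed transmits
 *		if (cmd == POLL_AND_CHECK_STATUS)
 *			foo_update_status(sc);	the rare, expensive part
 *	}
 *
 * and, in the SIOCSIFCAP branch of foo_ioctl():
 *
 *	if (ifr->ifr_reqcap & IFCAP_POLLING) {
 *		error = ether_poll_register(foo_poll, ifp);
 *		if (error == 0) {
 *			foo_intr_disable(sc);
 *			ifp->if_capenable |= IFCAP_POLLING;
 *		}
 *	} else {
 *		error = ether_poll_deregister(ifp);
 *		foo_intr_enable(sc);
 *		ifp->if_capenable &= ~IFCAP_POLLING;
 *	}
 */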

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

/*
 * The default poll_burst_max of 150 packets per tick amounts to some
 * 150000 packets/sec at HZ=1000, roughly the maximum frame rate of
 * 100Mbit ethernet (~148800 minimum-size frames/sec).
 */
static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
    "Device polling parameters");

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
    &poll_burst, 0, "Current polling burst size");

static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
        uint32_t val = poll_burst_max;
        int error;

        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
        if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
                return (EINVAL);

        mtx_lock(&poll_mtx);
        poll_burst_max = val;
        if (poll_burst > poll_burst_max)
                poll_burst = poll_burst_max;
        /* val >= MIN_POLL_BURST_MAX, so this keeps the constraints valid. */
        if (poll_each_burst > poll_burst_max)
                poll_each_burst = MIN_POLL_BURST_MAX;
        mtx_unlock(&poll_mtx);

        return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(uint32_t), poll_burst_max_sysctl, "I", "Max polling burst size");

static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
{
        uint32_t val = poll_each_burst;
        int error;

        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
        if (val < 1)
                return (EINVAL);

        mtx_lock(&poll_mtx);
        if (val > poll_burst_max) {
                mtx_unlock(&poll_mtx);
                return (EINVAL);
        }
        poll_each_burst = val;
        mtx_unlock(&poll_mtx);

        return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(uint32_t), poll_each_burst_sysctl, "I",
    "Max size of each burst");
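
/*
 * The knobs in this file are normally adjusted from userland with
 * sysctl(8), for instance (the values here are purely illustrative):
 *
 *	sysctl kern.polling.burst_max=300	# larger bursts, faster link
 *	sysctl kern.polling.each_burst=10
 *
 * Writes go through the handlers above, so the constraints listed at
 * the top of the file continue to hold.
 */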

static uint32_t poll_in_idle_loop = 0;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
    &poll_in_idle_loop, 0, "Enable device polling in idle loop");

static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
        uint32_t val = user_frac;
        int error;

        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
        if (val > 99)	/* val is unsigned, no lower bound to check */
                return (EINVAL);

        mtx_lock(&poll_mtx);
        user_frac = val;
        mtx_unlock(&poll_mtx);

        return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(uint32_t), user_frac_sysctl, "I",
    "Desired user fraction of cpu time");

static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;	/* issue POLL_AND_CHECK_STATUS every 20th poll */
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
        uint32_t val = reg_frac;
        int error;

        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
        if (val < 1 || val > hz)
                return (EINVAL);

        mtx_lock(&poll_mtx);
        reg_frac = val;
        if (reg_frac_count >= reg_frac)
                reg_frac_count = 0;
        mtx_unlock(&poll_mtx);

        return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(uint32_t), reg_frac_sysctl, "I",
    "Check status registers every this many polling cycles");

static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
    &short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
    &lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
    &pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
    &residual_burst, 0, "# of residual cycles in burst");

static uint32_t poll_handlers;	/* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
    &poll_handlers, 0, "Number of registered poll handlers");

static int polling = 0;
SYSCTL_PROC(_kern_polling, OID_AUTO, enable, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), poll_switch, "I", "Switch polling for all interfaces");

static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
    &phase, 0, "Polling phase");
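
/*
 * For reference, the values "phase" takes, as derived from the code
 * below:
 *
 *	0 - idle, no poll in progress
 *	1 - hardclock_device_poll() is about to schedule the netisrs
 *	2 - netisrs scheduled, NETISR_POLL has not run yet
 *	3 - netisr_poll() is running the handlers
 *	4 - netisr_poll() done for this chunk
 *	5 - netisr_pollmore() is running
 *	6 - burst unfinished, another POLL/POLLMORE pair was scheduled
 */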

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
    &suspect, 0, "suspect event");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
    &stalled, 0, "potential stalls");

static uint32_t idlepoll_sleeping;	/* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
    &idlepoll_sleeping, 0, "idlepoll is sleeping");


#define POLL_LIST_LEN	128
struct pollrec {
        poll_handler_t	*handler;
        struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
init_device_poll(void)
{

        mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
        netisr_register(NETISR_POLL, (netisr_t *)netisr_poll, NULL, 0);
        netisr_register(NETISR_POLLMORE, (netisr_t *)netisr_pollmore, NULL, 0);
}
SYSINIT(device_poll, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, init_device_poll, NULL);


/*
 * Hook from hardclock.  Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because polling handlers should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
        static struct timeval prev_t, t;
        int delta;

        if (poll_handlers == 0)
                return;

        microuptime(&t);
        delta = (t.tv_usec - prev_t.tv_usec) +
            (t.tv_sec - prev_t.tv_sec)*1000000;
        /* a tick lasts 1000000/hz us; flag ticks shorter than half of that */
        if (delta * hz < 500000)
                short_ticks++;
        else
                prev_t = t;

        if (pending_polls > 100) {
                /*
                 * Too much, assume it has stalled (not always true,
                 * see the comment above).
                 */
                stalled++;
                pending_polls = 0;
                phase = 0;
        }

        if (phase <= 2) {
                if (phase != 0)
                        suspect++;
                phase = 1;
                schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
                phase = 2;
        }
        if (pending_polls++ > 0)
                lost_polls++;
}

/*
 * ether_poll is called from the idle loop.
 */
void
ether_poll(int count)
{
        int i;

        mtx_lock(&poll_mtx);

        if (count > poll_each_burst)
                count = poll_each_burst;

        for (i = 0; i < poll_handlers; i++)
                pr[i].handler(pr[i].ifp, POLL_ONLY, count);

        mtx_unlock(&poll_mtx);
}
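
/*
 * For orientation, the normal sequence within one tick, as implemented
 * by the two netisr handlers below (the numbers are made up:
 * poll_burst = 15, poll_each_burst = 5):
 *
 *	hardclock	 schedules NETISR_POLL and NETISR_POLLMORE
 *	netisr_poll	 polls 5 packets, residual_burst = 10
 *	other netisrs	 drain the queues just filled by the poll
 *	netisr_pollmore	 residual_burst > 0, reschedules both netisrs
 *	netisr_poll	 polls 5 packets, residual_burst = 5
 *	...		 until residual_burst reaches 0, then
 *	netisr_pollmore	 accounts the time spent and adapts poll_burst
 */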

/*
 * netisr_pollmore is called after other netisrs, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness.  To reduce the problem, and also to account better
 * for time spent in network-related processing, we split the burst into
 * smaller chunks of fixed size, giving control to the other netisrs between
 * chunks.  This helps in improving the fairness, reducing livelock (because
 * we emulate more closely the "process to completion" that we have with
 * fastforwarding) and accounting for the work performed in low level
 * handling and forwarding.
 */

static struct timeval poll_start_t;

static void
netisr_pollmore(void)
{
        struct timeval t;
        int kern_load;

        mtx_lock(&poll_mtx);
        phase = 5;
        if (residual_burst > 0) {
                schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
                mtx_unlock(&poll_mtx);
                /* will run immediately on return, followed by netisrs */
                return;
        }
        /* here we can account time spent in netisrs in this tick */
        microuptime(&t);
        kern_load = (t.tv_usec - poll_start_t.tv_usec) +
            (t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
        /* us * hz / 10^4: percentage of a tick (one tick is 1000000/hz us) */
        kern_load = (kern_load * hz) / 10000;		/* 0..100 */
        if (kern_load > (100 - user_frac)) {	/* try decrease ticks */
                if (poll_burst > 1)
                        poll_burst--;
        } else {
                if (poll_burst < poll_burst_max)
                        poll_burst++;
        }

        pending_polls--;
        if (pending_polls == 0)			/* we are done */
                phase = 0;
        else {
                /*
                 * Last cycle was long and caused us to miss one or more
                 * hardclock ticks.  Restart processing again, but slightly
                 * reduce the burst size to prevent that this happens again.
                 */
                poll_burst -= (poll_burst / 8);
                if (poll_burst < 1)
                        poll_burst = 1;
                schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
                phase = 6;
        }
        mtx_unlock(&poll_mtx);
}

/*
 * netisr_poll is scheduled by schednetisr when appropriate, typically once
 * per tick.
 */
static void
netisr_poll(void)
{
        int i, cycles;
        enum poll_cmd arg = POLL_ONLY;

        mtx_lock(&poll_mtx);
        phase = 3;
        if (residual_burst == 0) {	/* first call in this tick */
                microuptime(&poll_start_t);
                if (++reg_frac_count == reg_frac) {
                        arg = POLL_AND_CHECK_STATUS;
                        reg_frac_count = 0;
                }

                residual_burst = poll_burst;
        }
        cycles = (residual_burst < poll_each_burst) ?
            residual_burst : poll_each_burst;
        residual_burst -= cycles;

        for (i = 0; i < poll_handlers; i++)
                pr[i].handler(pr[i].ifp, arg, cycles);

        phase = 4;
        mtx_unlock(&poll_mtx);
}
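
/*
 * A worked example for the load computation in netisr_pollmore() above,
 * with made-up numbers: at HZ=1000 a tick lasts 1000us.  If the polls
 * and the subsequent netisrs consumed 300us of it, then
 *
 *	kern_load = 300 * 1000 / 10000 = 30	(percent of a tick)
 *
 * With the default user_frac = 50, 30 does not exceed the 50% kernel
 * share, so poll_burst is allowed to grow by one.
 */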

/*
 * Try to register a routine for polling.  Returns 0 if successful
 * (and polling should be enabled), error code otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_ioctl() functions.
 */
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
        int i;

        KASSERT(h != NULL, ("%s: handler is NULL", __func__));
        KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

        mtx_lock(&poll_mtx);
        if (poll_handlers >= POLL_LIST_LEN) {
                /*
                 * List full, cannot register more entries.
                 * This should never happen; if it does, it is probably a
                 * broken driver trying to register multiple times. Checking
                 * this at runtime is expensive, and won't solve the problem
                 * anyways, so just report a few times and then give up.
                 */
                static int verbose = 10;
                if (verbose > 0) {
                        log(LOG_ERR, "poll handlers list full, "
                            "maybe a broken driver?\n");
                        verbose--;
                }
                mtx_unlock(&poll_mtx);
                return (ENOMEM); /* no polling for you */
        }

        for (i = 0; i < poll_handlers; i++)
                if (pr[i].ifp == ifp && pr[i].handler != NULL) {
                        mtx_unlock(&poll_mtx);
                        log(LOG_DEBUG, "ether_poll_register: %s: handler"
                            " already registered\n", ifp->if_xname);
                        return (EEXIST);
                }

        pr[poll_handlers].handler = h;
        pr[poll_handlers].ifp = ifp;
        poll_handlers++;
        mtx_unlock(&poll_mtx);
        if (idlepoll_sleeping)
                wakeup(&idlepoll_sleeping);
        return (0);
}

/*
 * Remove interface from the polling list.  Called from *_ioctl(), too.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
        int i;

        KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

        mtx_lock(&poll_mtx);

        for (i = 0; i < poll_handlers; i++)
                if (pr[i].ifp == ifp) /* found it */
                        break;
        if (i == poll_handlers) {
                log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
                    ifp->if_xname);
                mtx_unlock(&poll_mtx);
                return (ENOENT);
        }
        poll_handlers--;
        if (i < poll_handlers) {	/* Last entry replaces this one. */
                pr[i].handler = pr[poll_handlers].handler;
                pr[i].ifp = pr[poll_handlers].ifp;
        }
        mtx_unlock(&poll_mtx);
        return (0);
}

/*
 * Legacy interface for turning polling on for all interfaces at one time.
 * Prefer per-interface control via ifconfig(8), e.g. "ifconfig fxp0 polling".
 */
static int
poll_switch(SYSCTL_HANDLER_ARGS)
{
        INIT_VNET_NET(curvnet);
        struct ifnet *ifp;
        int error;
        int val = polling;

        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);

        if (val == polling)
                return (0);

        if (val < 0 || val > 1)
                return (EINVAL);

        polling = val;

        IFNET_RLOCK();
        TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
                if (ifp->if_capabilities & IFCAP_POLLING) {
                        struct ifreq ifr;

                        if (val == 1)
                                ifr.ifr_reqcap =
                                    ifp->if_capenable | IFCAP_POLLING;
                        else
                                ifr.ifr_reqcap =
                                    ifp->if_capenable & ~IFCAP_POLLING;
                        IFF_LOCKGIANT(ifp);	/* LOR here */
                        (void) (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
                        IFF_UNLOCKGIANT(ifp);
                }
        }
        IFNET_RUNLOCK();

        log(LOG_ERR, "kern.polling.enable is deprecated. Use ifconfig(8)\n");

        return (0);
}

static void
poll_idle(void)
{
        struct thread *td = curthread;
        struct rtprio rtp;

        rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
        rtp.type = RTP_PRIO_IDLE;
        PROC_SLOCK(td->td_proc);
        rtp_to_pri(&rtp, td);
        PROC_SUNLOCK(td->td_proc);

        for (;;) {
                if (poll_in_idle_loop && poll_handlers > 0) {
                        idlepoll_sleeping = 0;
                        ether_poll(poll_each_burst);
                        thread_lock(td);
                        mi_switch(SW_VOL, NULL);
                        thread_unlock(td);
                } else {
                        idlepoll_sleeping = 1;
                        tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
                }
        }
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
        "idlepoll",
        poll_idle,
        &idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start,
    &idlepoll_kp);