/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>		/* needed by sys/mutex.h */
#include <sys/mutex.h>		/* for Giant and sched_lock */
#include <sys/socket.h>		/* needed by net/if.h */
#include <sys/sysctl.h>

#include <net/if.h>		/* for IFF_* flags */
#include <net/netisr.h>		/* for NETISR_POLL */

#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/kthread.h>

static void netisr_poll(void);		/* the two netisr handlers */
static void netisr_pollmore(void);

void hardclock_device_poll(void);	/* hook from hardclock */
void ether_poll(int);			/* polling while in trap */

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature try to register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *  POLL_DEREGISTER: deregister and return to interrupt mode.
 *
 * The first two commands are only issued if the interface is marked as
 * 'IFF_UP and IFF_RUNNING', the last one only if IFF_RUNNING is set.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Deregistration can be requested by the driver itself (typically in the
 * *_stop() routine), or by the polling code, which invokes the handler
 * with the POLL_DEREGISTER command.
 *
 * Polling can be globally enabled or disabled with the sysctl variable
 * kern.polling.enable (default is 0, disabled).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left to their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	0 <= poll_in_trap <= poll_each_burst
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
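
/*
 * For illustration only, a minimal sketch of a compliant driver handler
 * for a hypothetical "foo" driver. foo_softc, foo_enable_intr(),
 * foo_rxeof(), foo_txeof() and foo_check_status() are assumptions, not
 * part of this file; real drivers follow this general shape.
 */
#ifdef EXAMPLE_ONLY
static void
foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct foo_softc *sc = ifp->if_softc;	/* hypothetical softc */

	if (cmd == POLL_DEREGISTER) {	/* final call, back to interrupt mode */
		foo_enable_intr(sc);
		return;
	}
	foo_rxeof(sc, count);		/* receive at most "count" packets */
	foo_txeof(sc);			/* reclaim completed transmissions */
	if (cmd == POLL_AND_CHECK_STATUS)	/* issued every reg_frac calls */
		foo_check_status(sc);	/* the expensive, infrequent work */
}
#endif /* EXAMPLE_ONLY */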

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

static u_int32_t poll_burst = 5;
SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RW,
	&poll_burst, 0, "Current polling burst size");

static u_int32_t poll_each_burst = 5;
SYSCTL_UINT(_kern_polling, OID_AUTO, each_burst, CTLFLAG_RW,
	&poll_each_burst, 0, "Max size of each burst");

static u_int32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
SYSCTL_UINT(_kern_polling, OID_AUTO, burst_max, CTLFLAG_RW,
	&poll_burst_max, 0, "Max Polling burst size");

static u_int32_t poll_in_idle_loop = 0;	/* do we poll in the idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

u_int32_t poll_in_trap;			/* used in trap.c */
SYSCTL_UINT(_kern_polling, OID_AUTO, poll_in_trap, CTLFLAG_RW,
	&poll_in_trap, 0, "Poll burst size during a trap");

static u_int32_t user_frac = 50;
SYSCTL_UINT(_kern_polling, OID_AUTO, user_frac, CTLFLAG_RW,
	&user_frac, 0, "Desired user fraction of cpu time");

static u_int32_t reg_frac = 20;
SYSCTL_UINT(_kern_polling, OID_AUTO, reg_frac, CTLFLAG_RW,
	&reg_frac, 0, "Check status registers every this many polling cycles");

static u_int32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RW,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static u_int32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RW,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static u_int32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RW,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RW,
	&residual_burst, 0, "# of residual cycles in burst");

static u_int32_t poll_handlers;		/* next free entry in pr[] */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static int polling = 0;			/* global polling enable */
SYSCTL_INT(_kern_polling, OID_AUTO, enable, CTLFLAG_RW,
	&polling, 0, "Polling enabled");

static u_int32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RW,
	&phase, 0, "Polling phase");

static u_int32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RW,
	&suspect, 0, "suspect event");

static u_int32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RW,
	&stalled, 0, "potential stalls");

static u_int32_t idlepoll_sleeping;	/* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");


#define POLL_LIST_LEN	128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
init_device_poll(void)
{

	netisr_register(NETISR_POLL, (netisr_t *)netisr_poll, NULL, 0);
	netisr_register(NETISR_POLLMORE, (netisr_t *)netisr_pollmore, NULL, 0);
}
SYSINIT(device_poll, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, init_device_poll, NULL)


/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
	/* a tick less than half its nominal duration is suspiciously short */
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop or from the trap handler.
 */
void
ether_poll(int count)
{
	int i;

	mtx_lock(&Giant);

	if (count > poll_each_burst)
		count = poll_each_burst;
	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].handler && (IFF_UP|IFF_RUNNING) ==
		    (pr[i].ifp->if_flags & (IFF_UP|IFF_RUNNING)) )
			pr[i].handler(pr[i].ifp, 0, count); /* quick check */
	mtx_unlock(&Giant);
}
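
/*
 * For reference, a hedged sketch of the trap-time hook mentioned above.
 * The real call site lives in the machine-dependent trap code, not in
 * this file; "some_trap_path" is purely illustrative.
 */
#ifdef EXAMPLE_ONLY
static void
some_trap_path(void)
{
	if (poll_in_trap)		/* drain a few packets during the trap */
		ether_poll(poll_in_trap);
}
#endif /* EXAMPLE_ONLY */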

/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst in smaller
 * chunks of fixed size, giving control to the other netisr's between chunks.
 * This helps improve fairness, reduces livelock (because we emulate more
 * closely the "process to completion" that we have with fastforwarding),
 * and accounts for the work performed in low-level handling and forwarding.
 */

static struct timeval poll_start_t;

static void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;
	/* XXX run at splhigh() or equivalent */

	phase = 5;
	if (residual_burst > 0) {
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
	/* convert us to a percentage of the tick: with HZ=1000, us/10 */
	kern_load = (kern_load * hz) / 10000;			/* 0..100 */
	if (kern_load > (100 - user_frac)) {	/* try to decrease burst */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0)		/* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 6;
	}
}
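
/*
 * Informal sketch of the resulting per-tick sequence (illustrative,
 * assuming a single pending poll and no lost ticks):
 *
 *	hardclock_device_poll()	-> schedules NETISR_POLL | NETISR_POLLMORE
 *	netisr_poll()		-> polls up to poll_each_burst packets,
 *				   leaving the rest in residual_burst
 *	other netisrs (ip, ...)	-> consume the packets just queued
 *	netisr_pollmore()	-> if residual_burst > 0, reschedules both
 *				   netisrs and the cycle repeats; otherwise
 *				   adapts poll_burst based on user_frac
 */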

/*
 * netisr_poll is scheduled by schednetisr when appropriate, typically once
 * per tick. It acquires Giant (the successor of the old splnet/splimp
 * upgrade), revalidates the tunable parameters, and calls all registered
 * handlers.
 */
static void
netisr_poll(void)
{
	static int reg_frac_count;
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;
	mtx_lock(&Giant);

	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		/*
		 * Check that parameters are consistent with runtime
		 * variables. Some of these tests could be done at sysctl
		 * time, but the savings would be very limited because we
		 * still have to check against reg_frac_count and
		 * poll_each_burst. So, instead of writing separate sysctl
		 * handlers, we do it all here.
		 */

		if (reg_frac > hz)
			reg_frac = hz;
		else if (reg_frac < 1)
			reg_frac = 1;
		if (reg_frac_count > reg_frac)
			reg_frac_count = reg_frac - 1;
		if (reg_frac_count-- == 0) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = reg_frac - 1;
		}
		if (poll_burst_max < MIN_POLL_BURST_MAX)
			poll_burst_max = MIN_POLL_BURST_MAX;
		else if (poll_burst_max > MAX_POLL_BURST_MAX)
			poll_burst_max = MAX_POLL_BURST_MAX;

		if (poll_each_burst < 1)
			poll_each_burst = 1;
		else if (poll_each_burst > poll_burst_max)
			poll_each_burst = poll_burst_max;

		if (poll_burst > poll_burst_max)
			poll_burst = poll_burst_max;
		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	if (polling) {
		for (i = 0 ; i < poll_handlers ; i++)
			if (pr[i].handler && (IFF_UP|IFF_RUNNING) ==
			    (pr[i].ifp->if_flags & (IFF_UP|IFF_RUNNING)) )
				pr[i].handler(pr[i].ifp, arg, cycles);
	} else {	/* unregister */
		for (i = 0 ; i < poll_handlers ; i++) {
			if (pr[i].handler &&
			    pr[i].ifp->if_flags & IFF_RUNNING) {
				pr[i].ifp->if_flags &= ~IFF_POLLING;
				pr[i].handler(pr[i].ifp, POLL_DEREGISTER, 1);
			}
			pr[i].handler = NULL;
		}
		residual_burst = 0;
		poll_handlers = 0;
	}
	/* on -stable, schednetisr(NETISR_POLLMORE); */
	phase = 4;
	mtx_unlock(&Giant);
}

/*
 * Try to register routine for polling. Returns 1 if successful
 * (and polling should be enabled), 0 otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_intr() functions, so we do not need
 * further locking.
 */
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
	int s;

	if (polling == 0)		/* polling disabled, cannot register */
		return 0;
	if (h == NULL || ifp == NULL)	/* bad arguments */
		return 0;
	if (!(ifp->if_flags & IFF_UP))	/* must be up */
		return 0;
	if (ifp->if_flags & IFF_POLLING) /* already polling */
		return 0;

	s = splhigh();
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;
		splx(s);
		if (verbose > 0) {
			printf("poll handlers list full, "
			    "maybe a broken driver?\n");
			verbose--;
		}
		return 0;	/* no polling for you */
	}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	ifp->if_flags |= IFF_POLLING;
	splx(s);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return 1;	/* polling enabled in next call */
}
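
/*
 * A hedged sketch of a typical driver-side registration, using the
 * hypothetical "foo" driver again; foo_intr_disable() and the softc
 * layout are assumptions, not part of this file.
 */
#ifdef EXAMPLE_ONLY
static void
foo_intr(void *arg)
{
	struct foo_softc *sc = arg;		/* hypothetical softc */
	struct ifnet *ifp = &sc->foo_if;	/* hypothetical member */

	if (ether_poll_register(foo_poll, ifp)) {
		foo_intr_disable(sc);	/* foo_poll() does the work from now on */
		return;
	}
	/* ... normal interrupt service ... */
}
#endif /* EXAMPLE_ONLY */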

/*
 * Remove interface from the polling list. Normally called by *_stop().
 * It is not an error to call it with IFF_POLLING clear; such calls are
 * sufficiently rare that it is preferable to save the space of an extra
 * test in each driver in exchange for one additional function call.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	int i;

	mtx_lock(&Giant);
	if (!ifp || !(ifp->if_flags & IFF_POLLING)) {
		mtx_unlock(&Giant);
		return 0;
	}
	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].ifp == ifp)	/* found it */
			break;
	ifp->if_flags &= ~IFF_POLLING;	/* found or not... */
	if (i == poll_handlers) {
		mtx_unlock(&Giant);
		printf("ether_poll_deregister: ifp not found!!!\n");
		return 0;
	}
	poll_handlers--;
	if (i < poll_handlers) {	/* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&Giant);
	return 1;
}

static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;
	int pri;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			/* Giant is recursive; ether_poll() takes it again */
			mtx_lock(&Giant);
			ether_poll(poll_each_burst);
			mtx_unlock(&Giant);
			mtx_assert(&Giant, MA_NOTOWNED);
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_VOL, NULL);
			mtx_unlock_spin(&sched_lock);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, pri, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	 "idlepoll",
	 poll_idle,
	 &idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start, &idlepoll_kp)
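
/*
 * And the matching driver-side deregistration, typically from the
 * hypothetical foo_stop(); again a sketch, not part of this file.
 */
#ifdef EXAMPLE_ONLY
static void
foo_stop(struct foo_softc *sc)
{
	struct ifnet *ifp = &sc->foo_if;	/* hypothetical member */

	ether_poll_deregister(ifp);	/* harmless if we were not polling */
	/* ... stop the hardware, free descriptors, etc. ... */
}
#endif /* EXAMPLE_ONLY */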