/*-
 * Copyright (c) 1991-1997 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the Network Research
 *	Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LBL code modified by speer@eng.sun.com, May 1997.
 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
 *
 * @(#)rm_class.c  1.48  97/12/05 SMI
 * $KAME: altq_rmclass.c,v 1.19 2005/04/13 03:44:25 suz Exp $
 * $FreeBSD$
 */
#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>

#include <net/if.h>
#include <net/if_var.h>

#include <net/altq/if_altq.h>
#include <net/altq/altq.h>
#include <net/altq/altq_codel.h>
#include <net/altq/altq_rmclass.h>
#include <net/altq/altq_rmclass_debug.h>
#include <net/altq/altq_red.h>
#include <net/altq/altq_rio.h>

/*
 * Local Macros
 */

#define	reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }

/*
 * Local routines.
 */

static int	rmc_satisfied(struct rm_class *, struct timeval *);
static void	rmc_wrr_set_weights(struct rm_ifdat *);
static void	rmc_depth_compute(struct rm_class *);
static void	rmc_depth_recompute(rm_class_t *);

static mbuf_t	*_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
static mbuf_t	*_rmc_prr_dequeue_next(struct rm_ifdat *, int);

static int	_rmc_addq(rm_class_t *, mbuf_t *);
static void	_rmc_dropq(rm_class_t *);
static mbuf_t	*_rmc_getq(rm_class_t *);
static mbuf_t	*_rmc_pollq(rm_class_t *);

static int	rmc_under_limit(struct rm_class *, struct timeval *);
static void	rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
static void	rmc_drop_action(struct rm_class *);
static void	rmc_restart(struct rm_class *);
static void	rmc_root_overlimit(struct rm_class *, struct rm_class *);

#define	BORROW_OFFTIME
/*
 * BORROW_OFFTIME (experimental):
 * borrow the offtime of the class being borrowed from.
 * the reason is that when its own offtime is set, the class is unable
 * to borrow much, especially when cutoff is taking effect.
 * but when the borrowed class is overloaded (avgidle is close to minidle),
 * use the borrowing class's offtime to avoid overload.
 */
#define	ADJUST_CUTOFF
/*
 * ADJUST_CUTOFF (experimental):
 * if no underlimit class is found due to cutoff, increase cutoff and
 * retry the scheduling loop.
 * also, don't invoke delay_actions while cutoff is taking effect,
 * since a sleeping class won't have a chance to be scheduled in the
 * next loop.
 *
 * now the heuristics for setting the top-level variable (cutoff_) become:
 *	1. if a packet arrives for a not-overlimit class, set cutoff
 *	   to the depth of the class.
 *	2. if cutoff is i, and a packet arrives for an overlimit class
 *	   with an underlimit ancestor at a lower level than i (say j),
 *	   then set cutoff to j.
 *	3. at scheduling a packet, if there is no underlimit class
 *	   due to the current cutoff level, increase cutoff by 1 and
 *	   then try to schedule again.
 */

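/*
 * For illustration (hypothetical topology, not from the original
 * sources): take a three-level hierarchy with the root at depth 2,
 * interior classes at depth 1 and leaves at depth 0.  After a reset,
 * cutoff_ is RM_MAXDEPTH.  A packet arriving for an underlimit leaf
 * pulls cutoff_ down to 0 (heuristic 1), disabling all borrowing.
 * With cutoff_ at 2, a packet arriving for an overlimit leaf whose
 * depth-1 parent is underlimit sets cutoff_ to 1 (heuristic 2), so
 * borrowing stops at that parent.  Whenever a scheduling pass finds
 * no underlimit class within the current cutoff, heuristic 3 raises
 * cutoff_ one level per retry, up to the depth of the root.
 */
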
/*
 * rm_class_t *
 * rmc_newclass(...) - Create a new resource management class at priority
 * 'pri' on the interface given by 'ifd'.
 *
 * nsecPerByte	is the data rate of the interface in nanoseconds/byte.
 *		E.g., 800 for a 10Mb/s ethernet.  If the class gets less
 *		than 100% of the bandwidth, this number should be the
 *		'effective' rate for the class.  Let f be the
 *		bandwidth fraction allocated to this class, and let
 *		nsPerByte be the data rate of the output link in
 *		nanoseconds/byte.  Then nsecPerByte is set to
 *		nsPerByte / f.  E.g., 1600 (= 800 / .5)
 *		for a class that gets 50% of an ethernet's bandwidth.
 *
 * action	the routine to call when the class is over limit.
 *
 * maxq		max allowable queue size for class (in packets).
 *
 * parent	parent class pointer.
 *
 * borrow	class to borrow from (should be either 'parent' or null).
 *
 * maxidle	max value allowed for class 'idle' time estimate (this
 *		parameter determines how large an initial burst of packets
 *		can be before overlimit action is invoked).
 *
 * offtime	how long 'delay' action will delay when class goes over
 *		limit (this parameter determines the steady-state burst
 *		size when a class is running over its limit).
 *
 * Maxidle and offtime have to be computed from the following:  If the
 * average packet size is s, the bandwidth fraction allocated to this
 * class is f, we want to allow b packet bursts, and the gain of the
 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
 *
 *	ptime = s * nsPerByte * (1 - f) / f
 *	maxidle = ptime * (1 - g^b) / g^b
 *	minidle = -ptime * (1 / (f - 1))
 *	offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
 *
 * Operationally, it's convenient to specify maxidle & offtime in units
 * independent of the link bandwidth so the maxidle & offtime passed to
 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
 * (The constant factor is a scale factor needed to make the parameters
 * integers.  This scaling also means that the 'unscaled' values of
 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
 * maxidle also must be scaled upward by this value.  Thus, the passed
 * values for maxidle and offtime can be computed as follows:
 *
 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
 * offtime = offtime * 8 / (1000 * nsecPerByte)
 *
 * When USE_HRTIME is employed, then maxidle and offtime become:
 *	maxidle = maxidle * (8.0 / nsecPerByte);
 *	offtime = offtime * (8.0 / nsecPerByte);
 */
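/*
 * Worked example with hypothetical numbers: for a class given half of
 * a 10Mb/s ethernet, nsecPerByte == 1600.  Suppose the formulas above
 * yield an unscaled maxidle of 530000 ns.  Assuming RM_FILTER_GAIN is
 * 5 (so 2^RM_FILTER_GAIN == 32), the value to pass in is
 *
 *	maxidle = 530000 * 32 * 8 / (1000 * 1600) =~ 85
 *
 * and rmc_newclass() below recovers (85 * 1600) / 8 == 17000, i.e.
 * roughly the original 530 usec scaled up by 2^RM_FILTER_GAIN, which
 * is the form the 'idle' filter keeps its estimate in.
 */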
struct rm_class *
rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
    void (*action)(rm_class_t *, rm_class_t *), int maxq,
    struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
    int minidle, u_int offtime, int pktsize, int flags)
{
	struct rm_class *cl;
	struct rm_class *peer;
	int s;

	if (pri >= RM_MAXPRIO)
		return (NULL);
#ifndef ALTQ_RED
	if (flags & RMCF_RED) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RED not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_RIO
	if (flags & RMCF_RIO) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RIO not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_CODEL
	if (flags & RMCF_CODEL) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: CODEL not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct rm_class), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl == NULL)
		return (NULL);
	CALLOUT_INIT(&cl->callout_);
	cl->q_ = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl->q_ == NULL) {
		free(cl, M_DEVBUF);
		return (NULL);
	}

	/*
	 * Class initialization.
	 */
	cl->children_ = NULL;
	cl->parent_ = parent;
	cl->borrow_ = borrow;
	cl->leaf_ = 1;
	cl->ifdat_ = ifd;
	cl->pri_ = pri;
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->depth_ = 0;
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;
	qtype(cl->q_) = Q_DROPHEAD;
	qlen(cl->q_) = 0;
	cl->flags_ = flags;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif
	cl->overlimit = action;

#ifdef ALTQ_RED
	if (flags & (RMCF_RED|RMCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & RMCF_ECN)
			red_flags |= REDF_ECN;
		if (flags & RMCF_FLOWVALVE)
			red_flags |= REDF_FLOWVALVE;
#ifdef ALTQ_RIO
		if (flags & RMCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		red_pkttime = nsecPerByte * pktsize / 1000;

		if (flags & RMCF_RED) {
			cl->red_ = red_alloc(0, 0,
			    qlimit(cl->q_) * 10/100,
			    qlimit(cl->q_) * 30/100,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->red_ = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */
#ifdef ALTQ_CODEL
	if (flags & RMCF_CODEL) {
		cl->codel_ = codel_alloc(5, 100, 0);
		if (cl->codel_ != NULL)
			qtype(cl->q_) = Q_CODEL;
	}
#endif

	/*
	 * put the class into the class tree
	 */
	s = splnet();
	IFQ_LOCK(ifd->ifq_);
	if ((peer = ifd->active_[pri]) != NULL) {
		/* find the last class at this pri */
		cl->peer_ = peer;
		while (peer->peer_ != ifd->active_[pri])
			peer = peer->peer_;
		peer->peer_ = cl;
	} else {
		ifd->active_[pri] = cl;
		cl->peer_ = cl;
	}

	if (cl->parent_) {
		cl->next_ = parent->children_;
		parent->children_ = cl;
		parent->leaf_ = 0;
	}

	/*
	 * Compute the depth of this class and its ancestors in the class
	 * hierarchy.
	 */
	rmc_depth_compute(cl);

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->num_[pri]++;
		ifd->alloc_[pri] += cl->allotment_;
		rmc_wrr_set_weights(ifd);
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
	return (cl);
}

int
rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
    int minidle, u_int offtime, int pktsize)
{
	struct rm_ifdat *ifd;
	u_int old_allotment;
	int s;

	ifd = cl->ifdat_;
	old_allotment = cl->allotment_;

	s = splnet();
	IFQ_LOCK(ifd->ifq_);
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
		rmc_wrr_set_weights(ifd);
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
	return (0);
}

/*
 * static void
 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
 *	the appropriate round-robin weights for the CBQ weighted round-robin
 *	algorithm.
 *
 *	Returns:	NONE
 */

static void
rmc_wrr_set_weights(struct rm_ifdat *ifd)
{
	int i;
	struct rm_class *cl, *clh;

	for (i = 0; i < RM_MAXPRIO; i++) {
		/*
		 * This is inverted from that of the simulator to
		 * maintain precision.
		 */
		if (ifd->num_[i] == 0)
			ifd->M_[i] = 0;
		else
			ifd->M_[i] = ifd->alloc_[i] /
			    (ifd->num_[i] * ifd->maxpkt_);
		/*
		 * Compute the weighted allotment for each class.
		 * This takes the expensive div instruction out
		 * of the main loop for the wrr scheduling path.
		 * These only get recomputed when a class comes or
		 * goes.
		 */
		if (ifd->active_[i] != NULL) {
			clh = cl = ifd->active_[i];
			do {
				/* safe-guard for slow link or alloc_ == 0 */
				if (ifd->M_[i] == 0)
					cl->w_allotment_ = 0;
				else
					cl->w_allotment_ = cl->allotment_ /
					    ifd->M_[i];
				cl = cl->peer_;
			} while ((cl != NULL) && (cl != clh));
		}
	}
}

int
rmc_get_weight(struct rm_ifdat *ifd, int pri)
{
	if ((pri >= 0) && (pri < RM_MAXPRIO))
		return (ifd->M_[pri]);
	else
		return (0);
}

/*
 * static void
 * rmc_depth_compute(struct rm_class *cl) - This function computes the
 *	appropriate depth of class 'cl' and its ancestors.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_compute(struct rm_class *cl)
{
	rm_class_t *t = cl, *p;

	/*
	 * Recompute the depth for the branch of the tree.
	 */
	while (t != NULL) {
		p = t->parent_;
		if (p && (t->depth_ >= p->depth_)) {
			p->depth_ = t->depth_ + 1;
			t = p;
		} else
			t = NULL;
	}
}

/*
 * static void
 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
 *	the depth of the tree after a class has been deleted.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_recompute(rm_class_t *cl)
{
#if 1 /* ALTQ */
	rm_class_t *p, *t;

	p = cl;
	while (p != NULL) {
		if ((t = p->children_) == NULL) {
			p->depth_ = 0;
		} else {
			int cdepth = 0;

			while (t != NULL) {
				if (t->depth_ > cdepth)
					cdepth = t->depth_;
				t = t->next_;
			}

			if (p->depth_ == cdepth + 1)
				/* no change to this parent */
				return;

			p->depth_ = cdepth + 1;
		}

		p = p->parent_;
	}
#else
	rm_class_t *t;

	if (cl->depth_ >= 1) {
		if (cl->children_ == NULL) {
			cl->depth_ = 0;
		} else if ((t = cl->children_) != NULL) {
			while (t != NULL) {
				if (t->children_ != NULL)
					rmc_depth_recompute(t);
				t = t->next_;
			}
		} else
			rmc_depth_compute(cl);
	}
#endif
}

/*
 * void
 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
 *	function deletes a class from the link-sharing structure and frees
 *	all resources associated with the class.
 *
 *	Returns:	NONE
 */

void
rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
{
	struct rm_class *p, *head, *previous;
	int s;

	ASSERT(cl->children_ == NULL);

	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);

	s = splnet();
	IFQ_LOCK(ifd->ifq_);
	/*
	 * Free packets in the packet queue.
	 * XXX - this may not be a desired behavior.  Packets should be
	 *	 re-queued.
	 */
	rmc_dropall(cl);

	/*
	 * If the class has a parent, then remove the class from the
	 * parent's children chain.
	 */
	if (cl->parent_ != NULL) {
		head = cl->parent_->children_;
		p = previous = head;
		if (head->next_ == NULL) {
			ASSERT(head == cl);
			cl->parent_->children_ = NULL;
			cl->parent_->leaf_ = 1;
		} else while (p != NULL) {
			if (p == cl) {
				if (cl == head)
					cl->parent_->children_ = cl->next_;
				else
					previous->next_ = cl->next_;
				cl->next_ = NULL;
				p = NULL;
			} else {
				previous = p;
				p = p->next_;
			}
		}
	}

	/*
	 * Delete class from class priority peer list.
	 */
	if ((p = ifd->active_[cl->pri_]) != NULL) {
		/*
		 * If there is more than one member of this priority
		 * level, then look for class(cl) in the priority level.
		 */
		if (p != p->peer_) {
			while (p->peer_ != cl)
				p = p->peer_;
			p->peer_ = cl->peer_;

			if (ifd->active_[cl->pri_] == cl)
				ifd->active_[cl->pri_] = cl->peer_;
		} else {
			ASSERT(p == cl);
			ifd->active_[cl->pri_] = NULL;
		}
	}

	/*
	 * Recompute the WRR weights.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] -= cl->allotment_;
		ifd->num_[cl->pri_]--;
		rmc_wrr_set_weights(ifd);
	}

	/*
	 * Re-compute the depth of the tree.
	 */
#if 1 /* ALTQ */
	rmc_depth_recompute(cl->parent_);
#else
	rmc_depth_recompute(ifd->root_);
#endif

	IFQ_UNLOCK(ifd->ifq_);
	splx(s);

	/*
	 * Free the class structure.
	 */
	if (cl->red_ != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->q_))
			rio_destroy((rio_t *)cl->red_);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->q_))
			red_destroy(cl->red_);
#endif
#ifdef ALTQ_CODEL
		if (q_is_codel(cl->q_))
			codel_destroy(cl->codel_);
#endif
	}
	free(cl->q_, M_DEVBUF);
	free(cl, M_DEVBUF);
}

/*
 * void
 * rmc_init(...) - Initialize the resource management data structures
 *	associated with the output portion of interface 'ifp'.  'ifd' is
 *	where the structures will be built (for backwards compatibility, the
 *	structures aren't kept in the ifnet struct).  'nsecPerByte'
 *	gives the link speed (inverse of bandwidth) in nanoseconds/byte.
 *	'restart' is the driver-specific routine that the generic 'delay
 *	until under limit' action will call to restart output.  `maxq'
 *	is the queue size of the 'link' & 'default' classes.  'maxqueued'
 *	is the maximum number of packets that the resource management
 *	code will allow to be queued 'downstream' (this is typically 1).
 *
 *	Returns:	NONE
 */

void
rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
    void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
    int minidle, u_int offtime, int flags)
{
	int i, mtu;

	/*
	 * Initialize the CBQ tracing/debug facility.
	 */
	CBQTRACEINIT();

	bzero((char *)ifd, sizeof (*ifd));
	mtu = ifq->altq_ifp->if_mtu;
	ifd->ifq_ = ifq;
	ifd->restart = restart;
	ifd->maxqueued_ = maxqueued;
	ifd->ns_per_byte_ = nsecPerByte;
	ifd->maxpkt_ = mtu;
	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
#if 1
	ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
	if (mtu * nsecPerByte > 10 * 1000000)
		ifd->maxiftime_ /= 4;
#endif

	reset_cutoff(ifd);
	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);

	/*
	 * Initialize the CBQ's WRR state.
	 */
	for (i = 0; i < RM_MAXPRIO; i++) {
		ifd->alloc_[i] = 0;
		ifd->M_[i] = 0;
		ifd->num_[i] = 0;
		ifd->na_[i] = 0;
		ifd->active_[i] = NULL;
	}

	/*
	 * Initialize current packet state.
	 */
	ifd->qi_ = 0;
	ifd->qo_ = 0;
	for (i = 0; i < RM_MAXQUEUED; i++) {
		ifd->class_[i] = NULL;
		ifd->curlen_[i] = 0;
		ifd->borrowed_[i] = NULL;
	}

	/*
	 * Create the root class of the link-sharing structure.
	 */
	if ((ifd->root_ = rmc_newclass(0, ifd,
	    nsecPerByte,
	    rmc_root_overlimit, maxq, 0, 0,
	    maxidle, minidle, offtime,
	    0, 0)) == NULL) {
		printf("rmc_init: root class not allocated\n");
		return;
	}
	ifd->root_->depth_ = 0;
}

/*
 * void
 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
 *	mbuf 'm' to queue for resource class 'cl'.  This routine is called
 *	by a driver's if_output routine.  This routine must be called with
 *	output packet completion interrupts locked out (to avoid racing with
 *	rmc_dequeue_next).
 *
 *	Returns:	0 on successful queueing
 *			-1 when packet drop occurs
 */
int
rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
{
	struct timeval now;
	struct rm_ifdat *ifd = cl->ifdat_;
	int cpri = cl->pri_;
	int is_empty = qempty(cl->q_);

	RM_GETTIME(now);
	if (ifd->cutoff_ > 0) {
		if (TV_LT(&cl->undertime_, &now)) {
			if (ifd->cutoff_ > cl->depth_)
				ifd->cutoff_ = cl->depth_;
			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
		}
#if 1 /* ALTQ */
		else {
			/*
			 * the class is overlimit. if the class has
			 * underlimit ancestors, set cutoff to the lowest
			 * depth among them.
			 */
			struct rm_class *borrow = cl->borrow_;

			while (borrow != NULL &&
			    borrow->depth_ < ifd->cutoff_) {
				if (TV_LT(&borrow->undertime_, &now)) {
					ifd->cutoff_ = borrow->depth_;
					CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
					break;
				}
				borrow = borrow->borrow_;
			}
		}
#else /* !ALTQ */
		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
			if (TV_LT(&cl->borrow_->undertime_, &now)) {
				ifd->cutoff_ = cl->borrow_->depth_;
				CBQTRACE(rmc_queue_packet, 'ffob',
				    cl->borrow_->depth_);
			}
		}
#endif /* !ALTQ */
	}

	if (_rmc_addq(cl, m) < 0)
		/* failed */
		return (-1);

	if (is_empty) {
		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
		ifd->na_[cpri]++;
	}

	if (qlen(cl->q_) > qlimit(cl->q_)) {
		/* note: qlimit can be set to 0 or 1 */
		rmc_drop_action(cl);
		return (-1);
	}
	return (0);
}

/*
 * void
 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
 *	classes to see if they are satisfied.
 */

static void
rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
{
	int i;
	rm_class_t *p, *bp;

	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
		if ((bp = ifd->active_[i]) != NULL) {
			p = bp;
			do {
				if (!rmc_satisfied(p, now)) {
					ifd->cutoff_ = p->depth_;
					return;
				}
				p = p->peer_;
			} while (p != bp);
		}
	}

	reset_cutoff(ifd);
}

/*
 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
 */

static int
rmc_satisfied(struct rm_class *cl, struct timeval *now)
{
	rm_class_t *p;

	if (cl == NULL)
		return (1);
	if (TV_LT(now, &cl->undertime_))
		return (1);
	if (cl->depth_ == 0) {
		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
			return (0);
		else
			return (1);
	}
	if (cl->children_ != NULL) {
		p = cl->children_;
		while (p != NULL) {
			if (!rmc_satisfied(p, now))
				return (0);
			p = p->next_;
		}
	}

	return (1);
}

/*
 * Return 1 if class 'cl' is under limit or can borrow from a parent,
 * 0 if overlimit.  As a side-effect, this routine will invoke the
 * class overlimit action if the class is overlimit.
 */

static int
rmc_under_limit(struct rm_class *cl, struct timeval *now)
{
	rm_class_t *p = cl;
	rm_class_t *top;
	struct rm_ifdat *ifd = cl->ifdat_;

	ifd->borrowed_[ifd->qi_] = NULL;
	/*
	 * If cl is the root class, then always return that it is
	 * underlimit.  Otherwise, check to see if the class is underlimit.
	 */
	if (cl->parent_ == NULL)
		return (1);

	if (cl->sleeping_) {
		if (TV_LT(now, &cl->undertime_))
			return (0);

		CALLOUT_STOP(&cl->callout_);
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;
		return (1);
	}

	top = NULL;
	while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
		if (((cl = cl->borrow_) == NULL) ||
		    (cl->depth_ > ifd->cutoff_)) {
#ifdef ADJUST_CUTOFF
			if (cl != NULL)
				/* cutoff is taking effect, just
				   return false without calling
				   the delay action. */
				return (0);
#endif
#ifdef BORROW_OFFTIME
			/*
			 * check if the class can borrow offtime too.
			 * borrow offtime from the top of the borrow
			 * chain if the top class is not overloaded.
			 */
			if (cl != NULL) {
				/* cutoff is taking effect, use this class as top. */
				top = cl;
				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
			}
			if (top != NULL && top->avgidle_ == top->minidle_)
				top = NULL;
			p->overtime_ = *now;
			(p->overlimit)(p, top);
#else
			p->overtime_ = *now;
			(p->overlimit)(p, NULL);
#endif
			return (0);
		}
		top = cl;
	}

	if (cl != p)
		ifd->borrowed_[ifd->qi_] = cl;
	return (1);
}

/*
 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
 *	packet-by-packet round robin.
 *
 * The heart of the weighted round-robin scheduler, which decides which
 * class next gets to send a packet.  Highest priority first, then
 * weighted round-robin within priorities.
 *
 * Each able-to-send class gets to send until its byte allocation is
 * exhausted.  Thus, the active pointer is only changed after a class has
 * exhausted its allocation.
 *
 * If the scheduler finds no class that is underlimit or able to borrow,
 * then the first class found that had a nonzero queue and is allowed to
 * borrow gets to send.
 */

static mbuf_t *
_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct rm_class *cl = NULL, *first = NULL;
	u_int deficit;
	int cpri;
	mbuf_t *m;
	struct timeval now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		if (ifd->efficient_) {
			/* check if this class is overlimit */
			if (cl->undertime_.tv_sec != 0 &&
			    rmc_under_limit(cl, &now) == 0)
				first = cl;
		}
		ifd->pollcache_ = NULL;
		goto _wrr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		deficit = 0;
		/*
		 * Loop through twice for a priority level, if some class
		 * was unable to send a packet the first round because
		 * of the weighted round-robin mechanism.
		 * During the second loop at this level, deficit==2.
		 * (This second loop is not needed if for every class,
		 * "M[cl->pri_]" times "cl->allotment" is greater than
		 * the byte size for the largest packet in the class.)
		 */
 _wrr_loop:
		cl = ifd->active_[cpri];
		ASSERT(cl != NULL);
		do {
			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
				cl->bytes_alloc_ += cl->w_allotment_;
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now)) {
					if (cl->bytes_alloc_ > 0 || deficit > 1)
						goto _wrr_out;

					/* underlimit but no alloc */
					deficit = 1;
#if 1
					ifd->borrowed_[ifd->qi_] = NULL;
#endif
				}
				else if (first == NULL && cl->borrow_ != NULL)
					first = cl; /* borrowing candidate */
			}

			cl->bytes_alloc_ = 0;
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);

		if (deficit == 1) {
			/* first loop found an underlimit class with deficit */
			/* Loop on same priority level, with new deficit. */
			deficit = 2;
			goto _wrr_loop;
		}
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect,
	 * increase cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);

	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0	/* too time-consuming for nothing */
	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _wrr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_wrr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		/*
		 * Update class statistics and link data.
		 */
		if (cl->bytes_alloc_ > 0)
			cl->bytes_alloc_ -= m_pktlen(m);

		if ((cl->bytes_alloc_ <= 0) || first == cl)
			ifd->active_[cl->pri_] = cl->peer_;
		else
			ifd->active_[cl->pri_] = cl;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * Dequeue & return next packet from the highest priority class that
 * has a packet to send & has enough allocation to send it.  This
 * routine is called by a driver whenever it needs a new packet to
 * output.
 */
static mbuf_t *
_rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	mbuf_t *m;
	int cpri;
	struct rm_class *cl, *first = NULL;
	struct timeval now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		ifd->pollcache_ = NULL;
		goto _prr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		cl = ifd->active_[cpri];
		ASSERT(cl != NULL);
		do {
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now))
					goto _prr_out;
				if (first == NULL && cl->borrow_ != NULL)
					first = cl;
			}
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect, increase
	 * cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0	/* too time-consuming for nothing */
	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _prr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_prr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		ifd->active_[cpri] = cl->peer_;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * mbuf_t *
 * rmc_dequeue_next(struct rm_ifdat *ifd, int mode) - this function
 *	is invoked by the packet driver to get the next packet to be
 *	dequeued and output on the link.  If WRR is enabled, then the
 *	WRR dequeue next routine will determine the next packet to be sent.
 *	Otherwise, packet-by-packet round robin is invoked.
 *
 *	Returns:	NULL, if a packet is not available or if all
 *			classes are overlimit.
 *
 *			Otherwise, Pointer to the next packet.
 */

mbuf_t *
rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
{
	if (ifd->queued_ >= ifd->maxqueued_)
		return (NULL);
	else if (ifd->wrr_)
		return (_rmc_wrr_dequeue_next(ifd, mode));
	else
		return (_rmc_prr_dequeue_next(ifd, mode));
}

/*
 * Update the utilization estimate for the packet that just completed.
 * The packet's class & the parent(s) of that class all get their
 * estimators updated.  This routine is called by the driver's output-
 * packet-completion interrupt service routine.
 */

/*
 * a macro to approximate "divide by 1000" that gives 0.000999,
 * if a value has enough effective digits.
 * (on pentium, mul takes 9 cycles but div takes 46!)
 */
#define	NSEC_TO_USEC(t)	(((t) >> 10) + ((t) >> 16) + ((t) >> 17))
void
rmc_update_class_util(struct rm_ifdat *ifd)
{
	int idle, avgidle, pktlen;
	int pkt_time, tidle;
	rm_class_t *cl, *borrowed;
	rm_class_t *borrows;
	struct timeval *nowp;

	/*
	 * Get the most recent completed class.
	 */
	if ((cl = ifd->class_[ifd->qo_]) == NULL)
		return;

	pktlen = ifd->curlen_[ifd->qo_];
	borrowed = ifd->borrowed_[ifd->qo_];
	borrows = borrowed;

	PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);

	/*
	 * Run estimator on class and its ancestors.
	 */
	/*
	 * rmc_update_class_util is designed to be called when the
	 * transfer is completed from a xmit complete interrupt,
	 * but most drivers don't implement an upcall for that.
	 * so, just use estimated completion time.
	 * as a result, ifd->qi_ and ifd->qo_ are always synced.
	 */
	nowp = &ifd->now_[ifd->qo_];
	/* get pkt_time (for link) in usec */
#if 1 /* use approximation */
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
	pkt_time = NSEC_TO_USEC(pkt_time);
#else
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
#endif
#if 1 /* ALTQ4PPP */
	if (TV_LT(nowp, &ifd->ifnow_)) {
		int iftime;

		/*
		 * make sure the estimated completion time does not go
		 * too far.  it can happen when the link layer supports
		 * data compression or the interface speed is set to
		 * a much lower value.
		 */
		TV_DELTA(&ifd->ifnow_, nowp, iftime);
		if (iftime + pkt_time < ifd->maxiftime_) {
			TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
		} else {
			TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
		}
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#else
	if (TV_LT(nowp, &ifd->ifnow_)) {
		TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#endif

	while (cl != NULL) {
		TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
		if (idle >= 2000000)
			/*
			 * this class is idle enough, reset avgidle.
			 * (TV_DELTA returns 2000000 us when delta is large.)
			 */
			cl->avgidle_ = cl->maxidle_;

		/* get pkt_time (for class) in usec */
#if 1 /* use approximation */
		pkt_time = pktlen * cl->ns_per_byte_;
		pkt_time = NSEC_TO_USEC(pkt_time);
#else
		pkt_time = pktlen * cl->ns_per_byte_ / 1000;
#endif
		idle -= pkt_time;

		avgidle = cl->avgidle_;
		avgidle += idle - (avgidle >> RM_FILTER_GAIN);
		cl->avgidle_ = avgidle;

		/* Are we overlimit ? */
		if (avgidle <= 0) {
			CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
#if 1 /* ALTQ */
			/*
			 * need some lower bound for avgidle, otherwise
			 * a borrowing class gets unbounded penalty.
			 */
			if (avgidle < cl->minidle_)
				avgidle = cl->avgidle_ = cl->minidle_;
#endif
			/* set next idle to make avgidle 0 */
			tidle = pkt_time +
			    (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
			TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
			++cl->stats_.over;
		} else {
			cl->avgidle_ =
			    (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
			cl->undertime_.tv_sec = 0;
			if (cl->sleeping_) {
				CALLOUT_STOP(&cl->callout_);
				cl->sleeping_ = 0;
			}
		}

		if (borrows != NULL) {
			if (borrows != cl)
				++cl->stats_.borrows;
			else
				borrows = NULL;
		}
		cl->last_ = ifd->ifnow_;
		cl->last_pkttime_ = pkt_time;

#if 1
		if (cl->parent_ == NULL) {
			/* take stats of root class */
			PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
		}
#endif

		cl = cl->parent_;
	}

	/*
	 * Check to see if cutoff needs to be set to a new level.
	 */
	cl = ifd->class_[ifd->qo_];
	if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
#if 1 /* ALTQ */
		if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
			rmc_tl_satisfied(ifd, nowp);
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#else /* !ALTQ */
		if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
			reset_cutoff(ifd);
#ifdef notdef
			rmc_tl_satisfied(ifd, &now);
#endif
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#endif /* !ALTQ */
	}

	/*
	 * Release class slot
	 */
	ifd->borrowed_[ifd->qo_] = NULL;
	ifd->class_[ifd->qo_] = NULL;
	ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
	ifd->queued_--;
}

/*
 * void
 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
 *	over-limit action routines.  These get invoked by rmc_under_limit()
 *	if a class with packets to send is over its bandwidth limit & can't
 *	borrow from a parent class.
 *
 *	Returns:	NONE
 */

static void
rmc_drop_action(struct rm_class *cl)
{
	struct rm_ifdat *ifd = cl->ifdat_;

	ASSERT(qlen(cl->q_) > 0);
	_rmc_dropq(cl);
	if (qempty(cl->q_))
		ifd->na_[cl->pri_]--;
}

void rmc_dropall(struct rm_class *cl)
{
	struct rm_ifdat *ifd = cl->ifdat_;

	if (!qempty(cl->q_)) {
		_flushq(cl->q_);

		ifd->na_[cl->pri_]--;
	}
}

#if (__FreeBSD_version > 300000)
/* hzto() is removed from FreeBSD-3.0 */
static int hzto(struct timeval *);

static int
hzto(struct timeval *tv)
{
	struct timeval t2;

	getmicrotime(&t2);
	t2.tv_sec = tv->tv_sec - t2.tv_sec;
	t2.tv_usec = tv->tv_usec - t2.tv_usec;
	return (tvtohz(&t2));
}
#endif /* __FreeBSD_version > 300000 */

/*
 * void
 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
 *	delay action routine.  It is invoked via rmc_under_limit when the
 *	packet is discovered to be overlimit.
 *
 *	If the delay action is the result of the borrow class being overlimit,
 *	then delay for the offtime of the borrowing class that is overlimit.
 *
 *	Returns:	NONE
 */

void
rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
{
	int delay, t, extradelay;

	cl->stats_.overactions++;
	TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
#ifndef BORROW_OFFTIME
	delay += cl->offtime_;
#endif

	if (!cl->sleeping_) {
		CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
#ifdef BORROW_OFFTIME
		if (borrow != NULL)
			extradelay = borrow->offtime_;
		else
#endif
			extradelay = cl->offtime_;

#ifdef ALTQ
		/*
		 * XXX recalculate suspend time:
		 * current undertime is (tidle + pkt_time) calculated
		 * from the last transmission.
		 *	tidle: time required to bring avgidle back to 0
		 *	pkt_time: target waiting time for this class
		 * we need to replace pkt_time by offtime
		 */
		extradelay -= cl->last_pkttime_;
#endif
		if (extradelay > 0) {
			TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
			delay += extradelay;
		}

		cl->sleeping_ = 1;
		cl->stats_.delays++;

		/*
		 * Since packets are phased randomly with respect to the
		 * clock, 1 tick (the next clock tick) can be an arbitrarily
		 * short time so we have to wait for at least two ticks.
		 * NOTE:  If there's no other traffic, we need the timer as
		 * a 'backstop' to restart this class.
		 */
		if (delay > tick * 2) {
			/* FreeBSD rounds up the tick */
			t = hzto(&cl->undertime_);
		} else
			t = 2;
		CALLOUT_RESET(&cl->callout_, t,
		    (timeout_t *)rmc_restart, (caddr_t)cl);
	}
}

/*
 * void
 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
 *	called by the system timer code & is responsible for checking if the
 *	class is still sleeping (it might have been restarted as a side
 *	effect of the queue scan on a packet arrival) and, if so, restarting
 *	output for the class.  Inspecting the class state & restarting output
 *	require locking the class structure.  In general the driver is
 *	responsible for locking but this is the only routine that is not
 *	called directly or indirectly from the interface driver so it has
 *	to know about system locking conventions.  Under bsd, locking is done
 *	by raising IPL to splimp so that's what's implemented here.  On a
 *	different system this would probably need to be changed.
 *
 *	Returns:	NONE
 */

static void
rmc_restart(struct rm_class *cl)
{
	struct rm_ifdat *ifd = cl->ifdat_;
	int s;

	s = splnet();
	IFQ_LOCK(ifd->ifq_);
	if (cl->sleeping_) {
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;

		if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
			CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
			(ifd->restart)(ifd->ifq_);
		}
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
}

/*
 * void
 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
 *	handling routine for the root class of the link sharing structure.
 *
 *	Returns:	NONE
 */

static void
rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
{
	panic("rmc_root_overlimit");
}

/*
 * Packet Queue handling routines.  Eventually, this is to localize the
 *	effects on the code of whether queues are red queues or droptail
 *	queues.
 */

static int
_rmc_addq(rm_class_t *cl, mbuf_t *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
#endif /* ALTQ_RED */
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->q_))
		return codel_addq(cl->codel_, cl->q_, m);
#endif

	if (cl->flags_ & RMCF_CLEARDSCP)
		write_dsfield(m, cl->pktattr_, 0);

	_addq(cl->q_, m);
	return (0);
}

/* note: _rmc_dropq is not called for red */
static void
_rmc_dropq(rm_class_t *cl)
{
	mbuf_t *m;

	if ((m = _getq(cl->q_)) != NULL)
		m_freem(m);
}

static mbuf_t *
_rmc_getq(rm_class_t *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_getq((rio_t *)cl->red_, cl->q_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_getq(cl->red_, cl->q_);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->q_))
		return codel_getq(cl->codel_, cl->q_);
#endif
	return _getq(cl->q_);
}

static mbuf_t *
_rmc_pollq(rm_class_t *cl)
{
	return qhead(cl->q_);
}

#ifdef CBQ_TRACE

struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
struct cbqtrace *cbqtrace_ptr = NULL;
int cbqtrace_count;

/*
 * DDB hook to trace cbq events:
 *  the last 1024 events are held in a circular buffer.
 *  use "call cbqtrace_dump(N)" to display 20 events from Nth event.
 */
void cbqtrace_dump(int);
static char *rmc_funcname(void *);

static struct rmc_funcs {
	void *func;
	char *name;
} rmc_funcs[] =
{
	rmc_init,		"rmc_init",
	rmc_queue_packet,	"rmc_queue_packet",
	rmc_under_limit,	"rmc_under_limit",
	rmc_update_class_util,	"rmc_update_class_util",
	rmc_delay_action,	"rmc_delay_action",
	rmc_restart,		"rmc_restart",
	_rmc_wrr_dequeue_next,	"_rmc_wrr_dequeue_next",
	NULL,			NULL
};

static char *rmc_funcname(void *func)
{
	struct rmc_funcs *fp;

	for (fp = rmc_funcs; fp->func != NULL; fp++)
		if (fp->func == func)
			return (fp->name);
	return ("unknown");
}

void cbqtrace_dump(int counter)
{
	int i, *p;
	char *cp;

	counter = counter % NCBQTRACE;
	p = (int *)&cbqtrace_buffer[counter];

	for (i=0; i<20; i++) {
		printf("[0x%x] ", *p++);
		printf("%s: ", rmc_funcname((void *)*p++));
		cp = (char *)p++;
		printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
		printf("%d\n",*p++);

		if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
			p = (int *)cbqtrace_buffer;
	}
}
#endif /* CBQ_TRACE */
#endif /* ALTQ_CBQ */

#if defined(ALTQ_CBQ) || defined(ALTQ_RED) || defined(ALTQ_RIO) || \
    defined(ALTQ_HFSC) || defined(ALTQ_PRIQ) || defined(ALTQ_CODEL)
#if !defined(__GNUC__) || defined(ALTQ_DEBUG)

void
_addq(class_queue_t *q, mbuf_t *m)
{
	mbuf_t *m0;

	if ((m0 = qtail(q)) != NULL)
		m->m_nextpkt = m0->m_nextpkt;
	else
		m0 = m;
	m0->m_nextpkt = m;
	qtail(q) = m;
	qlen(q)++;
}

mbuf_t *
_getq(class_queue_t *q)
{
	mbuf_t *m, *m0;

	if ((m = qtail(q)) == NULL)
		return (NULL);
	if ((m0 = m->m_nextpkt) != m)
		m->m_nextpkt = m0->m_nextpkt;
	else {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	}
	qlen(q)--;
	m0->m_nextpkt = NULL;
	return (m0);
}

/* drop a packet at the tail of the queue */
mbuf_t *
_getq_tail(class_queue_t *q)
{
	mbuf_t *m, *m0, *prev;

	if ((m = m0 = qtail(q)) == NULL)
		return NULL;
	do {
		prev = m0;
		m0 = m0->m_nextpkt;
	} while (m0 != m);
	prev->m_nextpkt = m->m_nextpkt;
	if (prev == m) {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	} else
		qtail(q) = prev;
	qlen(q)--;
	m->m_nextpkt = NULL;
	return (m);
}

/* randomly select a packet in the queue */
mbuf_t *
_getq_random(class_queue_t *q)
{
	struct mbuf *m;
	int i, n;

	if ((m = qtail(q)) == NULL)
		return NULL;
	if (m->m_nextpkt == m) {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	} else {
		struct mbuf *prev = NULL;

		n = arc4random() % qlen(q) + 1;
		for (i = 0; i < n; i++) {
			prev = m;
			m = m->m_nextpkt;
		}
		prev->m_nextpkt = m->m_nextpkt;
		if (m == qtail(q))
			qtail(q) = prev;
	}
	qlen(q)--;
	m->m_nextpkt = NULL;
	return (m);
}

void
_removeq(class_queue_t *q, mbuf_t *m)
{
	mbuf_t *m0, *prev;

	m0 = qtail(q);
	do {
		prev = m0;
		m0 = m0->m_nextpkt;
	} while (m0 != m);
	prev->m_nextpkt = m->m_nextpkt;
	if (prev == m)
		qtail(q) = NULL;
	else if (qtail(q) == m)
		qtail(q) = prev;
	qlen(q)--;
}

void
_flushq(class_queue_t *q)
{
	mbuf_t *m;

	while ((m = _getq(q)) != NULL)
		m_freem(m);
	ASSERT(qlen(q) == 0);
}

#endif /* !__GNUC__ || ALTQ_DEBUG */
#endif /* ALTQ_CBQ || ALTQ_RED || ALTQ_RIO || ALTQ_HFSC || ALTQ_PRIQ || ALTQ_CODEL */