/*-
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 *
 * $KAME: altq_hfsc.c,v 1.24 2003/12/05 05:40:46 kjc Exp $
 * $FreeBSD$
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
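 *
 * as a rough illustration (numbers invented for the example): a class
 * whose upperlimit curve is a flat 1 Mbps and which has already been
 * served 1 Mbit since the curve's origin has a fit-time one second
 * past that origin; until the clock reaches it, the link-sharing
 * scheduler passes over the class even when it is backlogged.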
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_HFSC	/* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <netinet/in.h>

#include <netpfil/pf/pf.h>
#include <netpfil/pf/pf_altq.h>
#include <netpfil/pf/pf_mtag.h>
#include <net/altq/altq.h>
#include <net/altq/altq_hfsc.h>

/*
 * function prototypes
 */
static int	hfsc_clear_interface(struct hfsc_if *);
static int	hfsc_request(struct ifaltq *, int, void *);
static void	hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int	hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int	hfsc_enqueue(struct ifaltq *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq *, int);

static int	hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void	hfsc_purgeq(struct hfsc_class *);

static void	update_cfmin(struct hfsc_class *);
static void	set_active(struct hfsc_class *, int);
static void	set_passive(struct hfsc_class *);

static void	init_ed(struct hfsc_class *, int);
static void	update_ed(struct hfsc_class *, int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, u_int64_t);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
struct hfsc_class *hfsc_get_mindl(struct hfsc_if *, u_int64_t);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
    u_int64_t);

static __inline u_int64_t	seg_x2y(u_int64_t, u_int64_t);
static __inline u_int64_t	seg_y2x(u_int64_t, u_int64_t);
static __inline u_int64_t	m2sm(u_int64_t);
static __inline u_int64_t	m2ism(u_int64_t);
static __inline u_int64_t	d2dx(u_int);
static u_int64_t	sm2m(u_int64_t);
static u_int		dx2d(u_int64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);
static u_int64_t	rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t	rtsc_x2y(struct runtime_sc *, u_int64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);

static void	get_class_stats_v0(struct hfsc_classstats_v0 *,
    struct hfsc_class *);
static void	get_class_stats_v1(struct hfsc_classstats_v1 *,
    struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */

int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
	s = splnet();
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request);
	splx(s);
	return (error);
}

int
hfsc_add_altq(struct ifnet *ifp, struct pf_altq *a)
{
	struct hfsc_if *hif;

	if (ifp == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (hif == NULL)
		return (ENOMEM);

	TAILQ_INIT(&hif->hif_eligible);
	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	free(hif, M_DEVBUF);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts_v1 *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes, int version)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	union {
		struct hfsc_classstats_v0 v0;
		struct hfsc_classstats_v1 v1;
	} stats;
	size_t stats_size;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (version > HFSC_STATS_VERSION)
		return (EINVAL);

	memset(&stats, 0, sizeof(stats));
	switch (version) {
	case 0:
		get_class_stats_v0(&stats.v0, cl);
		stats_size = sizeof(struct hfsc_classstats_v0);
		break;
	case 1:
		get_class_stats_v1(&stats.v1, cl);
		stats_size = sizeof(struct hfsc_classstats_v1);
		break;
	}

	if (*nbytes < stats_size)
		return (EINVAL);

	if ((error = copyout((caddr_t)&stats, ubuf, stats_size)) != 0)
		return (error);

	*nbytes = stats_size;
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_CODEL
	if (flags & HFCF_CODEL) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: CODEL not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	TAILQ_INIT(&cl->cl_actc);

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	qsize(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */
#ifdef ALTQ_CODEL
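	/*
	 * note: the constants passed to codel_alloc() below are the stock
	 * CoDel parameters, a 5 msec target delay and a 100 msec interval,
	 * with ECN marking disabled (see altq_codel.c).
	 */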
	if (flags & HFCF_CODEL) {
		cl->cl_codel = codel_alloc(5, 100, 0);
		if (cl->cl_codel != NULL)
			qtype(cl->cl_q) = Q_CODEL;
	}
#endif

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	IFQ_LOCK(hif->hif_ifq);
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			IFQ_UNLOCK(hif->hif_ifq);
			splx(s);
			goto err_ret;
		}
	}
	cl->cl_slot = i;

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else {
		/* add this class to the children list of the parent */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			/* Put new class at beginning of list */
			cl->cl_siblings = parent->cl_children;
			parent->cl_children = cl;
		}
	}
	IFQ_UNLOCK(hif->hif_ifq);
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
#ifdef ALTQ_CODEL
		if (q_is_codel(cl->cl_q))
			codel_destroy(cl->cl_codel);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int s;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();
	IFQ_LOCK(cl->cl_hif->hif_ifq);

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}

	cl->cl_hif->hif_class_tbl[cl->cl_slot] = NULL;
	cl->cl_hif->hif_classes--;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
	splx(s);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
#ifdef ALTQ_CODEL
		if (q_is_codel(cl->cl_q))
			codel_destroy(cl->cl_codel);
#endif
	}

	IFQ_LOCK(cl->cl_hif->hif_ifq);
	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *	usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct pf_mtag *t;
	int len;

	IFQ_LOCK_ASSERT(ifq);

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = pf_find_mtag(m)) != NULL)
		cl = clh_to_clp(hif, t->qid);
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
	cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
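 *
 *	an illustrative calling sequence (driver_can_send() stands in for
 *	whatever resource check the driver performs; it is not a real
 *	function in this file):
 *
 *		m = hfsc_dequeue(ifq, ALTDQ_POLL);
 *		if (m != NULL && driver_can_send(m))
 *			m = hfsc_dequeue(ifq, ALTDQ_REMOVE);
 *
 *	the class chosen by the poll is cached in hif_pollcache so that
 *	the following ALTDQ_REMOVE does not repeat the scheduling decision.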
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	IFQ_LOCK_ASSERT(ifq);

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = hfsc_get_mindl(hif, cur_time)) != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n", fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		return codel_addq(cl->cl_codel, cl->cl_q, m);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		return codel_getq(cl->cl_codel, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
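				 *
				 * e.g., if the previous period ended with
				 * cvtmax == 100, every child's vtoff grows
				 * by 100, so a child restarting at vt == 0
				 * still has an effective virtual time
				 * (vtoff + vt) of at least 100
				 * (illustrative numbers).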
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				    p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
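			 *
			 * e.g., with cur_time == 5000, machclk_per_tick
			 * == 100 and myf == 4000, delta == 1000 is added
			 * to both myf and myfadj so that the class resumes
			 * from the present time instead of bursting to
			 * make up the backlog (illustrative numbers).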
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	u_int64_t cfmin;

	if (TAILQ_EMPTY(&cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&hif->hif_eligible, elighead)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&hif->hif_eligible, elighead);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
hfsc_get_mindl(struct hfsc_if *hif, u_int64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
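 *
 * both lists depend on their sort keys moving monotonically: an
 * updated entry can only migrate toward the tail, which is why
 * ellist_update() and actlist_update() scan forward from the entry's
 * current position instead of reinserting from the head.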
 */

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, &cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/machclk tick) << SM_SHIFT
 *	ism: (machclk ticks/byte) << ISM_SHIFT
 *	dx: machclk ticks
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.  we
 * should be able to handle 100K-100Gbps linkspeed with 256 MHz machclk
 * frequency and at least 3 effective digits in decimal.
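 *
 * a worked example (illustrative figures; machclk_freq is measured at
 * boot): with machclk_freq = 256 MHz, a rate of m = 10 Mbps converts to
 *	sm  = (10^7 << 24) / 8 / (256 * 10^6) = 81920
 * i.e. 81920 / 2^24 (roughly 0.005) bytes per machclk tick, and
 *	ism = ((256 * 10^6) << 14) * 8 / 10^7 = 3355443
 * i.e. roughly 204.8 machclk ticks per byte, scaled up by 2^14.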
 *
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	14

#define	SM_MASK		((1LL << SM_SHIFT) - 1)
#define	ISM_MASK	((1LL << ISM_SHIFT) - 1)

static __inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
	u_int64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static __inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return (x);
}

static __inline u_int64_t
m2sm(u_int64_t m)
{
	u_int64_t sm;

	sm = (m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline u_int64_t
m2ism(u_int64_t m)
{
	u_int64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline u_int64_t
d2dx(u_int d)
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int64_t
sm2m(u_int64_t sm)
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return (m);
}

static u_int
dx2d(u_int64_t dx)
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
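 *
 * the resulting runtime curve is the internal curve translated to the
 * new origin: starting from (x, y) it rises with slope sm1 for the
 * first dx machclk ticks (gaining dy bytes), and with slope sm2 from
 * then on.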
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection value
 */
static u_int64_t
rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

static void
get_class_stats_v0(struct hfsc_classstats_v0 *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

#define	SATU32(x)	(u_int32_t)uqmin((x), UINT_MAX)

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = SATU32(sm2m(cl->cl_rsc->sm1));
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = SATU32(sm2m(cl->cl_rsc->sm2));
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = SATU32(sm2m(cl->cl_fsc->sm1));
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = SATU32(sm2m(cl->cl_fsc->sm2));
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = SATU32(sm2m(cl->cl_usc->sm1));
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = SATU32(sm2m(cl->cl_usc->sm2));
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

#undef SATU32

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		codel_getstats(cl->cl_codel, &sp->codel);
#endif
}

static void
get_class_stats_v1(struct hfsc_classstats_v1 *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		codel_getstats(cl->cl_codel, &sp->codel);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

#endif /* ALTQ_HFSC */