/*-
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 *
 * $KAME: altq_hfsc.c,v 1.24 2003/12/05 05:40:46 kjc Exp $
 * $FreeBSD$
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */

#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>

#include <netpfil/pf/pf.h>
#include <netpfil/pf/pf_altq.h>
#include <netpfil/pf/pf_mtag.h>
#include <net/altq/altq.h>
#include <net/altq/altq_hfsc.h>
#ifdef ALTQ3_COMPAT
#include <net/altq/altq_conf.h>
#endif

/*
 * function prototypes
 */
static int hfsc_clear_interface(struct hfsc_if *);
static int hfsc_request(struct ifaltq *, int, void *);
static void hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int hfsc_enqueue(struct ifaltq *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq *, int);

static int hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void hfsc_purgeq(struct hfsc_class *);

static void update_cfmin(struct hfsc_class *);
static void set_active(struct hfsc_class *, int);
static void set_passive(struct hfsc_class *);

static void init_ed(struct hfsc_class *, int);
static void update_ed(struct hfsc_class *, int);
static void update_d(struct hfsc_class *, int);
static void init_vf(struct hfsc_class *, int);
static void update_vf(struct hfsc_class *, int, u_int64_t);
static void ellist_insert(struct hfsc_class *);
static void ellist_remove(struct hfsc_class *);
static void ellist_update(struct hfsc_class *);
struct hfsc_class *hfsc_get_mindl(struct hfsc_if *, u_int64_t);
static void actlist_insert(struct hfsc_class *);
static void actlist_remove(struct hfsc_class *);
static void actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
    u_int64_t);

static __inline u_int64_t seg_x2y(u_int64_t, u_int64_t);
static __inline u_int64_t seg_y2x(u_int64_t, u_int64_t);
static __inline u_int64_t m2sm(u_int);
static __inline u_int64_t m2ism(u_int);
static __inline u_int64_t d2dx(u_int);
static u_int sm2m(u_int64_t);
static u_int dx2d(u_int64_t);

static void sc2isc(struct service_curve *, struct internal_sc *);
static void rtsc_init(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);
static u_int64_t rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t rtsc_x2y(struct runtime_sc *, u_int64_t);
static void rtsc_min(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);

static void get_class_stats(struct hfsc_classstats *,
    struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);

#ifdef ALTQ3_COMPAT
static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
static int hfsc_detach(struct hfsc_if *);
static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
    struct service_curve *, struct service_curve *);

static int hfsccmd_if_attach(struct hfsc_attach *);
static int hfsccmd_if_detach(struct hfsc_interface *);
static int hfsccmd_add_class(struct hfsc_add_class *);
static int hfsccmd_delete_class(struct hfsc_delete_class *);
static int hfsccmd_modify_class(struct hfsc_modify_class *);
static int hfsccmd_add_filter(struct hfsc_add_filter *);
static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int hfsccmd_class_stats(struct hfsc_class_stats *);

altqdev_decl(hfsc);
#endif /* ALTQ3_COMPAT */

/*
 * macros
 */
#define is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */

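/*
 * attach the hfsc discipline (allocated beforehand by hfsc_add_altq()
 * and kept in pf_altq) to the interface's send queue.
 */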
int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
	s = splnet();
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
	splx(s);
	return (error);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (hif == NULL)
		return (ENOMEM);

	TAILQ_INIT(&hif->hif_eligible);
	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	free(hif, M_DEVBUF);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d  = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d  = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d  = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

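/*
 * copy the statistics of the class identified by a->qid out to the
 * user buffer; *nbytes must be large enough for a hfsc_classstats.
 */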
int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	get_class_stats(&stats, cl);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

#ifdef ALTQ3_COMPAT
	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	TAILQ_INIT(&cl->cl_actc);

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	IFQ_LOCK(hif->hif_ifq);
	hif->hif_classes++;

	/*
	 * find a free slot in the class table. if the slot matching
	 * the lower bits of qid is free, use this slot. otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			IFQ_UNLOCK(hif->hif_ifq);
			splx(s);
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else {
		/* add this class to the children list of the parent */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			while (p->cl_siblings != NULL)
				p = p->cl_siblings;
			p->cl_siblings = cl;
		}
	}
	IFQ_UNLOCK(hif->hif_ifq);
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

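/*
 * destroy a leaf class: purge its queue, unlink it from the parent's
 * children list and the class table, and free its service curves.
 * a class that still has children cannot be destroyed (EBUSY).
 */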
static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i, s;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();
	IFQ_LOCK(cl->cl_hif->hif_ifq);

#ifdef ALTQ3_COMPAT
	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
	splx(s);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	IFQ_LOCK(cl->cl_hif->hif_ifq);
	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct pf_mtag *t;
	int len;

	IFQ_LOCK_ASSERT(ifq);

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = pf_find_mtag(m)) != NULL)
		cl = clh_to_clp(hif, t->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred. mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue. ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	IFQ_LOCK_ASSERT(ifq);

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = hfsc_get_mindl(hif, cur_time)) != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {

				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n", fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

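/*
 * hfsc_getq/hfsc_pollq dequeue from (or peek at) the class queue,
 * dispatching to RED/RIO when one of those queue types is in use.
 */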
static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

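/*
 * a class becomes active when its queue turns non-empty and passive
 * when the queue drains; set_passive() only removes the class from
 * the eligible list, the active-children list is maintained by
 * init_vf()/update_vf().
 */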
static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

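/*
 * init_vf runs when a leaf class becomes active: walking up the
 * ancestry, it gives each newly-active class an initial virtual time,
 * updates its virtual (and optional upper-limit) curve, and links it
 * into the parent's active-children list.
 */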
static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {

		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes. if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

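/*
 * update_vf runs after each transmission (and with len 0 on purge):
 * it charges len to every ancestor, recomputes vt and the fit-time f,
 * and unlinks classes whose subtree has gone idle.
 */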
static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {

		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	u_int64_t cfmin;

	if (TAILQ_EMPTY(&cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&hif->hif_eligible, elighead)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&hif->hif_eligible, elighead);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
hfsc_get_mindl(struct hfsc_if *hif, u_int64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, &cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

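/*
 * return the first active child that fits, i.e. whose fit-time has
 * already been reached (cl_f <= cur_time); the list is kept sorted
 * by vt.
 */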
static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed. SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps      10Mbps     100Mbps    1Gbps
 *  ----------+------------------------------------------------------
 *  bytes/nsec  12.5e-6     125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6     250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6     625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000       8000       800        80         8
 *  ism(500MHz) 40000       4000       400        40         4
 *  ism(200MHz) 16000       1600       160        16         1.6
 */
#define SM_SHIFT	24
#define ISM_SHIFT	10

#define SM_MASK		((1LL << SM_SHIFT) - 1)
#define ISM_MASK	((1LL << ISM_SHIFT) - 1)

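/*
 * worked example (hypothetical numbers; a 1GHz clock is not in the
 * table above): for m = 10Mbps and machclk_freq = 1GHz, m2sm() below
 * computes ((10^7 << 24) / 8) / 10^9 = 20971, i.e. 1.25e-3 bytes per
 * clock tick scaled up by 2^24, which keeps well over the 3 effective
 * decimal digits called for above.
 */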

static __inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
	u_int64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static __inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return (x);
}

static __inline u_int64_t
m2sm(u_int m)
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline u_int64_t
m2ism(u_int m)
{
	u_int64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline u_int64_t
d2dx(u_int d)
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(u_int64_t sm)
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(u_int64_t dx)
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

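/*
 * illustration (hypothetical parameters): a concave real-time curve
 * with m1 = 20Mbps, d = 10ms, m2 = 10Mbps is converted by sc2isc()
 * above into a first segment of dx = d2dx(10) clock ticks and
 * dy = seg_x2y(dx, sm1) bytes, after which the curve continues with
 * slope sm2.
 */
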
/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection (time) of the runtime service curve
 * for the given y value (amount of service)
 */
static u_int64_t
rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * expanding both sides (the SM_SHIFT scaling cancels) gives
	 *	dx * sm1 == dx * sm2 + ((y1 - y) << SM_SHIFT)
	 * and therefore
	 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle. if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

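/*
 * the remainder of this file implements the legacy ALTQ3 character
 * device interface and is compiled only under ALTQ3_COMPAT.
 */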
#ifdef ALTQ3_COMPAT
static struct hfsc_if *
hfsc_attach(ifq, bandwidth)
	struct ifaltq *ifq;
	u_int bandwidth;
{
	struct hfsc_if *hif;

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
	if (hif == NULL)
		return (NULL);
	bzero(hif, sizeof(struct hfsc_if));

	/* the eligible list is an embedded TAILQ; just initialize it */
	TAILQ_INIT(&hif->hif_eligible);

	hif->hif_ifq = ifq;

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static int
hfsc_detach(hif)
	struct hfsc_if *hif;
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	free(hif, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(cl, rsc, fsc, usc)
	struct hfsc_class *cl;
	struct service_curve *rsc, *fsc, *usc;
{
	struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
	u_int64_t cur_time;
	int s;

	rsc_tmp = fsc_tmp = usc_tmp = NULL;
	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
	    cl->cl_rsc == NULL) {
		rsc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (rsc_tmp == NULL)
			return (ENOMEM);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
	    cl->cl_fsc == NULL) {
		fsc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (fsc_tmp == NULL) {
			free(rsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
	    cl->cl_usc == NULL) {
		usc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (usc_tmp == NULL) {
			free(rsc_tmp, M_DEVBUF);
			free(fsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	}

	cur_time = read_machclk();
	s = splnet();
	IFQ_LOCK(cl->cl_hif->hif_ifq);

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL)
				cl->cl_rsc = rsc_tmp;
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
			    cl->cl_cumul);
			cl->cl_eligible = cl->cl_deadline;
			if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
				cl->cl_eligible.dx = 0;
				cl->cl_eligible.dy = 0;
			}
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL)
				cl->cl_fsc = fsc_tmp;
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
			    cl->cl_total);
		}
	}

	if (usc != NULL) {
		if (usc->m1 == 0 && usc->m2 == 0) {
			if (cl->cl_usc != NULL) {
				free(cl->cl_usc, M_DEVBUF);
				cl->cl_usc = NULL;
				cl->cl_myf = 0;
			}
		} else {
			if (cl->cl_usc == NULL)
				cl->cl_usc = usc_tmp;
			sc2isc(usc, cl->cl_usc);
			rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
			    cl->cl_total);
		}
	}

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL)
			update_ed(cl, m_pktlen(qhead(cl->cl_q)));
		if (cl->cl_fsc != NULL)
			update_vf(cl, 0, cur_time);
		/* is this enough? */
	}

	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
	splx(s);

	return (0);
}

/*
 * hfsc device interface
 */
int
hfscopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no cpu clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
hfscioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 700000)
		if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
			return (error);
#elif (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
		    ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#ifdef ALTQ_DEBUG
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}

static int
hfsccmd_if_attach(ap)
	struct hfsc_attach *ap;
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * set HFSC to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request,
	    &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(ap)
	struct hfsc_interface *ap;
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(ap)
	struct hfsc_add_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	int i;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
		return (EINVAL);

	/* assign a class handle (use a free slot number for now) */
	for (i = 1; i < HFSC_MAX_CLASSES; i++)
		if (hif->hif_class_tbl[i] == NULL)
			break;
	if (i == HFSC_MAX_CLASSES)
		return (EBUSY);

	if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
	    parent, ap->qlimit, ap->flags, i)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = i;

	return (0);
}

static int
hfsccmd_delete_class(ap)
	struct hfsc_delete_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(ap)
	struct hfsc_modify_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;
	struct service_curve *usc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;
	if (ap->sctype & HFSC_UPPERLIMITSC)
		usc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc, usc);
}

static int
hfsccmd_add_filter(ap)
	struct hfsc_add_filter *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#ifdef ALTQ_DEBUG
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
	    cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(ap)
	struct hfsc_delete_filter *ap;
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
	    ap->filter_handle);
}

static int
hfsccmd_class_stats(ap)
	struct hfsc_class_stats *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->machclk_freq = machclk_freq;
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	     cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

		get_class_stats(&stats, cl);

		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
		    sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */

#endif /* ALTQ_HFSC */