/*-
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 *
 * $KAME: altq_hfsc.c,v 1.24 2003/12/05 05:40:46 kjc Exp $
 * $FreeBSD$
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */

#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>

#include <netpfil/pf/pf.h>
#include <netpfil/pf/pf_altq.h>
#include <netpfil/pf/pf_mtag.h>
#include <net/altq/altq.h>
#include <net/altq/altq_hfsc.h>
#ifdef ALTQ3_COMPAT
#include <net/altq/altq_conf.h>
#endif

/*
 * function prototypes
 */
static int	hfsc_clear_interface(struct hfsc_if *);
static int	hfsc_request(struct ifaltq *, int, void *);
static void	hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int	hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int	hfsc_enqueue(struct ifaltq *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq *, int);

static int	hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void	hfsc_purgeq(struct hfsc_class *);

static void	update_cfmin(struct hfsc_class *);
static void	set_active(struct hfsc_class *, int);
static void	set_passive(struct hfsc_class *);

static void	init_ed(struct hfsc_class *, int);
static void	update_ed(struct hfsc_class *, int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, u_int64_t);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
struct hfsc_class *hfsc_get_mindl(struct hfsc_if *, u_int64_t);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
    u_int64_t);

static __inline u_int64_t	seg_x2y(u_int64_t, u_int64_t);
static __inline u_int64_t	seg_y2x(u_int64_t, u_int64_t);
static __inline u_int64_t	m2sm(u_int64_t);
static __inline u_int64_t	m2ism(u_int64_t);
static __inline u_int64_t	d2dx(u_int);
static u_int64_t		sm2m(u_int64_t);
static u_int			dx2d(u_int64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);
static u_int64_t rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t rtsc_x2y(struct runtime_sc *, u_int64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);

static void	get_class_stats_v0(struct hfsc_classstats_v0 *,
    struct hfsc_class *);
static void	get_class_stats_v1(struct hfsc_classstats_v1 *,
    struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);

#ifdef ALTQ3_COMPAT
static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
static int hfsc_detach(struct hfsc_if *);
static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
    struct service_curve *, struct service_curve *);

static int hfsccmd_if_attach(struct hfsc_attach *);
static int hfsccmd_if_detach(struct hfsc_interface *);
static int hfsccmd_add_class(struct hfsc_add_class *);
static int hfsccmd_delete_class(struct hfsc_delete_class *);
static int hfsccmd_modify_class(struct hfsc_modify_class *);
static int hfsccmd_add_filter(struct hfsc_add_filter *);
static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int hfsccmd_class_stats(struct hfsc_class_stats *);

altqdev_decl(hfsc);
#endif /* ALTQ3_COMPAT */

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */

#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */

int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
	s = splnet();
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
	splx(s);
	return (error);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (hif == NULL)
		return (ENOMEM);

	TAILQ_INIT(&hif->hif_eligible);
	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	free(hif, M_DEVBUF);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts_v1 *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d  = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d  = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d  = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}
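
/*
 * Illustrative note (hypothetical values, not from the original source):
 * each of the three curves above is a two-piece linear service curve
 * (m1, d, m2): slope m1 in bits/sec for the first d milliseconds of a
 * backlogged period, slope m2 afterwards.  E.g. a real-time curve of
 * m1 = 20000000, d = 10, m2 = 10000000 would guarantee up to 20 Mbps for
 * the first 10 msec of a burst and 10 Mbps in steady state.  A curve with
 * m1 == 0 && m2 == 0 means "no curve of this type", which is exactly the
 * condition hfsc_class_create() tests before allocating an internal_sc.
 */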

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes, int version)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	union {
		struct hfsc_classstats_v0 v0;
		struct hfsc_classstats_v1 v1;
	} stats;
	size_t stats_size;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (version > HFSC_STATS_VERSION)
		return (EINVAL);

	memset(&stats, 0, sizeof(stats));
	switch (version) {
	case 0:
		get_class_stats_v0(&stats.v0, cl);
		stats_size = sizeof(struct hfsc_classstats_v0);
		break;
	case 1:
		get_class_stats_v1(&stats.v1, cl);
		stats_size = sizeof(struct hfsc_classstats_v1);
		break;
	default:
		/* reject unknown versions so stats_size is never used
		 * uninitialized */
		return (EINVAL);
	}

	if (*nbytes < stats_size)
		return (EINVAL);

	if ((error = copyout((caddr_t)&stats, ubuf, stats_size)) != 0)
		return (error);
	*nbytes = stats_size;
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

#ifdef ALTQ3_COMPAT
	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_CODEL
	if (flags & HFCF_CODEL) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: CODEL not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	TAILQ_INIT(&cl->cl_actc);

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	qsize(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */
#ifdef ALTQ_CODEL
	if (flags & HFCF_CODEL) {
		cl->cl_codel = codel_alloc(5, 100, 0);
		if (cl->cl_codel != NULL)
			qtype(cl->cl_q) = Q_CODEL;
	}
#endif

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	IFQ_LOCK(hif->hif_ifq);
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			IFQ_UNLOCK(hif->hif_ifq);
			splx(s);
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else {
		/* add this class to the children list of the parent */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			while (p->cl_siblings != NULL)
				p = p->cl_siblings;
			p->cl_siblings = cl;
		}
	}
	IFQ_UNLOCK(hif->hif_ifq);
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
#ifdef ALTQ_CODEL
		if (q_is_codel(cl->cl_q))
			codel_destroy(cl->cl_codel);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i, s;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();
	IFQ_LOCK(cl->cl_hif->hif_ifq);

#ifdef ALTQ3_COMPAT
	/* delete filters referencing to this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
	splx(s);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
#ifdef ALTQ_CODEL
		if (q_is_codel(cl->cl_q))
			codel_destroy(cl->cl_codel);
#endif
	}

	IFQ_LOCK(cl->cl_hif->hif_ifq);
	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct pf_mtag *t;
	int len;

	IFQ_LOCK_ASSERT(ifq);

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = pf_find_mtag(m)) != NULL)
		cl = clh_to_clp(hif, t->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	IFQ_LOCK_ASSERT(ifq);

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = hfsc_get_mindl(hif, cur_time)) != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n", fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		return codel_addq(cl->cl_codel, cl->cl_q, m);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		return codel_getq(cl->cl_codel, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * the actlist is now handled in update_vf(), so update_vf(cl, 0, 0)
	 * must be called explicitly to remove a class from the actlist.
	 */
}
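
/*
 * Illustrative note (paraphrasing the SIGCOMM'97 paper, not original source
 * text): the eligible time e and deadline d computed below are inverses of
 * the eligible and deadline curves taken at the amount of real-time service
 * already received:
 *
 *	e = E^-1(cl_cumul)		earliest time real-time service
 *					may next be given
 *	d = D^-1(cl_cumul + next_len)	time by which the head packet
 *					must finish under the guarantee
 *
 * hfsc_dequeue() then serves, among classes with e <= cur_time, the one
 * with the smallest d (see hfsc_get_mindl()); rtsc_y2x() performs the
 * curve inversion.
 */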

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				    p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	u_int64_t cfmin;

	if (TAILQ_EMPTY(&cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&hif->hif_eligible, elighead)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;

	TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&hif->hif_eligible, elighead);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
hfsc_get_mindl(struct hfsc_if *hif, u_int64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}
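
/*
 * Illustrative example (hypothetical numbers, not from the original source):
 * with three backlogged classes A (e=10, d=30), B (e=20, d=25) and
 * C (e=40, d=5) on the eligible list and cur_time = 25, the loop above
 * stops at C (its e exceeds cur_time) and returns B, the eligible class
 * with the smallest deadline.  Because the list is kept sorted by e, the
 * eligible classes are always a prefix of the list.
 */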

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, &cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/machclk tick) << SM_SHIFT
 *	ism: (machclk ticks/byte) << ISM_SHIFT
 *	dx: machclk ticks
 *
 * SM_SHIFT and ISM_SHIFT are fixed-point scaling factors chosen to preserve
 * enough effective digits that link speeds from 100 Kbps to 100 Gbps can be
 * handled with a 256 MHz machclk frequency while keeping at least 3
 * significant decimal digits.
 */
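
/*
 * Worked example (assuming machclk_freq = 256 MHz; numbers are illustrative,
 * not from the original source).  For m = 100 Mbps:
 *
 *	bytes per tick = 100000000 / 8 / 256000000 ~= 0.0488
 *	sm  = m2sm(m)  = (100000000 << 24) / 8 / 256000000 = 819200
 *	ism = m2ism(m) = (256000000 << 14) * 8 / 100000000 = 335544
 *
 * i.e. the fractional bytes-per-tick rate survives as an integer thanks to
 * the 2^24 (SM_SHIFT) and 2^14 (ISM_SHIFT) scaling.
 */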

#define	SM_SHIFT	24
#define	ISM_SHIFT	14

#define	SM_MASK		((1LL << SM_SHIFT) - 1)
#define	ISM_MASK	((1LL << ISM_SHIFT) - 1)

static __inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
	u_int64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but split x into its upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static __inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return (x);
}

static __inline u_int64_t
m2sm(u_int64_t m)
{
	u_int64_t sm;

	sm = (m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline u_int64_t
m2ism(u_int64_t m)
{
	u_int64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline u_int64_t
d2dx(u_int d)
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int64_t
sm2m(u_int64_t sm)
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return (m);
}

static u_int
dx2d(u_int64_t dx)
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
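
/*
 * Worked sc2isc() example (hypothetical curve, machclk_freq = 256 MHz):
 * (m1, d, m2) = (200 Mbps, 10 msec, 100 Mbps) converts to
 *
 *	sm1 = m2sm(200000000)  = 1638400
 *	dx  = d2dx(10)         = 2560000 ticks
 *	dy  = seg_x2y(dx, sm1) = 250000 bytes
 *
 * which checks out: 200 Mbps for 10 msec is 25000000 bytes/sec * 0.01 sec
 * = 250000 bytes of service delivered by the first segment.
 */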

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection (time) of the runtime service curve
 * for the given y value (amount of service), i.e. the inverse of
 * rtsc_x2y() below.
 */
static u_int64_t
rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}
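
/*
 * Note on the intersection arithmetic in rtsc_min() below (a restatement
 * of the code, not original source text): for a concave curve (sm1 > sm2),
 * the offset dx at which a new curve starting at (x, y) catches up with the
 * current curve passing through (x, y1) solves
 *
 *	y + seg_x2y(dx, sm1) = y1 + seg_x2y(dx, sm2)
 *
 * which, given the SM_SHIFT fixed-point scaling of sm1 and sm2, yields
 *
 *	dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 */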

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

static void
get_class_stats_v0(struct hfsc_classstats_v0 *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

#define	SATU32(x)	(u_int32_t)uqmin((x), UINT_MAX)

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = SATU32(sm2m(cl->cl_rsc->sm1));
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = SATU32(sm2m(cl->cl_rsc->sm2));
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = SATU32(sm2m(cl->cl_fsc->sm1));
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = SATU32(sm2m(cl->cl_fsc->sm2));
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = SATU32(sm2m(cl->cl_usc->sm1));
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = SATU32(sm2m(cl->cl_usc->sm2));
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

#undef SATU32

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		codel_getstats(cl->cl_codel, &sp->codel);
#endif
}
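
/*
 * Note: the v0 stats above carry 32-bit service-curve rates, so SATU32()
 * saturates anything above UINT_MAX (about 4.29 Gbps); the v1 stats below
 * carry the full 64-bit m1/m2 values and need no saturation.
 */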

static void
get_class_stats_v1(struct hfsc_classstats_v1 *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		codel_getstats(cl->cl_codel, &sp->codel);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

#ifdef ALTQ3_COMPAT
static struct hfsc_if *
hfsc_attach(ifq, bandwidth)
	struct ifaltq *ifq;
	u_int bandwidth;
{
	struct hfsc_if *hif;

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
	if (hif == NULL)
		return (NULL);
	bzero(hif, sizeof(struct hfsc_if));

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		free(hif, M_DEVBUF);
		return NULL;
	}

	hif->hif_ifq = ifq;

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static int
hfsc_detach(hif)
	struct hfsc_if *hif;
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	ellist_destroy(hif->hif_eligible);

	free(hif, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(cl, rsc, fsc, usc)
	struct hfsc_class *cl;
	struct service_curve *rsc, *fsc, *usc;
{
	struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
	u_int64_t cur_time;
	int s;

	rsc_tmp = fsc_tmp = usc_tmp = NULL;
	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
	    cl->cl_rsc == NULL) {
		rsc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (rsc_tmp == NULL)
			return (ENOMEM);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
	    cl->cl_fsc == NULL) {
		fsc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (fsc_tmp == NULL) {
			free(rsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
	    cl->cl_usc == NULL) {
		usc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (usc_tmp == NULL) {
			free(rsc_tmp, M_DEVBUF);
			free(fsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	}

	cur_time = read_machclk();
	s = splnet();
	IFQ_LOCK(cl->cl_hif->hif_ifq);

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL)
				cl->cl_rsc = rsc_tmp;
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
			    cl->cl_cumul);
			cl->cl_eligible = cl->cl_deadline;
			if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
				cl->cl_eligible.dx = 0;
				cl->cl_eligible.dy = 0;
			}
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL)
				cl->cl_fsc = fsc_tmp;
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
			    cl->cl_total);
		}
	}

	if (usc != NULL) {
		if (usc->m1 == 0 && usc->m2 == 0) {
			if (cl->cl_usc != NULL) {
				free(cl->cl_usc, M_DEVBUF);
				cl->cl_usc = NULL;
				cl->cl_myf = 0;
			}
		} else {
			if (cl->cl_usc == NULL)
				cl->cl_usc = usc_tmp;
			sc2isc(usc, cl->cl_usc);
			rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
			    cl->cl_total);
		}
	}

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL)
			update_ed(cl, m_pktlen(qhead(cl->cl_q)));
		if (cl->cl_fsc != NULL)
			update_vf(cl, 0, cur_time);
		/* is this enough? */
	}

	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
	splx(s);

	return (0);
}

/*
 * hfsc device interface
 */
int
hfscopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no cpu clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
hfscioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 700000)
		if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
			return (error);
#elif (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
		    ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#ifdef ALTQ_DEBUG
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}

static int
hfsccmd_if_attach(ap)
	struct hfsc_attach *ap;
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * set HFSC to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request,
	    &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(ap)
	struct hfsc_interface *ap;
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(ap)
	struct hfsc_add_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	int i;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
		return (EINVAL);

	/* assign a class handle (use a free slot number for now) */
	for (i = 1; i < HFSC_MAX_CLASSES; i++)
		if (hif->hif_class_tbl[i] == NULL)
			break;
	if (i == HFSC_MAX_CLASSES)
		return (EBUSY);

	if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
	    parent, ap->qlimit, ap->flags, i)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = i;

	return (0);
}

static int
hfsccmd_delete_class(ap)
	struct hfsc_delete_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(ap)
	struct hfsc_modify_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;
	struct service_curve *usc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;
	if (ap->sctype & HFSC_UPPERLIMITSC)
		usc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc, usc);
}

static int
hfsccmd_add_filter(ap)
	struct hfsc_add_filter *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#ifdef ALTQ_DEBUG
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
	    cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(ap)
	struct hfsc_delete_filter *ap;
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
	    ap->filter_handle);
}

static int
hfsccmd_class_stats(ap)
	struct hfsc_class_stats *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->machclk_freq = machclk_freq;
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	    cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {
		get_class_stats(&stats, cl);

		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
		    sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */

#endif /* ALTQ_HFSC */