/*	$OpenBSD: pfctl_altq.c,v 1.93 2007/10/15 02:16:35 deraadt Exp $	*/

/*
 * Copyright (c) 2002
 *	Sony Computer Science Laboratories Inc.
 * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define PFIOC_USE_LATEST

#include <sys/types.h>
#include <sys/bitset.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>

#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <math.h>
#include <search.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>
#include <net/altq/altq_codel.h>
#include <net/altq/altq_priq.h>
#include <net/altq/altq_hfsc.h>
#include <net/altq/altq_fairq.h>

#include "pfctl_parser.h"
#include "pfctl.h"

#define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))

static STAILQ_HEAD(interfaces, pfctl_altq) interfaces = STAILQ_HEAD_INITIALIZER(interfaces);
static struct hsearch_data queue_map;
static struct hsearch_data if_map;
static struct hsearch_data qid_map;

static struct pfctl_altq	*pfaltq_lookup(char *ifname);
static struct pfctl_altq	*qname_to_pfaltq(const char *, const char *);
static u_int32_t		 qname_to_qid(char *);

static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *,
		    struct pfctl_altq *);
static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
static int	check_commit_cbq(int, int, struct pfctl_altq *);
static int	print_cbq_opts(const struct pf_altq *);

static int	print_codel_opts(const struct pf_altq *,
		    const struct node_queue_opt *);

static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *,
		    struct pfctl_altq *);
static int	check_commit_priq(int, int, struct pfctl_altq *);
static int	print_priq_opts(const struct pf_altq *);

static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *,
		    struct pfctl_altq *, struct pfctl_altq *);
static int	check_commit_hfsc(int, int, struct pfctl_altq *);
static int	print_hfsc_opts(const struct pf_altq *,
		    const struct node_queue_opt *);

static int	eval_pfqueue_fairq(struct pfctl *, struct pf_altq *,
		    struct pfctl_altq *, struct pfctl_altq *);
static int	print_fairq_opts(const struct pf_altq *,
		    const struct node_queue_opt *);
static int	check_commit_fairq(int, int, struct pfctl_altq *);

static void		 gsc_add_sc(struct gen_sc *, struct service_curve *);
static int		 is_gsc_under_sc(struct gen_sc *,
			     struct service_curve *);
static struct segment	*gsc_getentry(struct gen_sc *, double);
static int		 gsc_add_seg(struct gen_sc *, double, double, double,
			     double);
static double		 sc_x2y(struct service_curve *, double);

#ifdef __FreeBSD__
u_int64_t	 getifspeed(int, char *);
#else
u_int32_t	 getifspeed(char *);
#endif
u_long		 getifmtu(char *);
int		 eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
		     u_int64_t);
u_int64_t	 eval_bwspec(struct node_queue_bw *, u_int64_t);
void		 print_hfsc_sc(const char *, u_int, u_int, u_int,
		     const struct node_hfsc_sc *);
void		 print_fairq_sc(const char *, u_int, u_int, u_int,
		     const struct node_fairq_sc *);

static __attribute__((constructor)) void
pfctl_altq_init(void)
{
	/*
	 * As hdestroy() will never be called on these tables, it will be
	 * safe to use references into the stored data as keys.
	 */
	if (hcreate_r(0, &queue_map) == 0)
		err(1, "Failed to create altq queue map");
	if (hcreate_r(0, &if_map) == 0)
		err(1, "Failed to create altq interface map");
	if (hcreate_r(0, &qid_map) == 0)
		err(1, "Failed to create altq queue id map");
}
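/*
 * The three hsearch(3) tables above are used as follows: if_map maps an
 * interface name to its "altq on <ifname>" definition, queue_map maps the
 * composite key "<ifname>:<qname>" to a queue definition, and qid_map maps
 * a bare queue name to its qid, so that queues of the same name share one
 * qid across interfaces.  For example (illustrative names only), "altq on
 * em0" creates the if_map entry "em0", and a subsequent "queue http on em0"
 * creates the queue_map entry "em0:http".
 */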
void
pfaltq_store(struct pf_altq *a)
{
	struct pfctl_altq	*altq;
	ENTRY			 item;
	ENTRY			*ret_item;
	size_t			 key_size;

	if ((altq = malloc(sizeof(*altq))) == NULL)
		err(1, "queue malloc");
	memcpy(&altq->pa, a, sizeof(struct pf_altq));
	memset(&altq->meta, 0, sizeof(altq->meta));

	if (a->qname[0] == 0) {
		item.key = altq->pa.ifname;
		item.data = altq;
		if (hsearch_r(item, ENTER, &ret_item, &if_map) == 0)
			err(1, "interface map insert");
		STAILQ_INSERT_TAIL(&interfaces, altq, meta.link);
	} else {
		key_size = sizeof(a->ifname) + sizeof(a->qname);
		if ((item.key = malloc(key_size)) == NULL)
			err(1, "queue map key malloc");
		snprintf(item.key, key_size, "%s:%s", a->ifname, a->qname);
		item.data = altq;
		if (hsearch_r(item, ENTER, &ret_item, &queue_map) == 0)
			err(1, "queue map insert");

		item.key = altq->pa.qname;
		item.data = &altq->pa.qid;
		if (hsearch_r(item, ENTER, &ret_item, &qid_map) == 0)
			err(1, "qid map insert");
	}
}

static struct pfctl_altq *
pfaltq_lookup(char *ifname)
{
	ENTRY	 item;
	ENTRY	*ret_item;

	item.key = ifname;
	if (hsearch_r(item, FIND, &ret_item, &if_map) == 0)
		return (NULL);

	return (ret_item->data);
}

static struct pfctl_altq *
qname_to_pfaltq(const char *qname, const char *ifname)
{
	ENTRY	 item;
	ENTRY	*ret_item;
	char	 key[IFNAMSIZ + PF_QNAME_SIZE];

	item.key = key;
	snprintf(item.key, sizeof(key), "%s:%s", ifname, qname);
	if (hsearch_r(item, FIND, &ret_item, &queue_map) == 0)
		return (NULL);

	return (ret_item->data);
}

static u_int32_t
qname_to_qid(char *qname)
{
	ENTRY	 item;
	ENTRY	*ret_item;
	uint32_t qid;

	/*
	 * We guarantee that same named queues on different interfaces
	 * have the same qid.
	 */
	item.key = qname;
	if (hsearch_r(item, FIND, &ret_item, &qid_map) == 0)
		return (0);

	qid = *(uint32_t *)ret_item->data;
	return (qid);
}
void
print_altq(const struct pf_altq *a, unsigned int level,
    struct node_queue_bw *bw, struct node_queue_opt *qopts)
{
	if (a->qname[0] != 0) {
		print_queue(a, level, bw, 1, qopts);
		return;
	}

#ifdef __FreeBSD__
	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
		printf("INACTIVE ");
#endif

	printf("altq on %s ", a->ifname);

	switch (a->scheduler) {
	case ALTQT_CBQ:
		if (!print_cbq_opts(a))
			printf("cbq ");
		break;
	case ALTQT_PRIQ:
		if (!print_priq_opts(a))
			printf("priq ");
		break;
	case ALTQT_HFSC:
		if (!print_hfsc_opts(a, qopts))
			printf("hfsc ");
		break;
	case ALTQT_FAIRQ:
		if (!print_fairq_opts(a, qopts))
			printf("fairq ");
		break;
	case ALTQT_CODEL:
		if (!print_codel_opts(a, qopts))
			printf("codel ");
		break;
	}

	if (bw != NULL && bw->bw_percent > 0) {
		if (bw->bw_percent < 100)
			printf("bandwidth %u%% ", bw->bw_percent);
	} else
		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));

	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	printf("tbrsize %u ", a->tbrsize);
}

void
print_queue(const struct pf_altq *a, unsigned int level,
    struct node_queue_bw *bw, int print_interface,
    struct node_queue_opt *qopts)
{
	unsigned int	i;

#ifdef __FreeBSD__
	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
		printf("INACTIVE ");
#endif
	printf("queue ");
	for (i = 0; i < level; ++i)
		printf(" ");
	printf("%s ", a->qname);
	if (print_interface)
		printf("on %s ", a->ifname);
	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
	    a->scheduler == ALTQT_FAIRQ) {
		if (bw != NULL && bw->bw_percent > 0) {
			if (bw->bw_percent < 100)
				printf("bandwidth %u%% ", bw->bw_percent);
		} else
			printf("bandwidth %s ", rate2str((double)a->bandwidth));
	}
	if (a->priority != DEFAULT_PRIORITY)
		printf("priority %u ", a->priority);
	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	switch (a->scheduler) {
	case ALTQT_CBQ:
		print_cbq_opts(a);
		break;
	case ALTQT_PRIQ:
		print_priq_opts(a);
		break;
	case ALTQT_HFSC:
		print_hfsc_opts(a, qopts);
		break;
	case ALTQT_FAIRQ:
		print_fairq_opts(a, qopts);
		break;
	}
}

/*
 * eval_pfaltq computes the discipline parameters.
 */
int
eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	u_int64_t	rate;
	u_int		size, errors = 0;

	if (bw->bw_absolute > 0)
		pa->ifbandwidth = bw->bw_absolute;
	else
#ifdef __FreeBSD__
		if ((rate = getifspeed(pf->dev, pa->ifname)) == 0) {
#else
		if ((rate = getifspeed(pa->ifname)) == 0) {
#endif
			fprintf(stderr, "interface %s does not know its bandwidth, "
			    "please specify an absolute bandwidth\n",
			    pa->ifname);
			errors++;
		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
			pa->ifbandwidth = rate;

	/*
	 * Limit bandwidth to UINT_MAX for schedulers that aren't 64-bit ready.
	 */
	if ((pa->scheduler != ALTQT_HFSC) && (pa->ifbandwidth > UINT_MAX)) {
		pa->ifbandwidth = UINT_MAX;
		warnx("interface %s bandwidth limited to %" PRIu64 " bps "
		    "because selected scheduler is 32-bit limited\n", pa->ifname,
		    pa->ifbandwidth);
	}
	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);

	/* if tbrsize is not specified, use heuristics */
	if (pa->tbrsize == 0) {
		rate = pa->ifbandwidth;
		if (rate <= 1 * 1000 * 1000)
			size = 1;
		else if (rate <= 10 * 1000 * 1000)
			size = 4;
		else if (rate <= 200 * 1000 * 1000)
			size = 8;
		else if (rate <= 2500 * 1000 * 1000ULL)
			size = 24;
		else
			size = 128;
		size = size * getifmtu(pa->ifname);
		pa->tbrsize = size;
	}
	return (errors);
}
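/*
 * A quick illustration of the tbrsize heuristic above (numbers are
 * illustrative): for an interface reporting 100 Mbps with a 1500 byte MTU,
 * the rate falls into the "<= 200 Mbps" bucket, so the token bucket
 * regulator is sized to 8 * 1500 = 12000 bytes; a 10 Gbps interface would
 * get 128 * 1500 = 192000 bytes.  The actual value depends on the MTU
 * returned by getifmtu().
 */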
/*
 * check_commit_altq does consistency check for each interface
 */
int
check_commit_altq(int dev, int opts)
{
	struct pfctl_altq	*if_ppa;
	int			 error = 0;

	/* call the discipline check for each interface. */
	STAILQ_FOREACH(if_ppa, &interfaces, meta.link) {
		switch (if_ppa->pa.scheduler) {
		case ALTQT_CBQ:
			error = check_commit_cbq(dev, opts, if_ppa);
			break;
		case ALTQT_PRIQ:
			error = check_commit_priq(dev, opts, if_ppa);
			break;
		case ALTQT_HFSC:
			error = check_commit_hfsc(dev, opts, if_ppa);
			break;
		case ALTQT_FAIRQ:
			error = check_commit_fairq(dev, opts, if_ppa);
			break;
		default:
			break;
		}
	}
	return (error);
}

/*
 * eval_pfqueue computes the queue parameters.
 */
int
eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	/* should be merged with expand_queue */
	struct pfctl_altq	*if_ppa, *parent;
	int			 error = 0;

	/* find the corresponding interface and copy fields used by queues */
	if ((if_ppa = pfaltq_lookup(pa->ifname)) == NULL) {
		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
		return (1);
	}
	pa->scheduler = if_ppa->pa.scheduler;
	pa->ifbandwidth = if_ppa->pa.ifbandwidth;

	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
		fprintf(stderr, "queue %s already exists on interface %s\n",
		    pa->qname, pa->ifname);
		return (1);
	}
	pa->qid = qname_to_qid(pa->qname);

	parent = NULL;
	if (pa->parent[0] != 0) {
		parent = qname_to_pfaltq(pa->parent, pa->ifname);
		if (parent == NULL) {
			fprintf(stderr, "parent %s not found for %s\n",
			    pa->parent, pa->qname);
			return (1);
		}
		pa->parent_qid = parent->pa.qid;
	}
	if (pa->qlimit == 0)
		pa->qlimit = DEFAULT_QLIMIT;

	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
	    pa->scheduler == ALTQT_FAIRQ) {
		pa->bandwidth = eval_bwspec(bw,
		    parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth);

		if (pa->bandwidth > pa->ifbandwidth) {
			fprintf(stderr, "bandwidth for %s higher than "
			    "interface\n", pa->qname);
			return (1);
		}
		/*
		 * If not HFSC, then check that the sum of the child
		 * bandwidths is less than the parent's bandwidth.  For
		 * HFSC, the equivalent concept is to check that the sum of
		 * the child linkshare service curves are under the parent's
		 * linkshare service curve, and that check is performed by
		 * eval_pfqueue_hfsc().
		 */
		if ((parent != NULL) && (pa->scheduler != ALTQT_HFSC)) {
			if (pa->bandwidth > parent->pa.bandwidth) {
				warnx("bandwidth for %s higher than parent",
				    pa->qname);
				return (1);
			}
			parent->meta.bwsum += pa->bandwidth;
			if (parent->meta.bwsum > parent->pa.bandwidth) {
				warnx("the sum of the child bandwidth (%" PRIu64
				    ") higher than parent \"%s\" (%" PRIu64 ")",
				    parent->meta.bwsum, parent->pa.qname,
				    parent->pa.bandwidth);
			}
		}
	}

	if (eval_queue_opts(pa, opts,
	    parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth))
		return (1);

	if (parent != NULL)
		parent->meta.children++;

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		error = eval_pfqueue_cbq(pf, pa, if_ppa);
		break;
	case ALTQT_PRIQ:
		error = eval_pfqueue_priq(pf, pa, if_ppa);
		break;
	case ALTQT_HFSC:
		error = eval_pfqueue_hfsc(pf, pa, if_ppa, parent);
		break;
	case ALTQT_FAIRQ:
		error = eval_pfqueue_fairq(pf, pa, if_ppa, parent);
		break;
	default:
		break;
	}
	return (error);
}

/*
 * CBQ support functions
 */
#define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
#define	RM_NS_PER_SEC	(1000000000)

static int
eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
{
	struct cbq_opts	*opts;
	u_int		 ifmtu;

	if (pa->priority >= CBQ_MAXPRI) {
		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
		return (-1);
	}

	ifmtu = getifmtu(pa->ifname);
	opts = &pa->pq_u.cbq_opts;

	if (opts->pktsize == 0) {	/* use default */
		opts->pktsize = ifmtu;
		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
			opts->pktsize &= ~MCLBYTES;
	} else if (opts->pktsize > ifmtu)
		opts->pktsize = ifmtu;
	if (opts->maxpktsize == 0)	/* use default */
		opts->maxpktsize = ifmtu;
	else if (opts->maxpktsize > ifmtu)
		opts->pktsize = ifmtu;

	if (opts->pktsize > opts->maxpktsize)
		opts->pktsize = opts->maxpktsize;

	if (pa->parent[0] == 0)
		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);

	if (pa->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
		if_ppa->meta.root_classes++;
	if (pa->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
		if_ppa->meta.default_classes++;

	cbq_compute_idletime(pf, pa);
	return (0);
}

/*
 * compute ns_per_byte, maxidle, minidle, and offtime
 */
static int
cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	double		 maxidle_s, maxidle, minidle;
	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
	double		 z, g, f, gton, gtom;
	u_int		 minburst, maxburst;

	opts = &pa->pq_u.cbq_opts;
	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
	minburst = opts->minburst;
	maxburst = opts->maxburst;

	if (pa->bandwidth == 0)
		f = 0.0001;	/* small enough? */
	else
		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);

	nsPerByte = ifnsPerByte / f;
	ptime = (double)opts->pktsize * ifnsPerByte;
	cptime = ptime * (1.0 - f) / f;
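	/*
	 * For orientation (values are illustrative): with a 10 Mbps
	 * interface and a 1 Mbps queue, f = 0.1, ifnsPerByte = 800 ns and
	 * nsPerByte = 8000 ns, i.e. the inverse of the class rate in
	 * nanoseconds per byte.  ptime is the transmission time of an
	 * average packet at link speed, and cptime the additional delay a
	 * packet sees when the class only gets its share f of the link.
	 */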

	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
		/*
		 * this causes integer overflow in kernel!
		 * (bandwidth < 6Kbps when max_pkt_size=1500)
		 */
		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
			warnx("queue bandwidth must be larger than %s",
			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
			    (double)INT_MAX * (double)pa->ifbandwidth));
			fprintf(stderr, "cbq: queue %s is too slow!\n",
			    pa->qname);
		}
		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
	}

	if (maxburst == 0) {	/* use default */
		if (cptime > 10.0 * 1000000)
			maxburst = 4;
		else
			maxburst = 16;
	}
	if (minburst == 0)	/* use default */
		minburst = 2;
	if (minburst > maxburst)
		minburst = maxburst;

	z = (double)(1 << RM_FILTER_GAIN);
	g = (1.0 - 1.0 / z);
	gton = pow(g, (double)maxburst);
	gtom = pow(g, (double)(minburst-1));
	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
	maxidle_s = (1.0 - g);
	if (maxidle > maxidle_s)
		maxidle = ptime * maxidle;
	else
		maxidle = ptime * maxidle_s;
	offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
	minidle = -((double)opts->maxpktsize * (double)nsPerByte);

	/* scale parameters */
	maxidle = ((maxidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);
	offtime = (offtime * 8.0) / nsPerByte *
	    pow(2.0, (double)RM_FILTER_GAIN);
	minidle = ((minidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);

	maxidle = maxidle / 1000.0;
	offtime = offtime / 1000.0;
	minidle = minidle / 1000.0;

	opts->minburst = minburst;
	opts->maxburst = maxburst;
	opts->ns_per_byte = (u_int)nsPerByte;
	opts->maxidle = (u_int)fabs(maxidle);
	opts->minidle = (int)minidle;
	opts->offtime = (u_int)fabs(offtime);

	return (0);
}
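/*
 * The computation above follows the classic CBQ/WRR parameter derivation:
 * with an EWMA gain of 1/2^RM_FILTER_GAIN, g = 1 - 1/2^RM_FILTER_GAIN, and
 *
 *	maxidle ~ ptime * (1/f - 1) * (1 - g^maxburst) / g^maxburst
 *	offtime ~ cptime * (1 + (1 - g^(minburst-1)) /
 *	    ((1 - g) * g^(minburst-1)))
 *	minidle = -maxpktsize * nsPerByte
 *
 * which roughly bound how far a class may run ahead of or behind its
 * allocation over a burst of maxburst back-to-back packets.  (This only
 * paraphrases the code above; see the CBQ literature for the original
 * derivation.)
 */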
static int
check_commit_cbq(int dev, int opts, struct pfctl_altq *if_ppa)
{
	int	error = 0;

	/*
	 * check if cbq has one root queue and one default queue
	 * for this interface
	 */
	if (if_ppa->meta.root_classes != 1) {
		warnx("should have one root queue on %s", if_ppa->pa.ifname);
		error++;
	}
	if (if_ppa->meta.default_classes != 1) {
		warnx("should have one default queue on %s", if_ppa->pa.ifname);
		error++;
	}
	return (error);
}

static int
print_cbq_opts(const struct pf_altq *a)
{
	const struct cbq_opts	*opts;

	opts = &a->pq_u.cbq_opts;
	if (opts->flags) {
		printf("cbq(");
		if (opts->flags & CBQCLF_RED)
			printf(" red");
		if (opts->flags & CBQCLF_ECN)
			printf(" ecn");
		if (opts->flags & CBQCLF_RIO)
			printf(" rio");
		if (opts->flags & CBQCLF_CODEL)
			printf(" codel");
		if (opts->flags & CBQCLF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & CBQCLF_FLOWVALVE)
			printf(" flowvalve");
		if (opts->flags & CBQCLF_BORROW)
			printf(" borrow");
		if (opts->flags & CBQCLF_WRR)
			printf(" wrr");
		if (opts->flags & CBQCLF_EFFICIENT)
			printf(" efficient");
		if (opts->flags & CBQCLF_ROOTCLASS)
			printf(" root");
		if (opts->flags & CBQCLF_DEFCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * PRIQ support functions
 */
static int
eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
{

	if (pa->priority >= PRIQ_MAXPRI) {
		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
		return (-1);
	}
	if (BIT_ISSET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris)) {
		warnx("%s does not have a unique priority on interface %s",
		    pa->qname, pa->ifname);
		return (-1);
	} else
		BIT_SET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris);

	if (pa->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
		if_ppa->meta.default_classes++;
	return (0);
}

static int
check_commit_priq(int dev, int opts, struct pfctl_altq *if_ppa)
{

	/*
	 * check if priq has one default class for this interface
	 */
	if (if_ppa->meta.default_classes != 1) {
		warnx("should have one default queue on %s", if_ppa->pa.ifname);
		return (1);
	}
	return (0);
}

static int
print_priq_opts(const struct pf_altq *a)
{
	const struct priq_opts	*opts;

	opts = &a->pq_u.priq_opts;

	if (opts->flags) {
		printf("priq(");
		if (opts->flags & PRCF_RED)
			printf(" red");
		if (opts->flags & PRCF_ECN)
			printf(" ecn");
		if (opts->flags & PRCF_RIO)
			printf(" rio");
		if (opts->flags & PRCF_CODEL)
			printf(" codel");
		if (opts->flags & PRCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & PRCF_DEFAULTCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * HFSC support functions
 */
static int
eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa,
    struct pfctl_altq *parent)
{
	struct hfsc_opts_v1	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.hfsc_opts;

	if (parent == NULL) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	/* First child initializes the parent's service curve accumulators. */
	if (parent->meta.children == 1) {
		LIST_INIT(&parent->meta.rtsc);
		LIST_INIT(&parent->meta.lssc);
	}

	if (parent->pa.pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
		warnx("adding %s would make default queue %s not a leaf",
		    pa->qname, pa->parent);
		return (-1);
	}

	if (pa->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS)
		if_ppa->meta.default_classes++;

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
		warnx("m2 is zero for %s", pa->qname);
		return (-1);
	}

	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
		warnx("m1 must be zero for convex curve: %s", pa->qname);
		return (-1);
	}

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the linkshare service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */
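	/*
	 * Example (illustrative numbers): on a 100 Mbps interface the
	 * real-time curves of all queues together may not exceed a curve of
	 * m2 = 80 Mbps, so two queues with realtime m2 = 50 Mbps each would
	 * be rejected below even though each one alone fits.  The linkshare
	 * check is the analogous test against the parent's own curve rather
	 * than against 80% of the interface.
	 */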
	/* check the real-time service curve.  reserve 20% of interface bw */
	if (opts->rtsc_m2 != 0) {
		/* add this queue to the sum */
		sc.m1 = opts->rtsc_m1;
		sc.d = opts->rtsc_d;
		sc.m2 = opts->rtsc_m2;
		gsc_add_sc(&parent->meta.rtsc, &sc);
		/* compare the sum with 80% of the interface */
		sc.m1 = 0;
		sc.d = 0;
		sc.m2 = pa->ifbandwidth / 100 * 80;
		if (!is_gsc_under_sc(&parent->meta.rtsc, &sc)) {
			warnx("real-time sc exceeds 80%% of the interface "
			    "bandwidth (%s)", rate2str((double)sc.m2));
			return (-1);
		}
	}

	/* check the linkshare service curve. */
	if (opts->lssc_m2 != 0) {
		/* add this queue to the child sum */
		sc.m1 = opts->lssc_m1;
		sc.d = opts->lssc_d;
		sc.m2 = opts->lssc_m2;
		gsc_add_sc(&parent->meta.lssc, &sc);
		/* compare the sum of the children with parent's sc */
		sc.m1 = parent->pa.pq_u.hfsc_opts.lssc_m1;
		sc.d = parent->pa.pq_u.hfsc_opts.lssc_d;
		sc.m2 = parent->pa.pq_u.hfsc_opts.lssc_m2;
		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
			warnx("linkshare sc exceeds parent's sc");
			return (-1);
		}
	}

	/* check the upper-limit service curve. */
	if (opts->ulsc_m2 != 0) {
		if (opts->ulsc_m1 > pa->ifbandwidth ||
		    opts->ulsc_m2 > pa->ifbandwidth) {
			warnx("upper-limit larger than interface bandwidth");
			return (-1);
		}
		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
			warnx("upper-limit sc smaller than real-time sc");
			return (-1);
		}
	}

	return (0);
}

/*
 * FAIRQ support functions
 */
static int
eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa,
    struct pfctl_altq *if_ppa, struct pfctl_altq *parent)
{
	struct fairq_opts	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.fairq_opts;

	if (pa->parent[0] == 0) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	/* First child initializes the parent's service curve accumulator. */
	if (parent->meta.children == 1)
		LIST_INIT(&parent->meta.lssc);

	if (parent->pa.pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
		warnx("adding %s would make default queue %s not a leaf",
		    pa->qname, pa->parent);
		return (-1);
	}

	if (pa->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS)
		if_ppa->meta.default_classes++;

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the link-sharing service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */

	/* check the linkshare service curve. */
	if (opts->lssc_m2 != 0) {
		/* add this queue to the child sum */
		sc.m1 = opts->lssc_m1;
		sc.d = opts->lssc_d;
		sc.m2 = opts->lssc_m2;
		gsc_add_sc(&parent->meta.lssc, &sc);
		/* compare the sum of the children with parent's sc */
		sc.m1 = parent->pa.pq_u.fairq_opts.lssc_m1;
		sc.d = parent->pa.pq_u.fairq_opts.lssc_d;
		sc.m2 = parent->pa.pq_u.fairq_opts.lssc_m2;
		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
			warnx("link-sharing sc exceeds parent's sc");
			return (-1);
		}
	}

	return (0);
}

static int
check_commit_hfsc(int dev, int opts, struct pfctl_altq *if_ppa)
{

	/* check if hfsc has one default queue for this interface */
	if (if_ppa->meta.default_classes != 1) {
		warnx("should have one default queue on %s", if_ppa->pa.ifname);
		return (1);
	}
	return (0);
}

static int
check_commit_fairq(int dev __unused, int opts __unused, struct pfctl_altq *if_ppa)
{

	/* check if fairq has one default queue for this interface */
	if (if_ppa->meta.default_classes != 1) {
		warnx("should have one default queue on %s", if_ppa->pa.ifname);
		return (1);
	}
	return (0);
}

static int
print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct hfsc_opts_v1	*opts;
	const struct node_hfsc_sc	*rtsc, *lssc, *ulsc;

	opts = &a->pq_u.hfsc_opts;
	if (qopts == NULL)
		rtsc = lssc = ulsc = NULL;
	else {
		rtsc = &qopts->data.hfsc_opts.realtime;
		lssc = &qopts->data.hfsc_opts.linkshare;
		ulsc = &qopts->data.hfsc_opts.upperlimit;
	}

	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("hfsc(");
		if (opts->flags & HFCF_RED)
			printf(" red");
		if (opts->flags & HFCF_ECN)
			printf(" ecn");
		if (opts->flags & HFCF_RIO)
			printf(" rio");
		if (opts->flags & HFCF_CODEL)
			printf(" codel");
		if (opts->flags & HFCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & HFCF_DEFAULTCLASS)
			printf(" default");
		if (opts->rtsc_m2 != 0)
			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
			    opts->rtsc_m2, rtsc);
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, lssc);
		if (opts->ulsc_m2 != 0)
			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
			    opts->ulsc_m2, ulsc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}

static int
print_codel_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct codel_opts	*opts;

	opts = &a->pq_u.codel_opts;
	if (opts->target || opts->interval || opts->ecn) {
		printf("codel(");
		if (opts->target)
			printf(" target %d", opts->target);
		if (opts->interval)
			printf(" interval %d", opts->interval);
		if (opts->ecn)
			printf(" ecn");
		printf(" ) ");

		return (1);
	}

	return (0);
}

static int
print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct fairq_opts	*opts;
	const struct node_fairq_sc	*loc_lssc;

	opts = &a->pq_u.fairq_opts;
	if (qopts == NULL)
		loc_lssc = NULL;
	else
		loc_lssc = &qopts->data.fairq_opts.linkshare;

	if (opts->flags ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("fairq(");
		if (opts->flags & FARF_RED)
			printf(" red");
		if (opts->flags & FARF_ECN)
			printf(" ecn");
		if (opts->flags & FARF_RIO)
			printf(" rio");
		if (opts->flags & FARF_CODEL)
			printf(" codel");
		if (opts->flags & FARF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & FARF_DEFAULTCLASS)
			printf(" default");
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, loc_lssc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * admission control using generalized service curve
 */
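/*
 * A gen_sc is kept as a list of linear segments (x, y, d, m): a two-piece
 * service curve (m1, d, m2) contributes the segment [0, d) with slope m1
 * and the segment [d, INFINITY) with slope m2, and gsc_add_seg() adds the
 * slopes of overlapping segments.  is_gsc_under_sc() then only has to
 * compare the accumulated piecewise-linear curve against the parent (or
 * 80%-of-interface) curve at the segment start points and via the slope of
 * the last segment.
 */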
/* add a new service curve to a generalized service curve */
static void
gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	if (is_sc_null(sc))
		return;
	if (sc->d != 0)
		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
}

/*
 * check whether all points of a generalized service curve have
 * their y-coordinates no larger than a given two-piece linear
 * service curve.
 */
static int
is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	struct segment	*s, *last, *end;
	double		 y;

	if (is_sc_null(sc)) {
		if (LIST_EMPTY(gsc))
			return (1);
		LIST_FOREACH(s, gsc, _next) {
			if (s->m != 0)
				return (0);
		}
		return (1);
	}
	/*
	 * gsc has a dummy entry at the end with x = INFINITY.
	 * loop through up to this dummy entry.
	 */
	end = gsc_getentry(gsc, INFINITY);
	if (end == NULL)
		return (1);
	last = NULL;
	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
		if (s->y > sc_x2y(sc, s->x))
			return (0);
		last = s;
	}
	/* last now holds the real last segment */
	if (last == NULL)
		return (1);
	if (last->m > sc->m2)
		return (0);
	if (last->x < sc->d && last->m > sc->m1) {
		y = last->y + (sc->d - last->x) * last->m;
		if (y > sc_x2y(sc, sc->d))
			return (0);
	}
	return (1);
}

/*
 * return a segment entry starting at x.
 * if gsc has no entry starting at x, a new entry is created at x.
 */
static struct segment *
gsc_getentry(struct gen_sc *gsc, double x)
{
	struct segment	*new, *prev, *s;

	prev = NULL;
	LIST_FOREACH(s, gsc, _next) {
		if (s->x == x)
			return (s);	/* matching entry found */
		else if (s->x < x)
			prev = s;
		else
			break;
	}

	/* we have to create a new entry */
	if ((new = calloc(1, sizeof(struct segment))) == NULL)
		return (NULL);

	new->x = x;
	if (x == INFINITY || s == NULL)
		new->d = 0;
	else if (s->x == INFINITY)
		new->d = INFINITY;
	else
		new->d = s->x - x;
	if (prev == NULL) {
		/* insert the new entry at the head of the list */
		new->y = 0;
		new->m = 0;
		LIST_INSERT_HEAD(gsc, new, _next);
	} else {
		/*
		 * the start point intersects with the segment pointed by
		 * prev.  divide prev into 2 segments
		 */
		if (x == INFINITY) {
			prev->d = INFINITY;
			if (prev->m == 0)
				new->y = prev->y;
			else
				new->y = INFINITY;
		} else {
			prev->d = x - prev->x;
			new->y = prev->d * prev->m + prev->y;
		}
		new->m = prev->m;
		LIST_INSERT_AFTER(prev, new, _next);
	}
	return (new);
}

/* add a segment to a generalized service curve */
static int
gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
{
	struct segment	*start, *end, *s;
	double		 x2;

	if (d == INFINITY)
		x2 = INFINITY;
	else
		x2 = x + d;
	start = gsc_getentry(gsc, x);
	end = gsc_getentry(gsc, x2);
	if (start == NULL || end == NULL)
		return (-1);

	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
		s->m += m;
		s->y += y + (s->x - x) * m;
	}

	end = gsc_getentry(gsc, INFINITY);
	for (; s != end; s = LIST_NEXT(s, _next)) {
		s->y += m * d;
	}

	return (0);
}

/* get y-projection of a service curve */
static double
sc_x2y(struct service_curve *sc, double x)
{
	double	y;

	if (x <= (double)sc->d)
		/* y belongs to the 1st segment */
		y = x * (double)sc->m1;
	else
		/* y belongs to the 2nd segment */
		y = (double)sc->d * (double)sc->m1
		    + (x - (double)sc->d) * (double)sc->m2;
	return (y);
}

/*
 * misc utilities
 */
#define	R2S_BUFS	8
#define	RATESTR_MAX	16

char *
rate2str(double rate)
{
	char		*buf;
	static char	 r2sbuf[R2S_BUFS][RATESTR_MAX];	/* ring buffer */
	static int	 idx = 0;
	int		 i;
	static const char unit[] = " KMG";

	buf = r2sbuf[idx++];
	if (idx == R2S_BUFS)
		idx = 0;

	for (i = 0; rate >= 1000 && i <= 3; i++)
		rate /= 1000;

	if ((int)(rate * 100) % 100)
		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
	else
		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);

	return (buf);
}
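/*
 * rate2str() prints in units of 1000 (not 1024): for example a rate of
 * 1500000 is printed as "1.50Mb" and 250000000 as "250Mb".  The returned
 * pointer cycles through a small ring of static buffers, so up to R2S_BUFS
 * results may be used within a single printf() call.
 */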
#ifdef __FreeBSD__
/*
 * XXX
 * FreeBSD does not have SIOCGIFDATA.
 * To emulate this, DIOCGIFSPEED ioctl added to pf.
 */
u_int64_t
getifspeed(int pfdev, char *ifname)
{
	struct pf_ifspeed io;

	bzero(&io, sizeof io);
	if (strlcpy(io.ifname, ifname, IFNAMSIZ) >=
	    sizeof(io.ifname))
		errx(1, "getifspeed: strlcpy");
	if (ioctl(pfdev, DIOCGIFSPEED, &io) == -1)
		err(1, "DIOCGIFSPEED");
	return (io.baudrate);
}
#else
u_int32_t
getifspeed(char *ifname)
{
	int		s;
	struct ifreq	ifr;
	struct if_data	ifrdat;

	s = get_query_socket();
	bzero(&ifr, sizeof(ifr));
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifspeed: strlcpy");
	ifr.ifr_data = (caddr_t)&ifrdat;
	if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
		err(1, "SIOCGIFDATA");
	return ((u_int32_t)ifrdat.ifi_baudrate);
}
#endif

u_long
getifmtu(char *ifname)
{
	int		s;
	struct ifreq	ifr;

	s = get_query_socket();
	bzero(&ifr, sizeof(ifr));
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifmtu: strlcpy");
	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
#ifdef __FreeBSD__
		ifr.ifr_mtu = 1500;
#else
		err(1, "SIOCGIFMTU");
#endif
	if (ifr.ifr_mtu > 0)
		return (ifr.ifr_mtu);
	else {
		warnx("could not get mtu for %s, assuming 1500", ifname);
		return (1500);
	}
}
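/*
 * eval_queue_opts() below copies the parsed scheduler options into the
 * pf_altq and resolves any percentage-based m1/m2 values via eval_bwspec().
 * The ref_bw argument is the interface bandwidth when called from
 * eval_pfaltq(), and the parent queue's bandwidth (or the interface
 * bandwidth for a top-level queue) when called from eval_pfqueue(), so,
 * for example, a linkshare percentage on a child queue is taken relative
 * to its parent's bandwidth.
 */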
int
eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
    u_int64_t ref_bw)
{
	int	errors = 0;

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		pa->pq_u.cbq_opts = opts->data.cbq_opts;
		break;
	case ALTQT_PRIQ:
		pa->pq_u.priq_opts = opts->data.priq_opts;
		break;
	case ALTQT_HFSC:
		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
		if (opts->data.hfsc_opts.linkshare.used) {
			pa->pq_u.hfsc_opts.lssc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_d =
			    opts->data.hfsc_opts.linkshare.d;
		}
		if (opts->data.hfsc_opts.realtime.used) {
			pa->pq_u.hfsc_opts.rtsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_d =
			    opts->data.hfsc_opts.realtime.d;
		}
		if (opts->data.hfsc_opts.upperlimit.used) {
			pa->pq_u.hfsc_opts.ulsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_d =
			    opts->data.hfsc_opts.upperlimit.d;
		}
		break;
	case ALTQT_FAIRQ:
		pa->pq_u.fairq_opts.flags = opts->data.fairq_opts.flags;
		pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
		pa->pq_u.fairq_opts.hogs_m1 =
		    eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);

		if (opts->data.fairq_opts.linkshare.used) {
			pa->pq_u.fairq_opts.lssc_m1 =
			    eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.fairq_opts.lssc_m2 =
			    eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.fairq_opts.lssc_d =
			    opts->data.fairq_opts.linkshare.d;
		}
		break;
	case ALTQT_CODEL:
		pa->pq_u.codel_opts.target = opts->data.codel_opts.target;
		pa->pq_u.codel_opts.interval = opts->data.codel_opts.interval;
		pa->pq_u.codel_opts.ecn = opts->data.codel_opts.ecn;
		break;
	default:
		warnx("eval_queue_opts: unknown scheduler type %u",
		    opts->qtype);
		errors++;
		break;
	}

	return (errors);
}

/*
 * If absolute bandwidth is set, return the lesser of that value and the
 * reference bandwidth.  Limiting to the reference bandwidth allows simple
 * limiting of configured bandwidth parameters for schedulers that are
 * 32-bit limited, as the root/interface bandwidth (top-level reference
 * bandwidth) will be properly limited in that case.
 *
 * Otherwise, if the absolute bandwidth is not set, return given percentage
 * of reference bandwidth.
 */
u_int64_t
eval_bwspec(struct node_queue_bw *bw, u_int64_t ref_bw)
{
	if (bw->bw_absolute > 0)
		return (MIN(bw->bw_absolute, ref_bw));

	if (bw->bw_percent > 0)
		return (ref_bw / 100 * bw->bw_percent);

	return (0);
}

void
print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
    const struct node_hfsc_sc *sc)
{
	printf(" %s", scname);

	if (d != 0) {
		printf("(");
		if (sc != NULL && sc->m1.bw_percent > 0)
			printf("%u%%", sc->m1.bw_percent);
		else
			printf("%s", rate2str((double)m1));
		printf(" %u", d);
	}

	if (sc != NULL && sc->m2.bw_percent > 0)
		printf(" %u%%", sc->m2.bw_percent);
	else
		printf(" %s", rate2str((double)m2));

	if (d != 0)
		printf(")");
}

void
print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
    const struct node_fairq_sc *sc)
{
	printf(" %s", scname);

	if (d != 0) {
		printf("(");
		if (sc != NULL && sc->m1.bw_percent > 0)
			printf("%u%%", sc->m1.bw_percent);
		else
			printf("%s", rate2str((double)m1));
		printf(" %u", d);
	}

	if (sc != NULL && sc->m2.bw_percent > 0)
		printf(" %u%%", sc->m2.bw_percent);
	else
		printf(" %s", rate2str((double)m2));

	if (d != 0)
		printf(")");
}