/*	$OpenBSD: pfctl_altq.c,v 1.93 2007/10/15 02:16:35 deraadt Exp $	*/

/*
 * Copyright (c) 2002
 *	Sony Computer Science Laboratories Inc.
 * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define PFIOC_USE_LATEST

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>

#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>
#include <net/altq/altq_codel.h>
#include <net/altq/altq_priq.h>
#include <net/altq/altq_hfsc.h>
#include <net/altq/altq_fairq.h>

#include "pfctl_parser.h"
#include "pfctl.h"

#define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))

static TAILQ_HEAD(altqs, pf_altq) altqs = TAILQ_HEAD_INITIALIZER(altqs);
static LIST_HEAD(gen_sc, segment) rtsc, lssc;

struct pf_altq	*qname_to_pfaltq(const char *, const char *);
u_int32_t	 qname_to_qid(const char *);

static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *);
static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
static int	check_commit_cbq(int, int, struct pf_altq *);
static int	print_cbq_opts(const struct pf_altq *);

static int	print_codel_opts(const struct pf_altq *,
		    const struct node_queue_opt *);

static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *);
static int	check_commit_priq(int, int, struct pf_altq *);
static int	print_priq_opts(const struct pf_altq *);

static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *);
static int	check_commit_hfsc(int, int, struct pf_altq *);
static int	print_hfsc_opts(const struct pf_altq *,
		    const struct node_queue_opt *);

static int	eval_pfqueue_fairq(struct pfctl *, struct pf_altq *);
static int	print_fairq_opts(const struct pf_altq *,
		    const struct node_queue_opt *);
static int	check_commit_fairq(int, int, struct pf_altq *);

static void	 gsc_add_sc(struct gen_sc *, struct service_curve *);
static int	 is_gsc_under_sc(struct gen_sc *,
		     struct service_curve *);
static void	 gsc_destroy(struct gen_sc *);
static struct segment	*gsc_getentry(struct gen_sc *, double);
static int	 gsc_add_seg(struct gen_sc *, double, double, double,
		     double);
static double	 sc_x2y(struct service_curve *, double);

#ifdef __FreeBSD__
u_int64_t	 getifspeed(int, char *);
#else
u_int32_t	 getifspeed(char *);
#endif
u_long		 getifmtu(char *);
int		 eval_queue_opts(struct pf_altq *,
		    struct node_queue_opt *, u_int64_t);
u_int64_t	 eval_bwspec(struct node_queue_bw *, u_int64_t);
void		 print_hfsc_sc(const char *, u_int, u_int, u_int,
		     const struct node_hfsc_sc *);
void		 print_fairq_sc(const char *, u_int, u_int, u_int,
		     const struct node_fairq_sc *);

void
pfaltq_store(struct pf_altq *a)
{
	struct pf_altq	*altq;

	if ((altq = malloc(sizeof(*altq))) == NULL)
		err(1, "malloc");
	memcpy(altq, a, sizeof(struct pf_altq));
	TAILQ_INSERT_TAIL(&altqs, altq, entries);
}

struct pf_altq *
pfaltq_lookup(const char *ifname)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    altq->qname[0] == 0)
			return (altq);
	}
	return (NULL);
}

struct pf_altq *
qname_to_pfaltq(const char *qname, const char *ifname)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
			return (altq);
	}
	return (NULL);
}

u_int32_t
qname_to_qid(const char *qname)
{
	struct pf_altq	*altq;

	/*
	 * We guarantee that same named queues on different interfaces
	 * have the same qid, so we do NOT need to limit matching on
	 * one interface!
	 */

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
			return (altq->qid);
	}
	return (0);
}

void
print_altq(const struct pf_altq *a, unsigned int level,
    struct node_queue_bw *bw, struct node_queue_opt *qopts)
{
	if (a->qname[0] != 0) {
		print_queue(a, level, bw, 1, qopts);
		return;
	}

#ifdef __FreeBSD__
	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
		printf("INACTIVE ");
#endif

	printf("altq on %s ", a->ifname);

	switch (a->scheduler) {
	case ALTQT_CBQ:
		if (!print_cbq_opts(a))
			printf("cbq ");
		break;
	case ALTQT_PRIQ:
		if (!print_priq_opts(a))
			printf("priq ");
		break;
	case ALTQT_HFSC:
		if (!print_hfsc_opts(a, qopts))
			printf("hfsc ");
		break;
	case ALTQT_FAIRQ:
		if (!print_fairq_opts(a, qopts))
			printf("fairq ");
		break;
	case ALTQT_CODEL:
		if (!print_codel_opts(a, qopts))
			printf("codel ");
		break;
	}

	if (bw != NULL && bw->bw_percent > 0) {
		if (bw->bw_percent < 100)
			printf("bandwidth %u%% ", bw->bw_percent);
	} else
		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));

	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	printf("tbrsize %u ", a->tbrsize);
}

void
print_queue(const struct pf_altq *a, unsigned int level,
    struct node_queue_bw *bw, int print_interface,
    struct node_queue_opt *qopts)
{
	unsigned int	i;

#ifdef __FreeBSD__
	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
		printf("INACTIVE ");
#endif
	printf("queue ");
	for (i = 0; i < level; ++i)
		printf(" ");
	printf("%s ", a->qname);
	if (print_interface)
		printf("on %s ", a->ifname);
	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
	    a->scheduler == ALTQT_FAIRQ) {
		if (bw != NULL && bw->bw_percent > 0) {
			if (bw->bw_percent < 100)
				printf("bandwidth %u%% ", bw->bw_percent);
		} else
			printf("bandwidth %s ", rate2str((double)a->bandwidth));
	}
	if (a->priority != DEFAULT_PRIORITY)
		printf("priority %u ",
		    a->priority);
	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	switch (a->scheduler) {
	case ALTQT_CBQ:
		print_cbq_opts(a);
		break;
	case ALTQT_PRIQ:
		print_priq_opts(a);
		break;
	case ALTQT_HFSC:
		print_hfsc_opts(a, qopts);
		break;
	case ALTQT_FAIRQ:
		print_fairq_opts(a, qopts);
		break;
	}
}

/*
 * eval_pfaltq computes the discipline parameters.
 */
int
eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	u_int64_t	rate;
	u_int		size, errors = 0;

	if (bw->bw_absolute > 0)
		pa->ifbandwidth = bw->bw_absolute;
	else
#ifdef __FreeBSD__
		if ((rate = getifspeed(pf->dev, pa->ifname)) == 0) {
#else
		if ((rate = getifspeed(pa->ifname)) == 0) {
#endif
			fprintf(stderr, "interface %s does not know its bandwidth, "
			    "please specify an absolute bandwidth\n",
			    pa->ifname);
			errors++;
		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
			pa->ifbandwidth = rate;

	/*
	 * Limit bandwidth to UINT_MAX for schedulers that aren't 64-bit ready.
	 */
	if ((pa->scheduler != ALTQT_HFSC) && (pa->ifbandwidth > UINT_MAX)) {
		pa->ifbandwidth = UINT_MAX;
		warnx("interface %s bandwidth limited to %" PRIu64 " bps "
		    "because selected scheduler is 32-bit limited\n", pa->ifname,
		    pa->ifbandwidth);
	}
	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);

	/* if tbrsize is not specified, use heuristics */
	if (pa->tbrsize == 0) {
		rate = pa->ifbandwidth;
		if (rate <= 1 * 1000 * 1000)
			size = 1;
		else if (rate <= 10 * 1000 * 1000)
			size = 4;
		else if (rate <= 200 * 1000 * 1000)
			size = 8;
		else
			size = 24;
		size = size * getifmtu(pa->ifname);
		pa->tbrsize = size;
	}
	return (errors);
}

/*
 * check_commit_altq does a consistency check for each interface
 */
int
check_commit_altq(int dev, int opts)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* call the discipline check for each interface. */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (altq->qname[0] == 0) {
			switch (altq->scheduler) {
			case ALTQT_CBQ:
				error = check_commit_cbq(dev, opts, altq);
				break;
			case ALTQT_PRIQ:
				error = check_commit_priq(dev, opts, altq);
				break;
			case ALTQT_HFSC:
				error = check_commit_hfsc(dev, opts, altq);
				break;
			case ALTQT_FAIRQ:
				error = check_commit_fairq(dev, opts, altq);
				break;
			default:
				break;
			}
		}
	}
	return (error);
}

/*
 * eval_pfqueue computes the queue parameters.
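 * It copies the scheduler and interface bandwidth from the interface-level
 * altq entry, resolves the parent queue, checks the queue bandwidth against
 * the interface and the parent, and finally calls the scheduler-specific
 * evaluator.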
 */
int
eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	/* should be merged with expand_queue */
	struct pf_altq	*if_pa, *parent, *altq;
	u_int64_t	 bwsum;
	int		 error = 0;

	/* find the corresponding interface and copy fields used by queues */
	if ((if_pa = pfaltq_lookup(pa->ifname)) == NULL) {
		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
		return (1);
	}
	pa->scheduler = if_pa->scheduler;
	pa->ifbandwidth = if_pa->ifbandwidth;

	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
		fprintf(stderr, "queue %s already exists on interface %s\n",
		    pa->qname, pa->ifname);
		return (1);
	}
	pa->qid = qname_to_qid(pa->qname);

	parent = NULL;
	if (pa->parent[0] != 0) {
		parent = qname_to_pfaltq(pa->parent, pa->ifname);
		if (parent == NULL) {
			fprintf(stderr, "parent %s not found for %s\n",
			    pa->parent, pa->qname);
			return (1);
		}
		pa->parent_qid = parent->qid;
	}
	if (pa->qlimit == 0)
		pa->qlimit = DEFAULT_QLIMIT;

	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
	    pa->scheduler == ALTQT_FAIRQ) {
		pa->bandwidth = eval_bwspec(bw,
		    parent == NULL ? pa->ifbandwidth : parent->bandwidth);

		if (pa->bandwidth > pa->ifbandwidth) {
			fprintf(stderr, "bandwidth for %s higher than "
			    "interface\n", pa->qname);
			return (1);
		}
		/* check that the sum of the child bandwidth is under the parent's */
		if (parent != NULL) {
			if (pa->bandwidth > parent->bandwidth) {
				warnx("bandwidth for %s higher than parent",
				    pa->qname);
				return (1);
			}
			bwsum = 0;
			TAILQ_FOREACH(altq, &altqs, entries) {
				if (strncmp(altq->ifname, pa->ifname,
				    IFNAMSIZ) == 0 &&
				    altq->qname[0] != 0 &&
				    strncmp(altq->parent, pa->parent,
				    PF_QNAME_SIZE) == 0)
					bwsum += altq->bandwidth;
			}
			bwsum += pa->bandwidth;
			if (bwsum > parent->bandwidth) {
				warnx("the sum of the child bandwidth is higher"
				    " than parent \"%s\"", parent->qname);
			}
		}
	}

	if (eval_queue_opts(pa, opts, parent == NULL ?
	    pa->ifbandwidth : parent->bandwidth))
		return (1);

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		error = eval_pfqueue_cbq(pf, pa);
		break;
	case ALTQT_PRIQ:
		error = eval_pfqueue_priq(pf, pa);
		break;
	case ALTQT_HFSC:
		error = eval_pfqueue_hfsc(pf, pa);
		break;
	case ALTQT_FAIRQ:
		error = eval_pfqueue_fairq(pf, pa);
		break;
	default:
		break;
	}
	return (error);
}

/*
 * CBQ support functions
 */
#define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
#define	RM_NS_PER_SEC	(1000000000)

static int
eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	u_int		 ifmtu;

	if (pa->priority >= CBQ_MAXPRI) {
		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
		return (-1);
	}

	ifmtu = getifmtu(pa->ifname);
	opts = &pa->pq_u.cbq_opts;

	if (opts->pktsize == 0) {	/* use default */
		opts->pktsize = ifmtu;
		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
			opts->pktsize &= ~MCLBYTES;
	} else if (opts->pktsize > ifmtu)
		opts->pktsize = ifmtu;
	if (opts->maxpktsize == 0)	/* use default */
		opts->maxpktsize = ifmtu;
	else if (opts->maxpktsize > ifmtu)
		opts->pktsize = ifmtu;

	if (opts->pktsize > opts->maxpktsize)
		opts->pktsize = opts->maxpktsize;

	if (pa->parent[0] == 0)
		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);

	cbq_compute_idletime(pf, pa);
	return (0);
}

/*
 * compute ns_per_byte, maxidle, minidle, and offtime
 */
static int
cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	double		 maxidle_s, maxidle, minidle;
	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
	double		 z, g, f, gton, gtom;
	u_int		 minburst, maxburst;

	opts = &pa->pq_u.cbq_opts;
	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
	minburst = opts->minburst;
	maxburst = opts->maxburst;

	if (pa->bandwidth == 0)
		f = 0.0001;	/* small enough? */
	else
		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);

	nsPerByte = ifnsPerByte / f;
	ptime = (double)opts->pktsize * ifnsPerByte;
	cptime = ptime * (1.0 - f) / f;

	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
		/*
		 * this causes integer overflow in kernel!
		 * (bandwidth < 6Kbps when max_pkt_size=1500)
		 */
		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
			warnx("queue bandwidth must be larger than %s",
			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
			    (double)INT_MAX * (double)pa->ifbandwidth));
			fprintf(stderr, "cbq: queue %s is too slow!\n",
			    pa->qname);
		}
		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
	}

	if (maxburst == 0) {	/* use default */
		if (cptime > 10.0 * 1000000)
			maxburst = 4;
		else
			maxburst = 16;
	}
	if (minburst == 0)	/* use default */
		minburst = 2;
	if (minburst > maxburst)
		minburst = maxburst;

	z = (double)(1 << RM_FILTER_GAIN);
	g = (1.0 - 1.0 / z);
	gton = pow(g, (double)maxburst);
	gtom = pow(g, (double)(minburst-1));
	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
	maxidle_s = (1.0 - g);
	if (maxidle > maxidle_s)
		maxidle = ptime * maxidle;
	else
		maxidle = ptime * maxidle_s;
	offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
	minidle = -((double)opts->maxpktsize * (double)nsPerByte);

	/* scale parameters */
	maxidle = ((maxidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);
	offtime = (offtime * 8.0) / nsPerByte *
	    pow(2.0, (double)RM_FILTER_GAIN);
	minidle = ((minidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);

	maxidle = maxidle / 1000.0;
	offtime = offtime / 1000.0;
	minidle = minidle / 1000.0;

	opts->minburst = minburst;
	opts->maxburst = maxburst;
	opts->ns_per_byte = (u_int)nsPerByte;
	opts->maxidle = (u_int)fabs(maxidle);
	opts->minidle = (int)minidle;
	opts->offtime = (u_int)fabs(offtime);

	return (0);
}

static int
check_commit_cbq(int dev, int opts, struct pf_altq *pa)
{
	struct pf_altq	*altq;
	int		 root_class, default_class;
	int		 error = 0;

	/*
	 * check if cbq has one root queue and one default queue
	 * for this interface
	 */
	root_class = default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0) /* this is for interface */
			continue;
		if (altq->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
			root_class++;
		if (altq->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
			default_class++;
	}
	if (root_class != 1) {
		warnx("should have one root queue on %s", pa->ifname);
		error++;
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		error++;
	}
	return (error);
}

static int
print_cbq_opts(const struct pf_altq *a)
{
	const struct cbq_opts	*opts;

	opts = &a->pq_u.cbq_opts;
	if (opts->flags) {
		printf("cbq(");
		if (opts->flags & CBQCLF_RED)
			printf(" red");
		if (opts->flags & CBQCLF_ECN)
			printf(" ecn");
		if (opts->flags & CBQCLF_RIO)
			printf(" rio");
		if (opts->flags & CBQCLF_CODEL)
			printf(" codel");
		if (opts->flags & CBQCLF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & CBQCLF_FLOWVALVE)
			printf(" flowvalve");
		if (opts->flags & CBQCLF_BORROW)
			printf(" borrow");
		if (opts->flags & CBQCLF_WRR)
			printf(" wrr");
		if (opts->flags & CBQCLF_EFFICIENT)
			printf(" efficient");
		if (opts->flags & CBQCLF_ROOTCLASS)
			printf(" root");
		if (opts->flags & CBQCLF_DEFCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * PRIQ support functions
 */
static int
eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa)
{
	struct pf_altq	*altq;

	if (pa->priority >= PRIQ_MAXPRI) {
		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
		return (-1);
	}
	/* the priority should be unique for the interface */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) == 0 &&
		    altq->qname[0] != 0 && altq->priority == pa->priority) {
			warnx("%s and %s have the same priority",
			    altq->qname, pa->qname);
			return (-1);
		}
	}

	return (0);
}

static int
check_commit_priq(int dev, int opts, struct pf_altq *pa)
{
	struct pf_altq	*altq;
	int		 default_class;
	int		 error = 0;

	/*
	 * check if priq has one default class for this interface
	 */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0) /* this is for interface */
			continue;
		if (altq->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
			default_class++;
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		error++;
	}
	return (error);
}

static int
print_priq_opts(const struct pf_altq *a)
{
	const struct priq_opts	*opts;

	opts = &a->pq_u.priq_opts;

	if (opts->flags) {
		printf("priq(");
		if (opts->flags & PRCF_RED)
			printf(" red");
		if (opts->flags & PRCF_ECN)
			printf(" ecn");
		if (opts->flags & PRCF_RIO)
			printf(" rio");
		if (opts->flags & PRCF_CODEL)
			printf(" codel");
		if (opts->flags & PRCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & PRCF_DEFAULTCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * HFSC support functions
 */
static int
eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa)
{
	struct pf_altq		*altq, *parent;
	struct hfsc_opts_v1	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.hfsc_opts;

	if (pa->parent[0] == 0) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	LIST_INIT(&rtsc);
	LIST_INIT(&lssc);

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
		warnx("m2 is zero for %s", pa->qname);
		return (-1);
	}

	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
		warnx("m1 must be zero for convex curve: %s", pa->qname);
		return (-1);
	}

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth. 20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the linkshare service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
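	 * (e.g., if a parent's linkshare m2 is 50Mb, the linkshare curves
	 * of its children may together claim at most 50Mb.)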
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */
	parent = qname_to_pfaltq(pa->parent, pa->ifname);
	if (parent == NULL)
		errx(1, "parent %s not found for %s", pa->parent, pa->qname);

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0) /* this is for interface */
			continue;

		/* if the class has a real-time service curve, add it. */
		if (opts->rtsc_m2 != 0 && altq->pq_u.hfsc_opts.rtsc_m2 != 0) {
			sc.m1 = altq->pq_u.hfsc_opts.rtsc_m1;
			sc.d = altq->pq_u.hfsc_opts.rtsc_d;
			sc.m2 = altq->pq_u.hfsc_opts.rtsc_m2;
			gsc_add_sc(&rtsc, &sc);
		}

		if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0)
			continue;

		/* if the class has a linkshare service curve, add it. */
		if (opts->lssc_m2 != 0 && altq->pq_u.hfsc_opts.lssc_m2 != 0) {
			sc.m1 = altq->pq_u.hfsc_opts.lssc_m1;
			sc.d = altq->pq_u.hfsc_opts.lssc_d;
			sc.m2 = altq->pq_u.hfsc_opts.lssc_m2;
			gsc_add_sc(&lssc, &sc);
		}
	}

	/* check the real-time service curve. reserve 20% of interface bw */
	if (opts->rtsc_m2 != 0) {
		/* add this queue to the sum */
		sc.m1 = opts->rtsc_m1;
		sc.d = opts->rtsc_d;
		sc.m2 = opts->rtsc_m2;
		gsc_add_sc(&rtsc, &sc);
		/* compare the sum with 80% of the interface */
		sc.m1 = 0;
		sc.d = 0;
		sc.m2 = pa->ifbandwidth / 100 * 80;
		if (!is_gsc_under_sc(&rtsc, &sc)) {
			warnx("real-time sc exceeds 80%% of the interface "
			    "bandwidth (%s)", rate2str((double)sc.m2));
			goto err_ret;
		}
	}

	/* check the linkshare service curve. */
	if (opts->lssc_m2 != 0) {
		/* add this queue to the child sum */
		sc.m1 = opts->lssc_m1;
		sc.d = opts->lssc_d;
		sc.m2 = opts->lssc_m2;
		gsc_add_sc(&lssc, &sc);
		/* compare the sum of the children with parent's sc */
		sc.m1 = parent->pq_u.hfsc_opts.lssc_m1;
		sc.d = parent->pq_u.hfsc_opts.lssc_d;
		sc.m2 = parent->pq_u.hfsc_opts.lssc_m2;
		if (!is_gsc_under_sc(&lssc, &sc)) {
			warnx("linkshare sc exceeds parent's sc");
			goto err_ret;
		}
	}

	/* check the upper-limit service curve.
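	 * (it may not exceed the interface bandwidth, and when a real-time
	 * curve is also configured the upper limit may not be below it.)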
	 */
	if (opts->ulsc_m2 != 0) {
		if (opts->ulsc_m1 > pa->ifbandwidth ||
		    opts->ulsc_m2 > pa->ifbandwidth) {
			warnx("upper-limit larger than interface bandwidth");
			goto err_ret;
		}
		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
			warnx("upper-limit sc smaller than real-time sc");
			goto err_ret;
		}
	}

	gsc_destroy(&rtsc);
	gsc_destroy(&lssc);

	return (0);

err_ret:
	gsc_destroy(&rtsc);
	gsc_destroy(&lssc);
	return (-1);
}

/*
 * FAIRQ support functions
 */
static int
eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa)
{
	struct pf_altq		*altq, *parent;
	struct fairq_opts	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.fairq_opts;

	if (pa->parent[0] == 0) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	LIST_INIT(&lssc);

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth. 20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the link-sharing service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */
	parent = qname_to_pfaltq(pa->parent, pa->ifname);
	if (parent == NULL)
		errx(1, "parent %s not found for %s", pa->parent, pa->qname);

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0) /* this is for interface */
			continue;

		if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0)
			continue;

		/* if the class has a link-sharing service curve, add it. */
		if (opts->lssc_m2 != 0 && altq->pq_u.fairq_opts.lssc_m2 != 0) {
			sc.m1 = altq->pq_u.fairq_opts.lssc_m1;
			sc.d = altq->pq_u.fairq_opts.lssc_d;
			sc.m2 = altq->pq_u.fairq_opts.lssc_m2;
			gsc_add_sc(&lssc, &sc);
		}
	}

	/* check the link-sharing service curve.
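	 * (the children's curves summed together may not exceed the
	 * parent's link-sharing curve.)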
	 */
	if (opts->lssc_m2 != 0) {
		sc.m1 = parent->pq_u.fairq_opts.lssc_m1;
		sc.d = parent->pq_u.fairq_opts.lssc_d;
		sc.m2 = parent->pq_u.fairq_opts.lssc_m2;
		if (!is_gsc_under_sc(&lssc, &sc)) {
			warnx("link-sharing sc exceeds parent's sc");
			goto err_ret;
		}
	}

	gsc_destroy(&lssc);

	return (0);

err_ret:
	gsc_destroy(&lssc);
	return (-1);
}

static int
check_commit_hfsc(int dev, int opts, struct pf_altq *pa)
{
	struct pf_altq	*altq, *def = NULL;
	int		 default_class;
	int		 error = 0;

	/* check if hfsc has one default queue for this interface */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0) /* this is for interface */
			continue;
		if (altq->parent[0] == 0) /* dummy root */
			continue;
		if (altq->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
			default_class++;
			def = altq;
		}
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		return (1);
	}
	/* make sure the default queue is a leaf */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0) /* this is for interface */
			continue;
		if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) {
			warnx("default queue is not a leaf");
			error++;
		}
	}
	return (error);
}

static int
check_commit_fairq(int dev __unused, int opts __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq, *def = NULL;
	int		 default_class;
	int		 error = 0;

	/* check if fairq has one default queue for this interface */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0) /* this is for interface */
			continue;
		if (altq->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
			default_class++;
			def = altq;
		}
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		return (1);
	}
	/* make sure the default queue is a leaf */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0) /* this is for interface */
			continue;
		if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) {
			warnx("default queue is not a leaf");
			error++;
		}
	}
	return (error);
}

static int
print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct hfsc_opts_v1	*opts;
	const struct node_hfsc_sc	*rtsc, *lssc, *ulsc;

	opts = &a->pq_u.hfsc_opts;
	if (qopts == NULL)
		rtsc = lssc = ulsc = NULL;
	else {
		rtsc = &qopts->data.hfsc_opts.realtime;
		lssc = &qopts->data.hfsc_opts.linkshare;
		ulsc = &qopts->data.hfsc_opts.upperlimit;
	}

	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("hfsc(");
		if (opts->flags & HFCF_RED)
			printf(" red");
		if (opts->flags & HFCF_ECN)
			printf(" ecn");
		if (opts->flags & HFCF_RIO)
			printf(" rio");
		if (opts->flags & HFCF_CODEL)
			printf(" codel");
		if (opts->flags & HFCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags &
		    HFCF_DEFAULTCLASS)
			printf(" default");
		if (opts->rtsc_m2 != 0)
			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
			    opts->rtsc_m2, rtsc);
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, lssc);
		if (opts->ulsc_m2 != 0)
			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
			    opts->ulsc_m2, ulsc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}

static int
print_codel_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct codel_opts	*opts;

	opts = &a->pq_u.codel_opts;
	if (opts->target || opts->interval || opts->ecn) {
		printf("codel(");
		if (opts->target)
			printf(" target %d", opts->target);
		if (opts->interval)
			printf(" interval %d", opts->interval);
		if (opts->ecn)
			printf(" ecn");
		printf(" ) ");

		return (1);
	}

	return (0);
}

static int
print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct fairq_opts	*opts;
	const struct node_fairq_sc	*loc_lssc;

	opts = &a->pq_u.fairq_opts;
	if (qopts == NULL)
		loc_lssc = NULL;
	else
		loc_lssc = &qopts->data.fairq_opts.linkshare;

	if (opts->flags ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("fairq(");
		if (opts->flags & FARF_RED)
			printf(" red");
		if (opts->flags & FARF_ECN)
			printf(" ecn");
		if (opts->flags & FARF_RIO)
			printf(" rio");
		if (opts->flags & FARF_CODEL)
			printf(" codel");
		if (opts->flags & FARF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & FARF_DEFAULTCLASS)
			printf(" default");
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, loc_lssc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * admission control using generalized service curve
 */

/* add a new service curve to a generalized service curve */
static void
gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	if (is_sc_null(sc))
		return;
	if (sc->d != 0)
		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
}

/*
 * check whether all points of a generalized service curve have
 * their y-coordinates no larger than a given two-piece linear
 * service curve.
 */
static int
is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	struct segment	*s, *last, *end;
	double		 y;

	if (is_sc_null(sc)) {
		if (LIST_EMPTY(gsc))
			return (1);
		LIST_FOREACH(s, gsc, _next) {
			if (s->m != 0)
				return (0);
		}
		return (1);
	}
	/*
	 * gsc has a dummy entry at the end with x = INFINITY.
	 * loop through up to this dummy entry.
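	 * gsc_getentry() creates that dummy entry if it is not there yet.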
	 */
	end = gsc_getentry(gsc, INFINITY);
	if (end == NULL)
		return (1);
	last = NULL;
	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
		if (s->y > sc_x2y(sc, s->x))
			return (0);
		last = s;
	}
	/* last now holds the real last segment */
	if (last == NULL)
		return (1);
	if (last->m > sc->m2)
		return (0);
	if (last->x < sc->d && last->m > sc->m1) {
		y = last->y + (sc->d - last->x) * last->m;
		if (y > sc_x2y(sc, sc->d))
			return (0);
	}
	return (1);
}

static void
gsc_destroy(struct gen_sc *gsc)
{
	struct segment	*s;

	while ((s = LIST_FIRST(gsc)) != NULL) {
		LIST_REMOVE(s, _next);
		free(s);
	}
}

/*
 * return a segment entry starting at x.
 * if gsc has no entry starting at x, a new entry is created at x.
 */
static struct segment *
gsc_getentry(struct gen_sc *gsc, double x)
{
	struct segment	*new, *prev, *s;

	prev = NULL;
	LIST_FOREACH(s, gsc, _next) {
		if (s->x == x)
			return (s);	/* matching entry found */
		else if (s->x < x)
			prev = s;
		else
			break;
	}

	/* we have to create a new entry */
	if ((new = calloc(1, sizeof(struct segment))) == NULL)
		return (NULL);

	new->x = x;
	if (x == INFINITY || s == NULL)
		new->d = 0;
	else if (s->x == INFINITY)
		new->d = INFINITY;
	else
		new->d = s->x - x;
	if (prev == NULL) {
		/* insert the new entry at the head of the list */
		new->y = 0;
		new->m = 0;
		LIST_INSERT_HEAD(gsc, new, _next);
	} else {
		/*
		 * the start point intersects with the segment pointed by
		 * prev. divide prev into 2 segments
		 */
		if (x == INFINITY) {
			prev->d = INFINITY;
			if (prev->m == 0)
				new->y = prev->y;
			else
				new->y = INFINITY;
		} else {
			prev->d = x - prev->x;
			new->y = prev->d * prev->m + prev->y;
		}
		new->m = prev->m;
		LIST_INSERT_AFTER(prev, new, _next);
	}
	return (new);
}

/* add a segment to a generalized service curve */
static int
gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
{
	struct segment	*start, *end, *s;
	double		 x2;

	if (d == INFINITY)
		x2 = INFINITY;
	else
		x2 = x + d;
	start = gsc_getentry(gsc, x);
	end = gsc_getentry(gsc, x2);
	if (start == NULL || end == NULL)
		return (-1);

	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
		s->m += m;
		s->y += y + (s->x - x) * m;
	}

	end = gsc_getentry(gsc, INFINITY);
	for (; s != end; s = LIST_NEXT(s, _next)) {
		s->y += m * d;
	}

	return (0);
}

/* get y-projection of a service curve */
static double
sc_x2y(struct service_curve *sc, double x)
{
	double	y;

	if (x <= (double)sc->d)
		/* y belongs to the 1st segment */
		y = x * (double)sc->m1;
	else
		/* y belongs to the 2nd segment */
		y = (double)sc->d * (double)sc->m1
		    + (x - (double)sc->d) * (double)sc->m2;
	return (y);
}

/*
 * misc utilities
 */
#define	R2S_BUFS	8
#define	RATESTR_MAX	16

char *
rate2str(double rate)
{
	char		*buf;
	static char	 r2sbuf[R2S_BUFS][RATESTR_MAX];	/* ring buffer */
	static int	 idx = 0;
	int		 i;
	static const char unit[] = " KMG";

	buf = r2sbuf[idx++];
	if (idx == R2S_BUFS)
		idx = 0;

	for (i = 0; rate >= 1000 && i <= 3; i++)
		rate /= 1000;

	if ((int)(rate * 100) % 100)
		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
	else
		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);

	return (buf);
}

#ifdef __FreeBSD__
/*
 * XXX
 * FreeBSD does not have SIOCGIFDATA.
 * To emulate this, DIOCGIFSPEED ioctl added to pf.
 */
u_int64_t
getifspeed(int pfdev, char *ifname)
{
	struct pf_ifspeed io;

	bzero(&io, sizeof io);
	if (strlcpy(io.ifname, ifname, IFNAMSIZ) >=
	    sizeof(io.ifname))
		errx(1, "getifspeed: strlcpy");
	if (ioctl(pfdev, DIOCGIFSPEED, &io) == -1)
		err(1, "DIOCGIFSPEED");
	return (io.baudrate);
}
#else
u_int32_t
getifspeed(char *ifname)
{
	int		s;
	struct ifreq	ifr;
	struct if_data	ifrdat;

	if ((s = socket(get_socket_domain(), SOCK_DGRAM, 0)) < 0)
		err(1, "socket");
	bzero(&ifr, sizeof(ifr));
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifspeed: strlcpy");
	ifr.ifr_data = (caddr_t)&ifrdat;
	if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
		err(1, "SIOCGIFDATA");
	if (close(s))
		err(1, "close");
	return ((u_int32_t)ifrdat.ifi_baudrate);
}
#endif

u_long
getifmtu(char *ifname)
{
	int		s;
	struct ifreq	ifr;

	if ((s = socket(get_socket_domain(), SOCK_DGRAM, 0)) < 0)
		err(1, "socket");
	bzero(&ifr, sizeof(ifr));
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifmtu: strlcpy");
	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
#ifdef __FreeBSD__
		ifr.ifr_mtu = 1500;
#else
		err(1, "SIOCGIFMTU");
#endif
	if (close(s))
		err(1, "close");
	if (ifr.ifr_mtu > 0)
		return (ifr.ifr_mtu);
	else {
		warnx("could not get mtu for %s, assuming 1500", ifname);
		return (1500);
	}
}

int
eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
    u_int64_t ref_bw)
{
	int	errors = 0;

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		pa->pq_u.cbq_opts = opts->data.cbq_opts;
		break;
	case ALTQT_PRIQ:
		pa->pq_u.priq_opts = opts->data.priq_opts;
		break;
	case ALTQT_HFSC:
		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
		if (opts->data.hfsc_opts.linkshare.used) {
			pa->pq_u.hfsc_opts.lssc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_d =
			    opts->data.hfsc_opts.linkshare.d;
		}
		if (opts->data.hfsc_opts.realtime.used) {
			pa->pq_u.hfsc_opts.rtsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_d =
			    opts->data.hfsc_opts.realtime.d;
		}
		if (opts->data.hfsc_opts.upperlimit.used) {
			pa->pq_u.hfsc_opts.ulsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_d =
			    opts->data.hfsc_opts.upperlimit.d;
		}
		break;
	case ALTQT_FAIRQ:
		pa->pq_u.fairq_opts.flags =
		    opts->data.fairq_opts.flags;
		pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
		pa->pq_u.fairq_opts.hogs_m1 =
		    eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);

		if (opts->data.fairq_opts.linkshare.used) {
			pa->pq_u.fairq_opts.lssc_m1 =
			    eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.fairq_opts.lssc_m2 =
			    eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.fairq_opts.lssc_d =
			    opts->data.fairq_opts.linkshare.d;
		}
		break;
	case ALTQT_CODEL:
		pa->pq_u.codel_opts.target = opts->data.codel_opts.target;
		pa->pq_u.codel_opts.interval = opts->data.codel_opts.interval;
		pa->pq_u.codel_opts.ecn = opts->data.codel_opts.ecn;
		break;
	default:
		warnx("eval_queue_opts: unknown scheduler type %u",
		    opts->qtype);
		errors++;
		break;
	}

	return (errors);
}

/*
 * If absolute bandwidth is set, return the lesser of that value and the
 * reference bandwidth.  Limiting to the reference bandwidth allows simple
 * limiting of configured bandwidth parameters for schedulers that are
 * 32-bit limited, as the root/interface bandwidth (top-level reference
 * bandwidth) will be properly limited in that case.
 *
 * Otherwise, if the absolute bandwidth is not set, return the given
 * percentage of the reference bandwidth.
 */
u_int64_t
eval_bwspec(struct node_queue_bw *bw, u_int64_t ref_bw)
{
	if (bw->bw_absolute > 0)
		return (MIN(bw->bw_absolute, ref_bw));

	if (bw->bw_percent > 0)
		return (ref_bw / 100 * bw->bw_percent);

	return (0);
}

void
print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
    const struct node_hfsc_sc *sc)
{
	printf(" %s", scname);

	if (d != 0) {
		printf("(");
		if (sc != NULL && sc->m1.bw_percent > 0)
			printf("%u%%", sc->m1.bw_percent);
		else
			printf("%s", rate2str((double)m1));
		printf(" %u", d);
	}

	if (sc != NULL && sc->m2.bw_percent > 0)
		printf(" %u%%", sc->m2.bw_percent);
	else
		printf(" %s", rate2str((double)m2));

	if (d != 0)
		printf(")");
}

void
print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
    const struct node_fairq_sc *sc)
{
	printf(" %s", scname);

	if (d != 0) {
		printf("(");
		if (sc != NULL && sc->m1.bw_percent > 0)
			printf("%u%%", sc->m1.bw_percent);
		else
			printf("%s", rate2str((double)m1));
		printf(" %u", d);
	}

	if (sc != NULL && sc->m2.bw_percent > 0)
		printf(" %u%%", sc->m2.bw_percent);
	else
		printf(" %s", rate2str((double)m2));

	if (d != 0)
		printf(")");
}