/*	$OpenBSD: pfctl_altq.c,v 1.93 2007/10/15 02:16:35 deraadt Exp $	*/

/*
 * Copyright (c) 2002
 *	Sony Computer Science Laboratories Inc.
 * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define PFIOC_USE_LATEST

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>

#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>
#include <net/altq/altq_codel.h>
#include <net/altq/altq_priq.h>
#include <net/altq/altq_hfsc.h>
#include <net/altq/altq_fairq.h>

#include "pfctl_parser.h"
#include "pfctl.h"

#define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))

static TAILQ_HEAD(altqs, pf_altq) altqs = TAILQ_HEAD_INITIALIZER(altqs);
static LIST_HEAD(gen_sc, segment) rtsc, lssc;

struct pf_altq	*qname_to_pfaltq(const char *, const char *);
u_int32_t	 qname_to_qid(const char *);

static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *);
static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
static int	check_commit_cbq(int, int, struct pf_altq *);
static int	print_cbq_opts(const struct pf_altq *);

static int	print_codel_opts(const struct pf_altq *,
		    const struct node_queue_opt *);

static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *);
static int	check_commit_priq(int, int, struct pf_altq *);
static int	print_priq_opts(const struct pf_altq *);

static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *);
static int	check_commit_hfsc(int, int, struct pf_altq *);
static int	print_hfsc_opts(const struct pf_altq *,
		    const struct node_queue_opt *);

static int	eval_pfqueue_fairq(struct pfctl *, struct pf_altq *);
static int	print_fairq_opts(const struct pf_altq *,
		    const struct node_queue_opt *);
static int	check_commit_fairq(int, int, struct pf_altq *);

static void	gsc_add_sc(struct gen_sc *, struct service_curve *);
static int	is_gsc_under_sc(struct gen_sc *,
		    struct service_curve *);
static void	gsc_destroy(struct gen_sc *);
static struct segment	*gsc_getentry(struct gen_sc *, double);
static int	gsc_add_seg(struct gen_sc *, double, double, double,
		    double);
static double	sc_x2y(struct service_curve *, double);

#ifdef __FreeBSD__
u_int64_t	getifspeed(int, char *);
#else
u_int32_t	getifspeed(char *);
#endif
u_long		getifmtu(char *);
int		eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
		    u_int64_t);
u_int64_t	eval_bwspec(struct node_queue_bw *, u_int64_t);
void		print_hfsc_sc(const char *, u_int, u_int, u_int,
		    const struct node_hfsc_sc *);
void		print_fairq_sc(const char *, u_int, u_int, u_int,
		    const struct node_fairq_sc *);

void
pfaltq_store(struct pf_altq *a)
{
	struct pf_altq	*altq;

	if ((altq = malloc(sizeof(*altq))) == NULL)
		err(1, "malloc");
	memcpy(altq, a, sizeof(struct pf_altq));
	TAILQ_INSERT_TAIL(&altqs, altq, entries);
}

struct pf_altq *
pfaltq_lookup(const char *ifname)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    altq->qname[0] == 0)
			return (altq);
	}
	return (NULL);
}

struct pf_altq *
qname_to_pfaltq(const char *qname, const char *ifname)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
			return (altq);
	}
	return (NULL);
}

u_int32_t
qname_to_qid(const char *qname)
{
	struct pf_altq	*altq;

	/*
	 * We guarantee that same named queues on different interfaces
	 * have the same qid, so we do NOT need to limit matching on
	 * one interface!
	 */

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
			return (altq->qid);
	}
	return (0);
}

void
print_altq(const struct pf_altq *a, unsigned int level,
    struct node_queue_bw *bw, struct node_queue_opt *qopts)
{
	if (a->qname[0] != 0) {
		print_queue(a, level, bw, 1, qopts);
		return;
	}

#ifdef __FreeBSD__
	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
		printf("INACTIVE ");
#endif

	printf("altq on %s ", a->ifname);

	switch (a->scheduler) {
	case ALTQT_CBQ:
		if (!print_cbq_opts(a))
			printf("cbq ");
		break;
	case ALTQT_PRIQ:
		if (!print_priq_opts(a))
			printf("priq ");
		break;
	case ALTQT_HFSC:
		if (!print_hfsc_opts(a, qopts))
			printf("hfsc ");
		break;
	case ALTQT_FAIRQ:
		if (!print_fairq_opts(a, qopts))
			printf("fairq ");
		break;
	case ALTQT_CODEL:
		if (!print_codel_opts(a, qopts))
			printf("codel ");
		break;
	}

	if (bw != NULL && bw->bw_percent > 0) {
		if (bw->bw_percent < 100)
			printf("bandwidth %u%% ", bw->bw_percent);
	} else
		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));

	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	printf("tbrsize %u ", a->tbrsize);
}

void
print_queue(const struct pf_altq *a, unsigned int level,
    struct node_queue_bw *bw, int print_interface,
    struct node_queue_opt *qopts)
{
	unsigned int	i;

#ifdef __FreeBSD__
	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
		printf("INACTIVE ");
#endif
	printf("queue ");
	for (i = 0; i < level; ++i)
		printf(" ");
	printf("%s ", a->qname);
	if (print_interface)
		printf("on %s ", a->ifname);
	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
	    a->scheduler == ALTQT_FAIRQ) {
		if (bw != NULL && bw->bw_percent > 0) {
			if (bw->bw_percent < 100)
				printf("bandwidth %u%% ", bw->bw_percent);
		} else
			printf("bandwidth %s ", rate2str((double)a->bandwidth));
	}
	if (a->priority != DEFAULT_PRIORITY)
		printf("priority %u ", a->priority);
	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	switch (a->scheduler) {
	case ALTQT_CBQ:
		print_cbq_opts(a);
		break;
	case ALTQT_PRIQ:
		print_priq_opts(a);
		break;
	case ALTQT_HFSC:
		print_hfsc_opts(a, qopts);
		break;
	case ALTQT_FAIRQ:
		print_fairq_opts(a, qopts);
		break;
	}
}

/*
 * eval_pfaltq computes the discipline parameters.
 */
int
eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	u_int64_t	rate;
	u_int		size, errors = 0;

	if (bw->bw_absolute > 0)
		pa->ifbandwidth = bw->bw_absolute;
	else
#ifdef __FreeBSD__
		if ((rate = getifspeed(pf->dev, pa->ifname)) == 0) {
#else
		if ((rate = getifspeed(pa->ifname)) == 0) {
#endif
			fprintf(stderr, "interface %s does not know its bandwidth, "
			    "please specify an absolute bandwidth\n",
			    pa->ifname);
			errors++;
		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
			pa->ifbandwidth = rate;

	/*
	 * Limit bandwidth to UINT_MAX for schedulers that aren't 64-bit ready.
	 */
	if ((pa->scheduler != ALTQT_HFSC) && (pa->ifbandwidth > UINT_MAX)) {
		pa->ifbandwidth = UINT_MAX;
		warnx("interface %s bandwidth limited to %" PRIu64 " bps "
		    "because selected scheduler is 32-bit limited\n", pa->ifname,
		    pa->ifbandwidth);
	}
	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);

	/* if tbrsize is not specified, use heuristics */
	if (pa->tbrsize == 0) {
		rate = pa->ifbandwidth;
		if (rate <= 1 * 1000 * 1000)
			size = 1;
		else if (rate <= 10 * 1000 * 1000)
			size = 4;
		else if (rate <= 200 * 1000 * 1000)
			size = 8;
		else if (rate <= 2500 * 1000 * 1000ULL)
			size = 24;
		else
			size = 128;
		size = size * getifmtu(pa->ifname);
		pa->tbrsize = size;
	}
	return (errors);
}

/*
 * check_commit_altq does consistency check for each interface
 */
int
check_commit_altq(int dev, int opts)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* call the discipline check for each interface. */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (altq->qname[0] == 0) {
			switch (altq->scheduler) {
			case ALTQT_CBQ:
				error = check_commit_cbq(dev, opts, altq);
				break;
			case ALTQT_PRIQ:
				error = check_commit_priq(dev, opts, altq);
				break;
			case ALTQT_HFSC:
				error = check_commit_hfsc(dev, opts, altq);
				break;
			case ALTQT_FAIRQ:
				error = check_commit_fairq(dev, opts, altq);
				break;
			default:
				break;
			}
		}
	}
	return (error);
}

/*
 * eval_pfqueue computes the queue parameters.
 */
int
eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	/* should be merged with expand_queue */
	struct pf_altq	*if_pa, *parent, *altq;
	u_int64_t	 bwsum;
	int		 error = 0;

	/* find the corresponding interface and copy fields used by queues */
	if ((if_pa = pfaltq_lookup(pa->ifname)) == NULL) {
		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
		return (1);
	}
	pa->scheduler = if_pa->scheduler;
	pa->ifbandwidth = if_pa->ifbandwidth;

	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
		fprintf(stderr, "queue %s already exists on interface %s\n",
		    pa->qname, pa->ifname);
		return (1);
	}
	pa->qid = qname_to_qid(pa->qname);

	parent = NULL;
	if (pa->parent[0] != 0) {
		parent = qname_to_pfaltq(pa->parent, pa->ifname);
		if (parent == NULL) {
			fprintf(stderr, "parent %s not found for %s\n",
			    pa->parent, pa->qname);
			return (1);
		}
		pa->parent_qid = parent->qid;
	}
	if (pa->qlimit == 0)
		pa->qlimit = DEFAULT_QLIMIT;

	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
	    pa->scheduler == ALTQT_FAIRQ) {
		pa->bandwidth = eval_bwspec(bw,
		    parent == NULL ? pa->ifbandwidth : parent->bandwidth);

		if (pa->bandwidth > pa->ifbandwidth) {
			fprintf(stderr, "bandwidth for %s higher than "
			    "interface\n", pa->qname);
			return (1);
		}
		/* check the sum of the child bandwidth is under parent's */
		if (parent != NULL) {
			if (pa->bandwidth > parent->bandwidth) {
				warnx("bandwidth for %s higher than parent",
				    pa->qname);
				return (1);
			}
			bwsum = 0;
			TAILQ_FOREACH(altq, &altqs, entries) {
				if (strncmp(altq->ifname, pa->ifname,
				    IFNAMSIZ) == 0 &&
				    altq->qname[0] != 0 &&
				    strncmp(altq->parent, pa->parent,
				    PF_QNAME_SIZE) == 0)
					bwsum += altq->bandwidth;
			}
			bwsum += pa->bandwidth;
			if (bwsum > parent->bandwidth) {
				warnx("the sum of the child bandwidth higher"
				    " than parent \"%s\"", parent->qname);
			}
		}
	}

	if (eval_queue_opts(pa, opts,
	    parent == NULL ? pa->ifbandwidth : parent->bandwidth))
		return (1);

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		error = eval_pfqueue_cbq(pf, pa);
		break;
	case ALTQT_PRIQ:
		error = eval_pfqueue_priq(pf, pa);
		break;
	case ALTQT_HFSC:
		error = eval_pfqueue_hfsc(pf, pa);
		break;
	case ALTQT_FAIRQ:
		error = eval_pfqueue_fairq(pf, pa);
		break;
	default:
		break;
	}
	return (error);
}

/*
 * CBQ support functions
 */
#define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
#define	RM_NS_PER_SEC	(1000000000)

static int
eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	u_int		 ifmtu;

	if (pa->priority >= CBQ_MAXPRI) {
		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
		return (-1);
	}

	ifmtu = getifmtu(pa->ifname);
	opts = &pa->pq_u.cbq_opts;

	if (opts->pktsize == 0) {	/* use default */
		opts->pktsize = ifmtu;
		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
			opts->pktsize &= ~MCLBYTES;
	} else if (opts->pktsize > ifmtu)
		opts->pktsize = ifmtu;
	if (opts->maxpktsize == 0)	/* use default */
		opts->maxpktsize = ifmtu;
	else if (opts->maxpktsize > ifmtu)
		opts->pktsize = ifmtu;

	if (opts->pktsize > opts->maxpktsize)
		opts->pktsize = opts->maxpktsize;

	if (pa->parent[0] == 0)
		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);

	cbq_compute_idletime(pf, pa);
	return (0);
}

/*
 * compute ns_per_byte, maxidle, minidle, and offtime
 */
static int
cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	double		 maxidle_s, maxidle, minidle;
	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
	double		 z, g, f, gton, gtom;
	u_int		 minburst, maxburst;

	opts = &pa->pq_u.cbq_opts;
	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
	minburst = opts->minburst;
	maxburst = opts->maxburst;

	if (pa->bandwidth == 0)
		f = 0.0001;	/* small enough? */
	else
		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);

	nsPerByte = ifnsPerByte / f;
	ptime = (double)opts->pktsize * ifnsPerByte;
	cptime = ptime * (1.0 - f) / f;

	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
		/*
		 * this causes integer overflow in kernel!
		 * (bandwidth < 6Kbps when max_pkt_size=1500)
		 */
		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
			warnx("queue bandwidth must be larger than %s",
			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
			    (double)INT_MAX * (double)pa->ifbandwidth));
			fprintf(stderr, "cbq: queue %s is too slow!\n",
			    pa->qname);
		}
		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
	}

	if (maxburst == 0) {	/* use default */
		if (cptime > 10.0 * 1000000)
			maxburst = 4;
		else
			maxburst = 16;
	}
	if (minburst == 0)	/* use default */
		minburst = 2;
	if (minburst > maxburst)
		minburst = maxburst;

	z = (double)(1 << RM_FILTER_GAIN);
	g = (1.0 - 1.0 / z);
	gton = pow(g, (double)maxburst);
	gtom = pow(g, (double)(minburst-1));
	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
	maxidle_s = (1.0 - g);
	if (maxidle > maxidle_s)
		maxidle = ptime * maxidle;
	else
		maxidle = ptime * maxidle_s;
	offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
	minidle = -((double)opts->maxpktsize * (double)nsPerByte);

	/* scale parameters */
	maxidle = ((maxidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);
	offtime = (offtime * 8.0) / nsPerByte *
	    pow(2.0, (double)RM_FILTER_GAIN);
	minidle = ((minidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);

	maxidle = maxidle / 1000.0;
	offtime = offtime / 1000.0;
	minidle = minidle / 1000.0;

	opts->minburst = minburst;
	opts->maxburst = maxburst;
	opts->ns_per_byte = (u_int)nsPerByte;
	opts->maxidle = (u_int)fabs(maxidle);
	opts->minidle = (int)minidle;
	opts->offtime = (u_int)fabs(offtime);

	return (0);
}

static int
check_commit_cbq(int dev, int opts, struct pf_altq *pa)
{
	struct pf_altq	*altq;
	int		 root_class, default_class;
	int		 error = 0;

	/*
	 * check if cbq has one root queue and one default queue
	 * for this interface
	 */
	root_class = default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
			root_class++;
		if (altq->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
			default_class++;
	}
	if (root_class != 1) {
		warnx("should have one root queue on %s", pa->ifname);
		error++;
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		error++;
	}
	return (error);
}

static int
print_cbq_opts(const struct pf_altq *a)
{
	const struct cbq_opts	*opts;

	opts = &a->pq_u.cbq_opts;
	if (opts->flags) {
		printf("cbq(");
		if (opts->flags & CBQCLF_RED)
			printf(" red");
		if (opts->flags & CBQCLF_ECN)
			printf(" ecn");
		if (opts->flags & CBQCLF_RIO)
			printf(" rio");
		if (opts->flags & CBQCLF_CODEL)
			printf(" codel");
		if (opts->flags & CBQCLF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & CBQCLF_FLOWVALVE)
			printf(" flowvalve");
		if (opts->flags & CBQCLF_BORROW)
			printf(" borrow");
		if (opts->flags & CBQCLF_WRR)
			printf(" wrr");
		if (opts->flags & CBQCLF_EFFICIENT)
			printf(" efficient");
		if (opts->flags & CBQCLF_ROOTCLASS)
			printf(" root");
		if (opts->flags & CBQCLF_DEFCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * PRIQ support functions
 */
static int
eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa)
{
	struct pf_altq	*altq;

	if (pa->priority >= PRIQ_MAXPRI) {
		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
		return (-1);
	}
	/* the priority should be unique for the interface */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) == 0 &&
		    altq->qname[0] != 0 && altq->priority == pa->priority) {
			warnx("%s and %s have the same priority",
			    altq->qname, pa->qname);
			return (-1);
		}
	}

	return (0);
}

static int
check_commit_priq(int dev, int opts, struct pf_altq *pa)
{
	struct pf_altq	*altq;
	int		 default_class;
	int		 error = 0;

	/*
	 * check if priq has one default class for this interface
	 */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
			default_class++;
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		error++;
	}
	return (error);
}

static int
print_priq_opts(const struct pf_altq *a)
{
	const struct priq_opts	*opts;

	opts = &a->pq_u.priq_opts;

	if (opts->flags) {
		printf("priq(");
		if (opts->flags & PRCF_RED)
			printf(" red");
		if (opts->flags & PRCF_ECN)
			printf(" ecn");
		if (opts->flags & PRCF_RIO)
			printf(" rio");
		if (opts->flags & PRCF_CODEL)
			printf(" codel");
		if (opts->flags & PRCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & PRCF_DEFAULTCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * HFSC support functions
 */
static int
eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa)
{
	struct pf_altq		*altq, *parent;
	struct hfsc_opts_v1	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.hfsc_opts;

	if (pa->parent[0] == 0) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	LIST_INIT(&rtsc);
	LIST_INIT(&lssc);

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
		warnx("m2 is zero for %s", pa->qname);
		return (-1);
	}

	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
		warnx("m1 must be zero for convex curve: %s", pa->qname);
		return (-1);
	}

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the linkshare service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */
	parent = qname_to_pfaltq(pa->parent, pa->ifname);
	if (parent == NULL)
		errx(1, "parent %s not found for %s", pa->parent, pa->qname);

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;

		/* if the class has a real-time service curve, add it. */
		if (opts->rtsc_m2 != 0 && altq->pq_u.hfsc_opts.rtsc_m2 != 0) {
			sc.m1 = altq->pq_u.hfsc_opts.rtsc_m1;
			sc.d = altq->pq_u.hfsc_opts.rtsc_d;
			sc.m2 = altq->pq_u.hfsc_opts.rtsc_m2;
			gsc_add_sc(&rtsc, &sc);
		}

		if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0)
			continue;

		/* if the class has a linkshare service curve, add it. */
		if (opts->lssc_m2 != 0 && altq->pq_u.hfsc_opts.lssc_m2 != 0) {
			sc.m1 = altq->pq_u.hfsc_opts.lssc_m1;
			sc.d = altq->pq_u.hfsc_opts.lssc_d;
			sc.m2 = altq->pq_u.hfsc_opts.lssc_m2;
			gsc_add_sc(&lssc, &sc);
		}
	}

	/* check the real-time service curve.  reserve 20% of interface bw */
	if (opts->rtsc_m2 != 0) {
		/* add this queue to the sum */
		sc.m1 = opts->rtsc_m1;
		sc.d = opts->rtsc_d;
		sc.m2 = opts->rtsc_m2;
		gsc_add_sc(&rtsc, &sc);
		/* compare the sum with 80% of the interface */
		sc.m1 = 0;
		sc.d = 0;
		sc.m2 = pa->ifbandwidth / 100 * 80;
		if (!is_gsc_under_sc(&rtsc, &sc)) {
			warnx("real-time sc exceeds 80%% of the interface "
			    "bandwidth (%s)", rate2str((double)sc.m2));
			goto err_ret;
		}
	}

	/* check the linkshare service curve. */
	if (opts->lssc_m2 != 0) {
		/* add this queue to the child sum */
		sc.m1 = opts->lssc_m1;
		sc.d = opts->lssc_d;
		sc.m2 = opts->lssc_m2;
		gsc_add_sc(&lssc, &sc);
		/* compare the sum of the children with parent's sc */
		sc.m1 = parent->pq_u.hfsc_opts.lssc_m1;
		sc.d = parent->pq_u.hfsc_opts.lssc_d;
		sc.m2 = parent->pq_u.hfsc_opts.lssc_m2;
		if (!is_gsc_under_sc(&lssc, &sc)) {
			warnx("linkshare sc exceeds parent's sc");
			goto err_ret;
		}
	}

	/* check the upper-limit service curve. */
	if (opts->ulsc_m2 != 0) {
		if (opts->ulsc_m1 > pa->ifbandwidth ||
		    opts->ulsc_m2 > pa->ifbandwidth) {
			warnx("upper-limit larger than interface bandwidth");
			goto err_ret;
		}
		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
			warnx("upper-limit sc smaller than real-time sc");
			goto err_ret;
		}
	}

	gsc_destroy(&rtsc);
	gsc_destroy(&lssc);

	return (0);

err_ret:
	gsc_destroy(&rtsc);
	gsc_destroy(&lssc);
	return (-1);
}

/*
 * FAIRQ support functions
 */
static int
eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa)
{
	struct pf_altq		*altq, *parent;
	struct fairq_opts	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.fairq_opts;

	if (pa->parent[0] == 0) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	LIST_INIT(&lssc);

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the link-sharing service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */
	parent = qname_to_pfaltq(pa->parent, pa->ifname);
	if (parent == NULL)
		errx(1, "parent %s not found for %s", pa->parent, pa->qname);

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;

		if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0)
			continue;

		/* if the class has a link-sharing service curve, add it. */
		if (opts->lssc_m2 != 0 && altq->pq_u.fairq_opts.lssc_m2 != 0) {
			sc.m1 = altq->pq_u.fairq_opts.lssc_m1;
			sc.d = altq->pq_u.fairq_opts.lssc_d;
			sc.m2 = altq->pq_u.fairq_opts.lssc_m2;
			gsc_add_sc(&lssc, &sc);
		}
	}

	/* check the link-sharing service curve. */
	if (opts->lssc_m2 != 0) {
		sc.m1 = parent->pq_u.fairq_opts.lssc_m1;
		sc.d = parent->pq_u.fairq_opts.lssc_d;
		sc.m2 = parent->pq_u.fairq_opts.lssc_m2;
		if (!is_gsc_under_sc(&lssc, &sc)) {
			warnx("link-sharing sc exceeds parent's sc");
			goto err_ret;
		}
	}

	gsc_destroy(&lssc);

	return (0);

err_ret:
	gsc_destroy(&lssc);
	return (-1);
}

static int
check_commit_hfsc(int dev, int opts, struct pf_altq *pa)
{
	struct pf_altq	*altq, *def = NULL;
	int		 default_class;
	int		 error = 0;

	/* check if hfsc has one default queue for this interface */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->parent[0] == 0)	/* dummy root */
			continue;
		if (altq->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
			default_class++;
			def = altq;
		}
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		return (1);
	}
	/* make sure the default queue is a leaf */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) {
			warnx("default queue is not a leaf");
			error++;
		}
	}
	return (error);
}

static int
check_commit_fairq(int dev __unused, int opts __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq, *def = NULL;
	int		 default_class;
	int		 error = 0;

	/* check if fairq has one default queue for this interface */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
			default_class++;
			def = altq;
		}
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		return (1);
	}
	/* make sure the default queue is a leaf */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) {
			warnx("default queue is not a leaf");
			error++;
		}
	}
	return (error);
}

static int
print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct hfsc_opts_v1	*opts;
	const struct node_hfsc_sc	*rtsc, *lssc, *ulsc;

	opts = &a->pq_u.hfsc_opts;
	if (qopts == NULL)
		rtsc = lssc = ulsc = NULL;
	else {
		rtsc = &qopts->data.hfsc_opts.realtime;
		lssc = &qopts->data.hfsc_opts.linkshare;
		ulsc = &qopts->data.hfsc_opts.upperlimit;
	}

	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("hfsc(");
		if (opts->flags & HFCF_RED)
			printf(" red");
		if (opts->flags & HFCF_ECN)
			printf(" ecn");
		if (opts->flags & HFCF_RIO)
			printf(" rio");
		if (opts->flags & HFCF_CODEL)
			printf(" codel");
		if (opts->flags & HFCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & HFCF_DEFAULTCLASS)
			printf(" default");
		if (opts->rtsc_m2 != 0)
			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
			    opts->rtsc_m2, rtsc);
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, lssc);
		if (opts->ulsc_m2 != 0)
			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
			    opts->ulsc_m2, ulsc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}

static int
print_codel_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct codel_opts	*opts;

	opts = &a->pq_u.codel_opts;
	if (opts->target || opts->interval || opts->ecn) {
		printf("codel(");
		if (opts->target)
			printf(" target %d", opts->target);
		if (opts->interval)
			printf(" interval %d", opts->interval);
		if (opts->ecn)
			printf("ecn");
		printf(" ) ");

		return (1);
	}

	return (0);
}

static int
print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct fairq_opts	*opts;
	const struct node_fairq_sc	*loc_lssc;

	opts = &a->pq_u.fairq_opts;
	if (qopts == NULL)
		loc_lssc = NULL;
	else
		loc_lssc = &qopts->data.fairq_opts.linkshare;

	if (opts->flags ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("fairq(");
		if (opts->flags & FARF_RED)
			printf(" red");
		if (opts->flags & FARF_ECN)
			printf(" ecn");
		if (opts->flags & FARF_RIO)
			printf(" rio");
		if (opts->flags & FARF_CODEL)
			printf(" codel");
		if (opts->flags & FARF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & FARF_DEFAULTCLASS)
			printf(" default");
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, loc_lssc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * admission control using generalized service curve
 */

/* add a new service curve to a generalized service curve */
static void
gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	if (is_sc_null(sc))
		return;
	if (sc->d != 0)
		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
}

/*
 * check whether all points of a generalized service curve have
 * their y-coordinates no larger than a given two-piece linear
 * service curve.
 */
static int
is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	struct segment	*s, *last, *end;
	double		 y;

	if (is_sc_null(sc)) {
		if (LIST_EMPTY(gsc))
			return (1);
		LIST_FOREACH(s, gsc, _next) {
			if (s->m != 0)
				return (0);
		}
		return (1);
	}
	/*
	 * gsc has a dummy entry at the end with x = INFINITY.
	 * loop through up to this dummy entry.
	 */
1162 */ 1163 end = gsc_getentry(gsc, INFINITY); 1164 if (end == NULL) 1165 return (1); 1166 last = NULL; 1167 for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) { 1168 if (s->y > sc_x2y(sc, s->x)) 1169 return (0); 1170 last = s; 1171 } 1172 /* last now holds the real last segment */ 1173 if (last == NULL) 1174 return (1); 1175 if (last->m > sc->m2) 1176 return (0); 1177 if (last->x < sc->d && last->m > sc->m1) { 1178 y = last->y + (sc->d - last->x) * last->m; 1179 if (y > sc_x2y(sc, sc->d)) 1180 return (0); 1181 } 1182 return (1); 1183 } 1184 1185 static void 1186 gsc_destroy(struct gen_sc *gsc) 1187 { 1188 struct segment *s; 1189 1190 while ((s = LIST_FIRST(gsc)) != NULL) { 1191 LIST_REMOVE(s, _next); 1192 free(s); 1193 } 1194 } 1195 1196 /* 1197 * return a segment entry starting at x. 1198 * if gsc has no entry starting at x, a new entry is created at x. 1199 */ 1200 static struct segment * 1201 gsc_getentry(struct gen_sc *gsc, double x) 1202 { 1203 struct segment *new, *prev, *s; 1204 1205 prev = NULL; 1206 LIST_FOREACH(s, gsc, _next) { 1207 if (s->x == x) 1208 return (s); /* matching entry found */ 1209 else if (s->x < x) 1210 prev = s; 1211 else 1212 break; 1213 } 1214 1215 /* we have to create a new entry */ 1216 if ((new = calloc(1, sizeof(struct segment))) == NULL) 1217 return (NULL); 1218 1219 new->x = x; 1220 if (x == INFINITY || s == NULL) 1221 new->d = 0; 1222 else if (s->x == INFINITY) 1223 new->d = INFINITY; 1224 else 1225 new->d = s->x - x; 1226 if (prev == NULL) { 1227 /* insert the new entry at the head of the list */ 1228 new->y = 0; 1229 new->m = 0; 1230 LIST_INSERT_HEAD(gsc, new, _next); 1231 } else { 1232 /* 1233 * the start point intersects with the segment pointed by 1234 * prev. divide prev into 2 segments 1235 */ 1236 if (x == INFINITY) { 1237 prev->d = INFINITY; 1238 if (prev->m == 0) 1239 new->y = prev->y; 1240 else 1241 new->y = INFINITY; 1242 } else { 1243 prev->d = x - prev->x; 1244 new->y = prev->d * prev->m + prev->y; 1245 } 1246 new->m = prev->m; 1247 LIST_INSERT_AFTER(prev, new, _next); 1248 } 1249 return (new); 1250 } 1251 1252 /* add a segment to a generalized service curve */ 1253 static int 1254 gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m) 1255 { 1256 struct segment *start, *end, *s; 1257 double x2; 1258 1259 if (d == INFINITY) 1260 x2 = INFINITY; 1261 else 1262 x2 = x + d; 1263 start = gsc_getentry(gsc, x); 1264 end = gsc_getentry(gsc, x2); 1265 if (start == NULL || end == NULL) 1266 return (-1); 1267 1268 for (s = start; s != end; s = LIST_NEXT(s, _next)) { 1269 s->m += m; 1270 s->y += y + (s->x - x) * m; 1271 } 1272 1273 end = gsc_getentry(gsc, INFINITY); 1274 for (; s != end; s = LIST_NEXT(s, _next)) { 1275 s->y += m * d; 1276 } 1277 1278 return (0); 1279 } 1280 1281 /* get y-projection of a service curve */ 1282 static double 1283 sc_x2y(struct service_curve *sc, double x) 1284 { 1285 double y; 1286 1287 if (x <= (double)sc->d) 1288 /* y belongs to the 1st segment */ 1289 y = x * (double)sc->m1; 1290 else 1291 /* y belongs to the 2nd segment */ 1292 y = (double)sc->d * (double)sc->m1 1293 + (x - (double)sc->d) * (double)sc->m2; 1294 return (y); 1295 } 1296 1297 /* 1298 * misc utilities 1299 */ 1300 #define R2S_BUFS 8 1301 #define RATESTR_MAX 16 1302 1303 char * 1304 rate2str(double rate) 1305 { 1306 char *buf; 1307 static char r2sbuf[R2S_BUFS][RATESTR_MAX]; /* ring bufer */ 1308 static int idx = 0; 1309 int i; 1310 static const char unit[] = " KMG"; 1311 1312 buf = r2sbuf[idx++]; 1313 if (idx == R2S_BUFS) 1314 
		idx = 0;

	for (i = 0; rate >= 1000 && i <= 3; i++)
		rate /= 1000;

	if ((int)(rate * 100) % 100)
		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
	else
		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);

	return (buf);
}

#ifdef __FreeBSD__
/*
 * XXX
 * FreeBSD does not have SIOCGIFDATA.
 * To emulate this, DIOCGIFSPEED ioctl added to pf.
 */
u_int64_t
getifspeed(int pfdev, char *ifname)
{
	struct pf_ifspeed io;

	bzero(&io, sizeof io);
	if (strlcpy(io.ifname, ifname, IFNAMSIZ) >=
	    sizeof(io.ifname))
		errx(1, "getifspeed: strlcpy");
	if (ioctl(pfdev, DIOCGIFSPEED, &io) == -1)
		err(1, "DIOCGIFSPEED");
	return (io.baudrate);
}
#else
u_int32_t
getifspeed(char *ifname)
{
	int		s;
	struct ifreq	ifr;
	struct if_data	ifrdat;

	if ((s = socket(get_socket_domain(), SOCK_DGRAM, 0)) < 0)
		err(1, "socket");
	bzero(&ifr, sizeof(ifr));
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifspeed: strlcpy");
	ifr.ifr_data = (caddr_t)&ifrdat;
	if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
		err(1, "SIOCGIFDATA");
	if (close(s))
		err(1, "close");
	return ((u_int32_t)ifrdat.ifi_baudrate);
}
#endif

u_long
getifmtu(char *ifname)
{
	int		s;
	struct ifreq	ifr;

	if ((s = socket(get_socket_domain(), SOCK_DGRAM, 0)) < 0)
		err(1, "socket");
	bzero(&ifr, sizeof(ifr));
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifmtu: strlcpy");
	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
#ifdef __FreeBSD__
		ifr.ifr_mtu = 1500;
#else
		err(1, "SIOCGIFMTU");
#endif
	if (close(s))
		err(1, "close");
	if (ifr.ifr_mtu > 0)
		return (ifr.ifr_mtu);
	else {
		warnx("could not get mtu for %s, assuming 1500", ifname);
		return (1500);
	}
}

int
eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
    u_int64_t ref_bw)
{
	int	errors = 0;

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		pa->pq_u.cbq_opts = opts->data.cbq_opts;
		break;
	case ALTQT_PRIQ:
		pa->pq_u.priq_opts = opts->data.priq_opts;
		break;
	case ALTQT_HFSC:
		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
		if (opts->data.hfsc_opts.linkshare.used) {
			pa->pq_u.hfsc_opts.lssc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_d =
			    opts->data.hfsc_opts.linkshare.d;
		}
		if (opts->data.hfsc_opts.realtime.used) {
			pa->pq_u.hfsc_opts.rtsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_d =
			    opts->data.hfsc_opts.realtime.d;
		}
		if (opts->data.hfsc_opts.upperlimit.used) {
			pa->pq_u.hfsc_opts.ulsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_d =
			    opts->data.hfsc_opts.upperlimit.d;
		}
		break;
	case ALTQT_FAIRQ:
		pa->pq_u.fairq_opts.flags = opts->data.fairq_opts.flags;
		pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
		pa->pq_u.fairq_opts.hogs_m1 =
		    eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);

		if (opts->data.fairq_opts.linkshare.used) {
			pa->pq_u.fairq_opts.lssc_m1 =
			    eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.fairq_opts.lssc_m2 =
			    eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.fairq_opts.lssc_d =
			    opts->data.fairq_opts.linkshare.d;
		}
		break;
	case ALTQT_CODEL:
		pa->pq_u.codel_opts.target = opts->data.codel_opts.target;
		pa->pq_u.codel_opts.interval = opts->data.codel_opts.interval;
		pa->pq_u.codel_opts.ecn = opts->data.codel_opts.ecn;
		break;
	default:
		warnx("eval_queue_opts: unknown scheduler type %u",
		    opts->qtype);
		errors++;
		break;
	}

	return (errors);
}

/*
 * If the absolute bandwidth is set, return the lesser of that value and the
 * reference bandwidth.  Limiting to the reference bandwidth allows simple
 * limiting of configured bandwidth parameters for schedulers that are
 * 32-bit limited, as the root/interface bandwidth (top-level reference
 * bandwidth) will be properly limited in that case.
 *
 * Otherwise, if the absolute bandwidth is not set, return the given
 * percentage of the reference bandwidth.
 */
u_int64_t
eval_bwspec(struct node_queue_bw *bw, u_int64_t ref_bw)
{
	if (bw->bw_absolute > 0)
		return (MIN(bw->bw_absolute, ref_bw));

	if (bw->bw_percent > 0)
		return (ref_bw / 100 * bw->bw_percent);

	return (0);
}

void
print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
    const struct node_hfsc_sc *sc)
{
	printf(" %s", scname);

	if (d != 0) {
		printf("(");
		if (sc != NULL && sc->m1.bw_percent > 0)
			printf("%u%%", sc->m1.bw_percent);
		else
			printf("%s", rate2str((double)m1));
		printf(" %u", d);
	}

	if (sc != NULL && sc->m2.bw_percent > 0)
		printf(" %u%%", sc->m2.bw_percent);
	else
		printf(" %s", rate2str((double)m2));

	if (d != 0)
		printf(")");
}

void
print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
    const struct node_fairq_sc *sc)
{
	printf(" %s", scname);

	if (d != 0) {
		printf("(");
		if (sc != NULL && sc->m1.bw_percent > 0)
			printf("%u%%", sc->m1.bw_percent);
		else
			printf("%s", rate2str((double)m1));
		printf(" %u", d);
	}

	if (sc != NULL && sc->m2.bw_percent > 0)
		printf(" %u%%", sc->m2.bw_percent);
	else
		printf(" %s", rate2str((double)m2));

	if (d != 0)
		printf(")");
}