xref: /freebsd/sbin/pfctl/pfctl_altq.c (revision 28f4385e45a2681c14bd04b83fe1796eaefe8265)
1 /*	$OpenBSD: pfctl_altq.c,v 1.93 2007/10/15 02:16:35 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2002
5  *	Sony Computer Science Laboratories Inc.
6  * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/cdefs.h>
22 __FBSDID("$FreeBSD$");
23 
24 #define PFIOC_USE_LATEST
25 
26 #include <sys/types.h>
27 #include <sys/bitset.h>
28 #include <sys/ioctl.h>
29 #include <sys/socket.h>
30 
31 #include <net/if.h>
32 #include <netinet/in.h>
33 #include <net/pfvar.h>
34 
35 #include <err.h>
36 #include <errno.h>
37 #include <inttypes.h>
38 #include <limits.h>
39 #include <math.h>
40 #include <search.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <string.h>
44 #include <unistd.h>
45 
46 #include <net/altq/altq.h>
47 #include <net/altq/altq_cbq.h>
48 #include <net/altq/altq_codel.h>
49 #include <net/altq/altq_priq.h>
50 #include <net/altq/altq_hfsc.h>
51 #include <net/altq/altq_fairq.h>
52 
53 #include "pfctl_parser.h"
54 #include "pfctl.h"
55 
56 #define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))
57 
58 static STAILQ_HEAD(interfaces, pfctl_altq) interfaces = STAILQ_HEAD_INITIALIZER(interfaces);
59 static struct hsearch_data queue_map;
60 static struct hsearch_data if_map;
61 static struct hsearch_data qid_map;
62 
63 static struct pfctl_altq *pfaltq_lookup(char *ifname);
64 static struct pfctl_altq *qname_to_pfaltq(const char *, const char *);
65 static u_int32_t	 qname_to_qid(char *);
66 
67 static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *,
68 		    struct pfctl_altq *);
69 static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
70 static int	check_commit_cbq(int, int, struct pfctl_altq *);
71 static int	print_cbq_opts(const struct pf_altq *);
72 
73 static int	print_codel_opts(const struct pf_altq *,
74 		    const struct node_queue_opt *);
75 
76 static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *,
77 		    struct pfctl_altq *);
78 static int	check_commit_priq(int, int, struct pfctl_altq *);
79 static int	print_priq_opts(const struct pf_altq *);
80 
81 static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *,
82 		    struct pfctl_altq *, struct pfctl_altq *);
83 static int	check_commit_hfsc(int, int, struct pfctl_altq *);
84 static int	print_hfsc_opts(const struct pf_altq *,
85 		    const struct node_queue_opt *);
86 
87 static int	eval_pfqueue_fairq(struct pfctl *, struct pf_altq *,
88 		    struct pfctl_altq *, struct pfctl_altq *);
89 static int	print_fairq_opts(const struct pf_altq *,
90 		    const struct node_queue_opt *);
91 static int	check_commit_fairq(int, int, struct pfctl_altq *);
92 
93 static void		 gsc_add_sc(struct gen_sc *, struct service_curve *);
94 static int		 is_gsc_under_sc(struct gen_sc *,
95 			     struct service_curve *);
96 static struct segment	*gsc_getentry(struct gen_sc *, double);
97 static int		 gsc_add_seg(struct gen_sc *, double, double, double,
98 			     double);
99 static double		 sc_x2y(struct service_curve *, double);
100 
101 #ifdef __FreeBSD__
102 u_int64_t	getifspeed(int, char *);
103 #else
104 u_int32_t	 getifspeed(char *);
105 #endif
106 u_long		 getifmtu(char *);
107 int		 eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
108 		     u_int64_t);
109 u_int64_t	 eval_bwspec(struct node_queue_bw *, u_int64_t);
110 void		 print_hfsc_sc(const char *, u_int, u_int, u_int,
111 		     const struct node_hfsc_sc *);
112 void		 print_fairq_sc(const char *, u_int, u_int, u_int,
113 		     const struct node_fairq_sc *);
114 
115 static __attribute__((constructor)) void
116 pfctl_altq_init(void)
117 {
118 	/*
119 	 * As hdestroy() will never be called on these tables, it will be
120 	 * safe to use references into the stored data as keys.
121 	 */
122 	if (hcreate_r(0, &queue_map) == 0)
123 		err(1, "Failed to create altq queue map");
124 	if (hcreate_r(0, &if_map) == 0)
125 		err(1, "Failed to create altq interface map");
126 	if (hcreate_r(0, &qid_map) == 0)
127 		err(1, "Failed to create altq queue id map");
128 }
129 
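/*
 * pfaltq_store() files each parsed altq into the tables created above:
 * an interface-level altq (empty qname) goes into if_map keyed by its
 * interface name and onto the interfaces list, while a queue goes into
 * queue_map keyed by "ifname:qname" and registers its qid in qid_map
 * keyed by queue name, so the same queue name resolves to the same qid
 * on every interface.
 */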
130 void
131 pfaltq_store(struct pf_altq *a)
132 {
133 	struct pfctl_altq	*altq;
134 	ENTRY 			 item;
135 	ENTRY			*ret_item;
136 	size_t			 key_size;
137 
138 	if ((altq = malloc(sizeof(*altq))) == NULL)
139 		err(1, "queue malloc");
140 	memcpy(&altq->pa, a, sizeof(struct pf_altq));
141 	memset(&altq->meta, 0, sizeof(altq->meta));
142 
143 	if (a->qname[0] == 0) {
144 		item.key = altq->pa.ifname;
145 		item.data = altq;
146 		if (hsearch_r(item, ENTER, &ret_item, &if_map) == 0)
147 			err(1, "interface map insert");
148 		STAILQ_INSERT_TAIL(&interfaces, altq, meta.link);
149 	} else {
150 		key_size = sizeof(a->ifname) + sizeof(a->qname);
151 		if ((item.key = malloc(key_size)) == NULL)
152 			err(1, "queue map key malloc");
153 		snprintf(item.key, key_size, "%s:%s", a->ifname, a->qname);
154 		item.data = altq;
155 		if (hsearch_r(item, ENTER, &ret_item, &queue_map) == 0)
156 			err(1, "queue map insert");
157 
158 		item.key = altq->pa.qname;
159 		item.data = &altq->pa.qid;
160 		if (hsearch_r(item, ENTER, &ret_item, &qid_map) == 0)
161 			err(1, "qid map insert");
162 	}
163 }
164 
165 static struct pfctl_altq *
166 pfaltq_lookup(char *ifname)
167 {
168 	ENTRY	 item;
169 	ENTRY	*ret_item;
170 
171 	item.key = ifname;
172 	if (hsearch_r(item, FIND, &ret_item, &if_map) == 0)
173 		return (NULL);
174 
175 	return (ret_item->data);
176 }
177 
178 static struct pfctl_altq *
179 qname_to_pfaltq(const char *qname, const char *ifname)
180 {
181 	ENTRY	 item;
182 	ENTRY	*ret_item;
183 	char	 key[IFNAMSIZ + PF_QNAME_SIZE];
184 
185 	item.key = key;
186 	snprintf(item.key, sizeof(key), "%s:%s", ifname, qname);
187 	if (hsearch_r(item, FIND, &ret_item, &queue_map) == 0)
188 		return (NULL);
189 
190 	return (ret_item->data);
191 }
192 
193 static u_int32_t
194 qname_to_qid(char *qname)
195 {
196 	ENTRY	 item;
197 	ENTRY	*ret_item;
198 	uint32_t qid;
199 
200 	/*
201 	 * We guarantee that same-named queues on different interfaces
202 	 * have the same qid.
203 	 */
204 	item.key = qname;
205 	if (hsearch_r(item, FIND, &ret_item, &qid_map) == 0)
206 		return (0);
207 
208 	qid = *(uint32_t *)ret_item->data;
209 	return (qid);
210 }
211 
212 void
213 print_altq(const struct pf_altq *a, unsigned int level,
214     struct node_queue_bw *bw, struct node_queue_opt *qopts)
215 {
216 	if (a->qname[0] != 0) {
217 		print_queue(a, level, bw, 1, qopts);
218 		return;
219 	}
220 
221 #ifdef __FreeBSD__
222 	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
223 		printf("INACTIVE ");
224 #endif
225 
226 	printf("altq on %s ", a->ifname);
227 
228 	switch (a->scheduler) {
229 	case ALTQT_CBQ:
230 		if (!print_cbq_opts(a))
231 			printf("cbq ");
232 		break;
233 	case ALTQT_PRIQ:
234 		if (!print_priq_opts(a))
235 			printf("priq ");
236 		break;
237 	case ALTQT_HFSC:
238 		if (!print_hfsc_opts(a, qopts))
239 			printf("hfsc ");
240 		break;
241 	case ALTQT_FAIRQ:
242 		if (!print_fairq_opts(a, qopts))
243 			printf("fairq ");
244 		break;
245 	case ALTQT_CODEL:
246 		if (!print_codel_opts(a, qopts))
247 			printf("codel ");
248 		break;
249 	}
250 
251 	if (bw != NULL && bw->bw_percent > 0) {
252 		if (bw->bw_percent < 100)
253 			printf("bandwidth %u%% ", bw->bw_percent);
254 	} else
255 		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));
256 
257 	if (a->qlimit != DEFAULT_QLIMIT)
258 		printf("qlimit %u ", a->qlimit);
259 	printf("tbrsize %u ", a->tbrsize);
260 }
261 
262 void
263 print_queue(const struct pf_altq *a, unsigned int level,
264     struct node_queue_bw *bw, int print_interface,
265     struct node_queue_opt *qopts)
266 {
267 	unsigned int	i;
268 
269 #ifdef __FreeBSD__
270 	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
271 		printf("INACTIVE ");
272 #endif
273 	printf("queue ");
274 	for (i = 0; i < level; ++i)
275 		printf(" ");
276 	printf("%s ", a->qname);
277 	if (print_interface)
278 		printf("on %s ", a->ifname);
279 	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
280 		a->scheduler == ALTQT_FAIRQ) {
281 		if (bw != NULL && bw->bw_percent > 0) {
282 			if (bw->bw_percent < 100)
283 				printf("bandwidth %u%% ", bw->bw_percent);
284 		} else
285 			printf("bandwidth %s ", rate2str((double)a->bandwidth));
286 	}
287 	if (a->priority != DEFAULT_PRIORITY)
288 		printf("priority %u ", a->priority);
289 	if (a->qlimit != DEFAULT_QLIMIT)
290 		printf("qlimit %u ", a->qlimit);
291 	switch (a->scheduler) {
292 	case ALTQT_CBQ:
293 		print_cbq_opts(a);
294 		break;
295 	case ALTQT_PRIQ:
296 		print_priq_opts(a);
297 		break;
298 	case ALTQT_HFSC:
299 		print_hfsc_opts(a, qopts);
300 		break;
301 	case ALTQT_FAIRQ:
302 		print_fairq_opts(a, qopts);
303 		break;
304 	}
305 }
306 
307 /*
308  * eval_pfaltq computes the discipline parameters.
309  */
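/*
 * As an illustration (not taken from this file), an interface-level rule
 * such as
 *
 *	altq on em0 cbq bandwidth 100Mb queue { std, http }
 *
 * is evaluated here: the interface bandwidth is taken from the rule or
 * queried from the interface, and tbrsize is derived below if it was not
 * given explicitly.
 */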
310 int
311 eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
312     struct node_queue_opt *opts)
313 {
314 	u_int64_t	rate;
315 	u_int		size, errors = 0;
316 
317 	if (bw->bw_absolute > 0)
318 		pa->ifbandwidth = bw->bw_absolute;
319 	else
320 #ifdef __FreeBSD__
321 		if ((rate = getifspeed(pf->dev, pa->ifname)) == 0) {
322 #else
323 		if ((rate = getifspeed(pa->ifname)) == 0) {
324 #endif
325 			fprintf(stderr, "interface %s does not know its bandwidth, "
326 			    "please specify an absolute bandwidth\n",
327 			    pa->ifname);
328 			errors++;
329 		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
330 			pa->ifbandwidth = rate;
331 
332 	/*
333 	 * Limit bandwidth to UINT_MAX for schedulers that aren't 64-bit ready.
334 	 */
335 	if ((pa->scheduler != ALTQT_HFSC) && (pa->ifbandwidth > UINT_MAX)) {
336 		pa->ifbandwidth = UINT_MAX;
337 		warnx("interface %s bandwidth limited to %" PRIu64 " bps "
338 		    "because selected scheduler is 32-bit limited", pa->ifname,
339 		    pa->ifbandwidth);
340 	}
341 	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);
342 
343 	/* if tbrsize is not specified, use heuristics */
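	/*
	 * Illustrative numbers: a 100Mbps interface with a 1500-byte MTU
	 * falls into the "<= 200Mbps" bucket below, giving a token bucket
	 * of 8 * 1500 = 12000 bytes.
	 */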
344 	if (pa->tbrsize == 0) {
345 		rate = pa->ifbandwidth;
346 		if (rate <= 1 * 1000 * 1000)
347 			size = 1;
348 		else if (rate <= 10 * 1000 * 1000)
349 			size = 4;
350 		else if (rate <= 200 * 1000 * 1000)
351 			size = 8;
352 		else if (rate <= 2500 * 1000 * 1000ULL)
353 			size = 24;
354 		else
355 			size = 128;
356 		size = size * getifmtu(pa->ifname);
357 		pa->tbrsize = size;
358 	}
359 	return (errors);
360 }
361 
362 /*
363  * check_commit_altq does consistency check for each interface
364  */
365 int
366 check_commit_altq(int dev, int opts)
367 {
368 	struct pfctl_altq	*if_ppa;
369 	int			 error = 0;
370 
371 	/* call the discipline check for each interface. */
372 	STAILQ_FOREACH(if_ppa, &interfaces, meta.link) {
373 		switch (if_ppa->pa.scheduler) {
374 		case ALTQT_CBQ:
375 			error = check_commit_cbq(dev, opts, if_ppa);
376 			break;
377 		case ALTQT_PRIQ:
378 			error = check_commit_priq(dev, opts, if_ppa);
379 			break;
380 		case ALTQT_HFSC:
381 			error = check_commit_hfsc(dev, opts, if_ppa);
382 			break;
383 		case ALTQT_FAIRQ:
384 			error = check_commit_fairq(dev, opts, if_ppa);
385 			break;
386 		default:
387 			break;
388 		}
389 	}
390 	return (error);
391 }
392 
393 /*
394  * eval_pfqueue computes the queue parameters.
395  */
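/*
 * A queue inherits the scheduler and interface bandwidth from its
 * interface's altq, gets a qid, resolves its parent (if any), has its
 * bandwidth checked against the parent and the interface, and is then
 * handed to the scheduler-specific eval routine.
 */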
396 int
397 eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
398     struct node_queue_opt *opts)
399 {
400 	/* should be merged with expand_queue */
401 	struct pfctl_altq	*if_ppa, *parent;
402 	int		 	 error = 0;
403 
404 	/* find the corresponding interface and copy fields used by queues */
405 	if ((if_ppa = pfaltq_lookup(pa->ifname)) == NULL) {
406 		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
407 		return (1);
408 	}
409 	pa->scheduler = if_ppa->pa.scheduler;
410 	pa->ifbandwidth = if_ppa->pa.ifbandwidth;
411 
412 	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
413 		fprintf(stderr, "queue %s already exists on interface %s\n",
414 		    pa->qname, pa->ifname);
415 		return (1);
416 	}
417 	pa->qid = qname_to_qid(pa->qname);
418 
419 	parent = NULL;
420 	if (pa->parent[0] != 0) {
421 		parent = qname_to_pfaltq(pa->parent, pa->ifname);
422 		if (parent == NULL) {
423 			fprintf(stderr, "parent %s not found for %s\n",
424 			    pa->parent, pa->qname);
425 			return (1);
426 		}
427 		pa->parent_qid = parent->pa.qid;
428 	}
429 	if (pa->qlimit == 0)
430 		pa->qlimit = DEFAULT_QLIMIT;
431 
432 	if (eval_queue_opts(pa, opts,
433 		parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth))
434 		return (1);
435 
436 	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
437 		pa->scheduler == ALTQT_FAIRQ) {
438 		pa->bandwidth = eval_bwspec(bw,
439 		    parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth);
440 
441 		/*
442 		 * For HFSC, if the linkshare service curve m2 parameter is
443 		 * set, it overrides the provided queue bandwidth parameter,
444 		 * so adjust the queue bandwidth parameter accordingly here
445 		 * to avoid false positives in the total child bandwidth
446 		 * check below.
447 		 */
448 		if ((pa->scheduler == ALTQT_HFSC) &&
449 		    (pa->pq_u.hfsc_opts.lssc_m2 != 0)) {
450 			pa->bandwidth = pa->pq_u.hfsc_opts.lssc_m2;
451 		}
452 
453 		if (pa->bandwidth > pa->ifbandwidth) {
454 			fprintf(stderr, "bandwidth for %s higher than "
455 			    "interface\n", pa->qname);
456 			return (1);
457 		}
458 		/* check that the sum of the child bandwidths stays under the parent's */
459 		if (parent != NULL) {
460 			if (pa->bandwidth > parent->pa.bandwidth) {
461 				warnx("bandwidth for %s higher than parent",
462 				    pa->qname);
463 				return (1);
464 			}
465 			parent->meta.bwsum += pa->bandwidth;
466 			if (parent->meta.bwsum > parent->pa.bandwidth) {
467 				warnx("the sum of the child bandwidth (%" PRIu64
468 				    ") higher than parent \"%s\" (%" PRIu64 ")",
469 				    parent->meta.bwsum, parent->pa.qname,
470 				    parent->pa.bandwidth);
471 			}
472 		}
473 	}
474 
475 	if (parent != NULL)
476 		parent->meta.children++;
477 
478 	switch (pa->scheduler) {
479 	case ALTQT_CBQ:
480 		error = eval_pfqueue_cbq(pf, pa, if_ppa);
481 		break;
482 	case ALTQT_PRIQ:
483 		error = eval_pfqueue_priq(pf, pa, if_ppa);
484 		break;
485 	case ALTQT_HFSC:
486 		error = eval_pfqueue_hfsc(pf, pa, if_ppa, parent);
487 		break;
488 	case ALTQT_FAIRQ:
489 		error = eval_pfqueue_fairq(pf, pa, if_ppa, parent);
490 		break;
491 	default:
492 		break;
493 	}
494 	return (error);
495 }
496 
497 /*
498  * CBQ support functions
499  */
500 #define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
501 #define	RM_NS_PER_SEC	(1000000000)
502 
503 static int
504 eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
505 {
506 	struct cbq_opts	*opts;
507 	u_int		 ifmtu;
508 
509 	if (pa->priority >= CBQ_MAXPRI) {
510 		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
511 		return (-1);
512 	}
513 
514 	ifmtu = getifmtu(pa->ifname);
515 	opts = &pa->pq_u.cbq_opts;
516 
517 	if (opts->pktsize == 0) {	/* use default */
518 		opts->pktsize = ifmtu;
519 		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
520 			opts->pktsize &= ~MCLBYTES;
521 	} else if (opts->pktsize > ifmtu)
522 		opts->pktsize = ifmtu;
523 	if (opts->maxpktsize == 0)	/* use default */
524 		opts->maxpktsize = ifmtu;
525 	else if (opts->maxpktsize > ifmtu)
526 		opts->pktsize = ifmtu;
527 
528 	if (opts->pktsize > opts->maxpktsize)
529 		opts->pktsize = opts->maxpktsize;
530 
531 	if (pa->parent[0] == 0)
532 		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);
533 
534 	if (pa->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
535 		if_ppa->meta.root_classes++;
536 	if (pa->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
537 		if_ppa->meta.default_classes++;
538 
539 	cbq_compute_idletime(pf, pa);
540 	return (0);
541 }
542 
543 /*
544  * compute ns_per_byte, maxidle, minidle, and offtime
545  */
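/*
 * In the calculation below, f is the fraction of the interface bandwidth
 * given to this queue, nsPerByte is the per-byte transmission time at the
 * queue's rate, and g = 1 - 1/2^RM_FILTER_GAIN (31/32 here) is the
 * averaging gain; maxidle, minidle and offtime are scaled by
 * 2^RM_FILTER_GAIN and divided by 1000 before being stored in cbq_opts.
 */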
546 static int
547 cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
548 {
549 	struct cbq_opts	*opts;
550 	double		 maxidle_s, maxidle, minidle;
551 	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
552 	double		 z, g, f, gton, gtom;
553 	u_int		 minburst, maxburst;
554 
555 	opts = &pa->pq_u.cbq_opts;
556 	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
557 	minburst = opts->minburst;
558 	maxburst = opts->maxburst;
559 
560 	if (pa->bandwidth == 0)
561 		f = 0.0001;	/* small enough? */
562 	else
563 		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);
564 
565 	nsPerByte = ifnsPerByte / f;
566 	ptime = (double)opts->pktsize * ifnsPerByte;
567 	cptime = ptime * (1.0 - f) / f;
568 
569 	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
570 		/*
571 		 * this causes integer overflow in kernel!
572 		 * (bandwidth < 6Kbps when max_pkt_size=1500)
573 		 */
574 		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
575 			warnx("queue bandwidth must be larger than %s",
576 			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
577 			    (double)INT_MAX * (double)pa->ifbandwidth));
578 			fprintf(stderr, "cbq: queue %s is too slow!\n",
579 			    pa->qname);
580 		}
581 		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
582 	}
583 
584 	if (maxburst == 0) {  /* use default */
585 		if (cptime > 10.0 * 1000000)
586 			maxburst = 4;
587 		else
588 			maxburst = 16;
589 	}
590 	if (minburst == 0)  /* use default */
591 		minburst = 2;
592 	if (minburst > maxburst)
593 		minburst = maxburst;
594 
595 	z = (double)(1 << RM_FILTER_GAIN);
596 	g = (1.0 - 1.0 / z);
597 	gton = pow(g, (double)maxburst);
598 	gtom = pow(g, (double)(minburst-1));
599 	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
600 	maxidle_s = (1.0 - g);
601 	if (maxidle > maxidle_s)
602 		maxidle = ptime * maxidle;
603 	else
604 		maxidle = ptime * maxidle_s;
605 	offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
606 	minidle = -((double)opts->maxpktsize * (double)nsPerByte);
607 
608 	/* scale parameters */
609 	maxidle = ((maxidle * 8.0) / nsPerByte) *
610 	    pow(2.0, (double)RM_FILTER_GAIN);
611 	offtime = (offtime * 8.0) / nsPerByte *
612 	    pow(2.0, (double)RM_FILTER_GAIN);
613 	minidle = ((minidle * 8.0) / nsPerByte) *
614 	    pow(2.0, (double)RM_FILTER_GAIN);
615 
616 	maxidle = maxidle / 1000.0;
617 	offtime = offtime / 1000.0;
618 	minidle = minidle / 1000.0;
619 
620 	opts->minburst = minburst;
621 	opts->maxburst = maxburst;
622 	opts->ns_per_byte = (u_int)nsPerByte;
623 	opts->maxidle = (u_int)fabs(maxidle);
624 	opts->minidle = (int)minidle;
625 	opts->offtime = (u_int)fabs(offtime);
626 
627 	return (0);
628 }
629 
630 static int
631 check_commit_cbq(int dev, int opts, struct pfctl_altq *if_ppa)
632 {
633 	int	error = 0;
634 
635 	/*
636 	 * check if cbq has one root queue and one default queue
637 	 * for this interface
638 	 */
639 	if (if_ppa->meta.root_classes != 1) {
640 		warnx("should have one root queue on %s", if_ppa->pa.ifname);
641 		error++;
642 	}
643 	if (if_ppa->meta.default_classes != 1) {
644 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
645 		error++;
646 	}
647 	return (error);
648 }
649 
650 static int
651 print_cbq_opts(const struct pf_altq *a)
652 {
653 	const struct cbq_opts	*opts;
654 
655 	opts = &a->pq_u.cbq_opts;
656 	if (opts->flags) {
657 		printf("cbq(");
658 		if (opts->flags & CBQCLF_RED)
659 			printf(" red");
660 		if (opts->flags & CBQCLF_ECN)
661 			printf(" ecn");
662 		if (opts->flags & CBQCLF_RIO)
663 			printf(" rio");
664 		if (opts->flags & CBQCLF_CODEL)
665 			printf(" codel");
666 		if (opts->flags & CBQCLF_CLEARDSCP)
667 			printf(" cleardscp");
668 		if (opts->flags & CBQCLF_FLOWVALVE)
669 			printf(" flowvalve");
670 		if (opts->flags & CBQCLF_BORROW)
671 			printf(" borrow");
672 		if (opts->flags & CBQCLF_WRR)
673 			printf(" wrr");
674 		if (opts->flags & CBQCLF_EFFICIENT)
675 			printf(" efficient");
676 		if (opts->flags & CBQCLF_ROOTCLASS)
677 			printf(" root");
678 		if (opts->flags & CBQCLF_DEFCLASS)
679 			printf(" default");
680 		printf(" ) ");
681 
682 		return (1);
683 	} else
684 		return (0);
685 }
686 
687 /*
688  * PRIQ support functions
689  */
690 static int
691 eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
692 {
693 
694 	if (pa->priority >= PRIQ_MAXPRI) {
695 		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
696 		return (-1);
697 	}
698 	if (BIT_ISSET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris)) {
699 		warnx("%s does not have a unique priority on interface %s",
700 		    pa->qname, pa->ifname);
701 		return (-1);
702 	} else
703 		BIT_SET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris);
704 
705 	if (pa->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
706 		if_ppa->meta.default_classes++;
707 	return (0);
708 }
709 
710 static int
711 check_commit_priq(int dev, int opts, struct pfctl_altq *if_ppa)
712 {
713 
714 	/*
715 	 * check if priq has one default class for this interface
716 	 */
717 	if (if_ppa->meta.default_classes != 1) {
718 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
719 		return (1);
720 	}
721 	return (0);
722 }
723 
724 static int
725 print_priq_opts(const struct pf_altq *a)
726 {
727 	const struct priq_opts	*opts;
728 
729 	opts = &a->pq_u.priq_opts;
730 
731 	if (opts->flags) {
732 		printf("priq(");
733 		if (opts->flags & PRCF_RED)
734 			printf(" red");
735 		if (opts->flags & PRCF_ECN)
736 			printf(" ecn");
737 		if (opts->flags & PRCF_RIO)
738 			printf(" rio");
739 		if (opts->flags & PRCF_CODEL)
740 			printf(" codel");
741 		if (opts->flags & PRCF_CLEARDSCP)
742 			printf(" cleardscp");
743 		if (opts->flags & PRCF_DEFAULTCLASS)
744 			printf(" default");
745 		printf(" ) ");
746 
747 		return (1);
748 	} else
749 		return (0);
750 }
751 
752 /*
753  * HFSC support functions
754  */
755 static int
756 eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa,
757     struct pfctl_altq *parent)
758 {
759 	struct hfsc_opts_v1	*opts;
760 	struct service_curve	 sc;
761 
762 	opts = &pa->pq_u.hfsc_opts;
763 
764 	if (parent == NULL) {
765 		/* root queue */
766 		opts->lssc_m1 = pa->ifbandwidth;
767 		opts->lssc_m2 = pa->ifbandwidth;
768 		opts->lssc_d = 0;
769 		return (0);
770 	}
771 
772 	/* First child initializes the parent's service curve accumulators. */
773 	if (parent->meta.children == 1) {
774 		LIST_INIT(&parent->meta.rtsc);
775 		LIST_INIT(&parent->meta.lssc);
776 	}
777 
778 	if (parent->pa.pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
779 		warnx("adding %s would make default queue %s not a leaf",
780 		    pa->qname, pa->parent);
781 		return (-1);
782 	}
783 
784 	if (pa->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS)
785 		if_ppa->meta.default_classes++;
786 
787 	/* if link_share is not specified, use bandwidth */
788 	if (opts->lssc_m2 == 0)
789 		opts->lssc_m2 = pa->bandwidth;
790 
791 	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
792 	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
793 	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
794 		warnx("m2 is zero for %s", pa->qname);
795 		return (-1);
796 	}
797 
798 	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
799 	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
800 	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
801 		warnx("m1 must be zero for convex curve: %s", pa->qname);
802 		return (-1);
803 	}
804 
805 	/*
806 	 * admission control:
807 	 * for the real-time service curve, the sum of the service curves
808 	 * should not exceed 80% of the interface bandwidth.  20% is reserved
809 	 * so as not to over-commit the actual interface bandwidth.
810 	 * for the linkshare service curve, the sum of the child service
811 	 * curves should not exceed the parent service curve.
812 	 * for the upper-limit service curve, the assigned bandwidth should
813 	 * be smaller than the interface bandwidth, and the upper-limit should
814 	 * be larger than the real-time service curve when both are defined.
815 	 */
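	/*
	 * Illustrative numbers: on a 100Mbps interface the real-time m2
	 * values of all queues may together claim at most 80Mbps, while
	 * each linkshare curve is compared against the parent queue's
	 * curve rather than against the interface.
	 */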
816 
817 	/* check the real-time service curve.  reserve 20% of interface bw */
818 	if (opts->rtsc_m2 != 0) {
819 		/* add this queue to the sum */
820 		sc.m1 = opts->rtsc_m1;
821 		sc.d = opts->rtsc_d;
822 		sc.m2 = opts->rtsc_m2;
823 		gsc_add_sc(&parent->meta.rtsc, &sc);
824 		/* compare the sum with 80% of the interface */
825 		sc.m1 = 0;
826 		sc.d = 0;
827 		sc.m2 = pa->ifbandwidth / 100 * 80;
828 		if (!is_gsc_under_sc(&parent->meta.rtsc, &sc)) {
829 			warnx("real-time sc exceeds 80%% of the interface "
830 			    "bandwidth (%s)", rate2str((double)sc.m2));
831 			return (-1);
832 		}
833 	}
834 
835 	/* check the linkshare service curve. */
836 	if (opts->lssc_m2 != 0) {
837 		/* add this queue to the child sum */
838 		sc.m1 = opts->lssc_m1;
839 		sc.d = opts->lssc_d;
840 		sc.m2 = opts->lssc_m2;
841 		gsc_add_sc(&parent->meta.lssc, &sc);
842 		/* compare the sum of the children with parent's sc */
843 		sc.m1 = parent->pa.pq_u.hfsc_opts.lssc_m1;
844 		sc.d = parent->pa.pq_u.hfsc_opts.lssc_d;
845 		sc.m2 = parent->pa.pq_u.hfsc_opts.lssc_m2;
846 		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
847 			warnx("linkshare sc exceeds parent's sc");
848 			return (-1);
849 		}
850 	}
851 
852 	/* check the upper-limit service curve. */
853 	if (opts->ulsc_m2 != 0) {
854 		if (opts->ulsc_m1 > pa->ifbandwidth ||
855 		    opts->ulsc_m2 > pa->ifbandwidth) {
856 			warnx("upper-limit larger than interface bandwidth");
857 			return (-1);
858 		}
859 		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
860 			warnx("upper-limit sc smaller than real-time sc");
861 			return (-1);
862 		}
863 	}
864 
865 	return (0);
866 }
867 
868 /*
869  * FAIRQ support functions
870  */
871 static int
872 eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa,
873     struct pfctl_altq *if_ppa, struct pfctl_altq *parent)
874 {
875 	struct fairq_opts	*opts;
876 	struct service_curve	 sc;
877 
878 	opts = &pa->pq_u.fairq_opts;
879 
880 	if (pa->parent[0] == 0) {
881 		/* root queue */
882 		opts->lssc_m1 = pa->ifbandwidth;
883 		opts->lssc_m2 = pa->ifbandwidth;
884 		opts->lssc_d = 0;
885 		return (0);
886 	}
887 
888 	/* First child initializes the parent's service curve accumulator. */
889 	if (parent->meta.children == 1)
890 		LIST_INIT(&parent->meta.lssc);
891 
892 	if (parent->pa.pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
893 		warnx("adding %s would make default queue %s not a leaf",
894 		    pa->qname, pa->parent);
895 		return (-1);
896 	}
897 
898 	if (pa->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS)
899 		if_ppa->meta.default_classes++;
900 
901 	/* if link_share is not specified, use bandwidth */
902 	if (opts->lssc_m2 == 0)
903 		opts->lssc_m2 = pa->bandwidth;
904 
905 	/*
906 	 * admission control:
907 	 * for the real-time service curve, the sum of the service curves
908 	 * should not exceed 80% of the interface bandwidth.  20% is reserved
909 	 * so as not to over-commit the actual interface bandwidth.
910 	 * for the link-sharing service curve, the sum of the child service
911 	 * curves should not exceed the parent service curve.
912 	 * for the upper-limit service curve, the assigned bandwidth should
913 	 * be smaller than the interface bandwidth, and the upper-limit should
914 	 * be larger than the real-time service curve when both are defined.
915 	 */
916 
917 	/* check the linkshare service curve. */
918 	if (opts->lssc_m2 != 0) {
919 		/* add this queue to the child sum */
920 		sc.m1 = opts->lssc_m1;
921 		sc.d = opts->lssc_d;
922 		sc.m2 = opts->lssc_m2;
923 		gsc_add_sc(&parent->meta.lssc, &sc);
924 		/* compare the sum of the children with parent's sc */
925 		sc.m1 = parent->pa.pq_u.fairq_opts.lssc_m1;
926 		sc.d = parent->pa.pq_u.fairq_opts.lssc_d;
927 		sc.m2 = parent->pa.pq_u.fairq_opts.lssc_m2;
928 		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
929 			warnx("link-sharing sc exceeds parent's sc");
930 			return (-1);
931 		}
932 	}
933 
934 	return (0);
935 }
936 
937 static int
938 check_commit_hfsc(int dev, int opts, struct pfctl_altq *if_ppa)
939 {
940 
941 	/* check if hfsc has one default queue for this interface */
942 	if (if_ppa->meta.default_classes != 1) {
943 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
944 		return (1);
945 	}
946 	return (0);
947 }
948 
949 static int
950 check_commit_fairq(int dev __unused, int opts __unused, struct pfctl_altq *if_ppa)
951 {
952 
953 	/* check if fairq has one default queue for this interface */
954 	if (if_ppa->meta.default_classes != 1) {
955 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
956 		return (1);
957 	}
958 	return (0);
959 }
960 
961 static int
962 print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
963 {
964 	const struct hfsc_opts_v1	*opts;
965 	const struct node_hfsc_sc	*rtsc, *lssc, *ulsc;
966 
967 	opts = &a->pq_u.hfsc_opts;
968 	if (qopts == NULL)
969 		rtsc = lssc = ulsc = NULL;
970 	else {
971 		rtsc = &qopts->data.hfsc_opts.realtime;
972 		lssc = &qopts->data.hfsc_opts.linkshare;
973 		ulsc = &qopts->data.hfsc_opts.upperlimit;
974 	}
975 
976 	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
977 	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
978 	    opts->lssc_d != 0))) {
979 		printf("hfsc(");
980 		if (opts->flags & HFCF_RED)
981 			printf(" red");
982 		if (opts->flags & HFCF_ECN)
983 			printf(" ecn");
984 		if (opts->flags & HFCF_RIO)
985 			printf(" rio");
986 		if (opts->flags & HFCF_CODEL)
987 			printf(" codel");
988 		if (opts->flags & HFCF_CLEARDSCP)
989 			printf(" cleardscp");
990 		if (opts->flags & HFCF_DEFAULTCLASS)
991 			printf(" default");
992 		if (opts->rtsc_m2 != 0)
993 			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
994 			    opts->rtsc_m2, rtsc);
995 		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
996 		    opts->lssc_d != 0))
997 			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
998 			    opts->lssc_m2, lssc);
999 		if (opts->ulsc_m2 != 0)
1000 			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
1001 			    opts->ulsc_m2, ulsc);
1002 		printf(" ) ");
1003 
1004 		return (1);
1005 	} else
1006 		return (0);
1007 }
1008 
1009 static int
1010 print_codel_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
1011 {
1012 	const struct codel_opts *opts;
1013 
1014 	opts = &a->pq_u.codel_opts;
1015 	if (opts->target || opts->interval || opts->ecn) {
1016 		printf("codel(");
1017 		if (opts->target)
1018 			printf(" target %d", opts->target);
1019 		if (opts->interval)
1020 			printf(" interval %d", opts->interval);
1021 		if (opts->ecn)
1022 			printf(" ecn");
1023 		printf(" ) ");
1024 
1025 		return (1);
1026 	}
1027 
1028 	return (0);
1029 }
1030 
1031 static int
1032 print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
1033 {
1034 	const struct fairq_opts		*opts;
1035 	const struct node_fairq_sc	*loc_lssc;
1036 
1037 	opts = &a->pq_u.fairq_opts;
1038 	if (qopts == NULL)
1039 		loc_lssc = NULL;
1040 	else
1041 		loc_lssc = &qopts->data.fairq_opts.linkshare;
1042 
1043 	if (opts->flags ||
1044 	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
1045 	    opts->lssc_d != 0))) {
1046 		printf("fairq(");
1047 		if (opts->flags & FARF_RED)
1048 			printf(" red");
1049 		if (opts->flags & FARF_ECN)
1050 			printf(" ecn");
1051 		if (opts->flags & FARF_RIO)
1052 			printf(" rio");
1053 		if (opts->flags & FARF_CODEL)
1054 			printf(" codel");
1055 		if (opts->flags & FARF_CLEARDSCP)
1056 			printf(" cleardscp");
1057 		if (opts->flags & FARF_DEFAULTCLASS)
1058 			printf(" default");
1059 		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
1060 		    opts->lssc_d != 0))
1061 			print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
1062 			    opts->lssc_m2, loc_lssc);
1063 		printf(" ) ");
1064 
1065 		return (1);
1066 	} else
1067 		return (0);
1068 }
1069 
1070 /*
1071  * admission control using generalized service curve
1072  */
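/*
 * A two-piece service curve (m1, d, m2) contributes up to two linear
 * segments: slope m1 on [0, d) and slope m2 from d onwards.  gsc_add_sc()
 * accumulates such segments into a generalized curve (a sorted segment
 * list), and is_gsc_under_sc() then checks that the accumulated curve
 * never rises above a given two-piece curve.
 */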
1073 
1074 /* add a new service curve to a generalized service curve */
1075 static void
1076 gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
1077 {
1078 	if (is_sc_null(sc))
1079 		return;
1080 	if (sc->d != 0)
1081 		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
1082 	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
1083 }
1084 
1085 /*
1086  * check whether all points of a generalized service curve have
1087  * their y-coordinates no larger than a given two-piece linear
1088  * service curve.
1089  */
1090 static int
1091 is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
1092 {
1093 	struct segment	*s, *last, *end;
1094 	double		 y;
1095 
1096 	if (is_sc_null(sc)) {
1097 		if (LIST_EMPTY(gsc))
1098 			return (1);
1099 		LIST_FOREACH(s, gsc, _next) {
1100 			if (s->m != 0)
1101 				return (0);
1102 		}
1103 		return (1);
1104 	}
1105 	/*
1106 	 * gsc has a dummy entry at the end with x = INFINITY.
1107 	 * loop through up to this dummy entry.
1108 	 */
1109 	end = gsc_getentry(gsc, INFINITY);
1110 	if (end == NULL)
1111 		return (1);
1112 	last = NULL;
1113 	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
1114 		if (s->y > sc_x2y(sc, s->x))
1115 			return (0);
1116 		last = s;
1117 	}
1118 	/* last now holds the real last segment */
1119 	if (last == NULL)
1120 		return (1);
1121 	if (last->m > sc->m2)
1122 		return (0);
1123 	if (last->x < sc->d && last->m > sc->m1) {
1124 		y = last->y + (sc->d - last->x) * last->m;
1125 		if (y > sc_x2y(sc, sc->d))
1126 			return (0);
1127 	}
1128 	return (1);
1129 }
1130 
1131 /*
1132  * return a segment entry starting at x.
1133  * if gsc has no entry starting at x, a new entry is created at x.
1134  */
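/*
 * The segment list stays sorted by x; the x = INFINITY sentinel created
 * through this function serves as the end marker for the loops in
 * is_gsc_under_sc() and gsc_add_seg().
 */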
1135 static struct segment *
1136 gsc_getentry(struct gen_sc *gsc, double x)
1137 {
1138 	struct segment	*new, *prev, *s;
1139 
1140 	prev = NULL;
1141 	LIST_FOREACH(s, gsc, _next) {
1142 		if (s->x == x)
1143 			return (s);	/* matching entry found */
1144 		else if (s->x < x)
1145 			prev = s;
1146 		else
1147 			break;
1148 	}
1149 
1150 	/* we have to create a new entry */
1151 	if ((new = calloc(1, sizeof(struct segment))) == NULL)
1152 		return (NULL);
1153 
1154 	new->x = x;
1155 	if (x == INFINITY || s == NULL)
1156 		new->d = 0;
1157 	else if (s->x == INFINITY)
1158 		new->d = INFINITY;
1159 	else
1160 		new->d = s->x - x;
1161 	if (prev == NULL) {
1162 		/* insert the new entry at the head of the list */
1163 		new->y = 0;
1164 		new->m = 0;
1165 		LIST_INSERT_HEAD(gsc, new, _next);
1166 	} else {
1167 		/*
1168 		 * the start point intersects with the segment pointed to by
1169 		 * prev.  divide prev into 2 segments
1170 		 */
1171 		if (x == INFINITY) {
1172 			prev->d = INFINITY;
1173 			if (prev->m == 0)
1174 				new->y = prev->y;
1175 			else
1176 				new->y = INFINITY;
1177 		} else {
1178 			prev->d = x - prev->x;
1179 			new->y = prev->d * prev->m + prev->y;
1180 		}
1181 		new->m = prev->m;
1182 		LIST_INSERT_AFTER(prev, new, _next);
1183 	}
1184 	return (new);
1185 }
1186 
1187 /* add a segment to a generalized service curve */
1188 static int
1189 gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
1190 {
1191 	struct segment	*start, *end, *s;
1192 	double		 x2;
1193 
1194 	if (d == INFINITY)
1195 		x2 = INFINITY;
1196 	else
1197 		x2 = x + d;
1198 	start = gsc_getentry(gsc, x);
1199 	end = gsc_getentry(gsc, x2);
1200 	if (start == NULL || end == NULL)
1201 		return (-1);
1202 
1203 	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
1204 		s->m += m;
1205 		s->y += y + (s->x - x) * m;
1206 	}
1207 
1208 	end = gsc_getentry(gsc, INFINITY);
1209 	for (; s != end; s = LIST_NEXT(s, _next)) {
1210 		s->y += m * d;
1211 	}
1212 
1213 	return (0);
1214 }
1215 
1216 /* get y-projection of a service curve */
1217 static double
1218 sc_x2y(struct service_curve *sc, double x)
1219 {
1220 	double	y;
1221 
1222 	if (x <= (double)sc->d)
1223 		/* y belongs to the 1st segment */
1224 		y = x * (double)sc->m1;
1225 	else
1226 		/* y belongs to the 2nd segment */
1227 		y = (double)sc->d * (double)sc->m1
1228 			+ (x - (double)sc->d) * (double)sc->m2;
1229 	return (y);
1230 }
1231 
1232 /*
1233  * misc utilities
1234  */
1235 #define	R2S_BUFS	8
1236 #define	RATESTR_MAX	16
1237 
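/*
 * rate2str() hands out slots from a small ring of static buffers so that
 * up to R2S_BUFS results can appear in a single printf() call; callers
 * must not hold on to the returned pointer.
 */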
1238 char *
1239 rate2str(double rate)
1240 {
1241 	char		*buf;
1242 	static char	 r2sbuf[R2S_BUFS][RATESTR_MAX];  /* ring buffer */
1243 	static int	 idx = 0;
1244 	int		 i;
1245 	static const char unit[] = " KMG";
1246 
1247 	buf = r2sbuf[idx++];
1248 	if (idx == R2S_BUFS)
1249 		idx = 0;
1250 
1251 	for (i = 0; rate >= 1000 && i <= 3; i++)
1252 		rate /= 1000;
1253 
1254 	if ((int)(rate * 100) % 100)
1255 		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
1256 	else
1257 		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);
1258 
1259 	return (buf);
1260 }
1261 
1262 #ifdef __FreeBSD__
1263 /*
1264  * XXX
1265  * FreeBSD does not have SIOCGIFDATA.
1266  * To emulate this, the DIOCGIFSPEED ioctl was added to pf.
1267  */
1268 u_int64_t
1269 getifspeed(int pfdev, char *ifname)
1270 {
1271 	struct pf_ifspeed io;
1272 
1273 	bzero(&io, sizeof io);
1274 	if (strlcpy(io.ifname, ifname, IFNAMSIZ) >=
1275 	    sizeof(io.ifname))
1276 		errx(1, "getifspeed: strlcpy");
1277 	if (ioctl(pfdev, DIOCGIFSPEED, &io) == -1)
1278 		err(1, "DIOCGIFSPEED");
1279 	return (io.baudrate);
1280 }
1281 #else
1282 u_int32_t
1283 getifspeed(char *ifname)
1284 {
1285 	int		s;
1286 	struct ifreq	ifr;
1287 	struct if_data	ifrdat;
1288 
1289 	s = get_query_socket();
1290 	bzero(&ifr, sizeof(ifr));
1291 	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1292 	    sizeof(ifr.ifr_name))
1293 		errx(1, "getifspeed: strlcpy");
1294 	ifr.ifr_data = (caddr_t)&ifrdat;
1295 	if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
1296 		err(1, "SIOCGIFDATA");
1297 	return ((u_int32_t)ifrdat.ifi_baudrate);
1298 }
1299 #endif
1300 
1301 u_long
1302 getifmtu(char *ifname)
1303 {
1304 	int		s;
1305 	struct ifreq	ifr;
1306 
1307 	s = get_query_socket();
1308 	bzero(&ifr, sizeof(ifr));
1309 	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1310 	    sizeof(ifr.ifr_name))
1311 		errx(1, "getifmtu: strlcpy");
1312 	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
1313 #ifdef __FreeBSD__
1314 		ifr.ifr_mtu = 1500;
1315 #else
1316 		err(1, "SIOCGIFMTU");
1317 #endif
1318 	if (ifr.ifr_mtu > 0)
1319 		return (ifr.ifr_mtu);
1320 	else {
1321 		warnx("could not get mtu for %s, assuming 1500", ifname);
1322 		return (1500);
1323 	}
1324 }
1325 
1326 int
1327 eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
1328     u_int64_t ref_bw)
1329 {
1330 	int	errors = 0;
1331 
1332 	switch (pa->scheduler) {
1333 	case ALTQT_CBQ:
1334 		pa->pq_u.cbq_opts = opts->data.cbq_opts;
1335 		break;
1336 	case ALTQT_PRIQ:
1337 		pa->pq_u.priq_opts = opts->data.priq_opts;
1338 		break;
1339 	case ALTQT_HFSC:
1340 		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
1341 		if (opts->data.hfsc_opts.linkshare.used) {
1342 			pa->pq_u.hfsc_opts.lssc_m1 =
1343 			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
1344 			    ref_bw);
1345 			pa->pq_u.hfsc_opts.lssc_m2 =
1346 			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
1347 			    ref_bw);
1348 			pa->pq_u.hfsc_opts.lssc_d =
1349 			    opts->data.hfsc_opts.linkshare.d;
1350 		}
1351 		if (opts->data.hfsc_opts.realtime.used) {
1352 			pa->pq_u.hfsc_opts.rtsc_m1 =
1353 			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
1354 			    ref_bw);
1355 			pa->pq_u.hfsc_opts.rtsc_m2 =
1356 			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
1357 			    ref_bw);
1358 			pa->pq_u.hfsc_opts.rtsc_d =
1359 			    opts->data.hfsc_opts.realtime.d;
1360 		}
1361 		if (opts->data.hfsc_opts.upperlimit.used) {
1362 			pa->pq_u.hfsc_opts.ulsc_m1 =
1363 			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
1364 			    ref_bw);
1365 			pa->pq_u.hfsc_opts.ulsc_m2 =
1366 			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
1367 			    ref_bw);
1368 			pa->pq_u.hfsc_opts.ulsc_d =
1369 			    opts->data.hfsc_opts.upperlimit.d;
1370 		}
1371 		break;
1372 	case ALTQT_FAIRQ:
1373 		pa->pq_u.fairq_opts.flags = opts->data.fairq_opts.flags;
1374 		pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
1375 		pa->pq_u.fairq_opts.hogs_m1 =
1376 			eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);
1377 
1378 		if (opts->data.fairq_opts.linkshare.used) {
1379 			pa->pq_u.fairq_opts.lssc_m1 =
1380 			    eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
1381 			    ref_bw);
1382 			pa->pq_u.fairq_opts.lssc_m2 =
1383 			    eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
1384 			    ref_bw);
1385 			pa->pq_u.fairq_opts.lssc_d =
1386 			    opts->data.fairq_opts.linkshare.d;
1387 		}
1388 		break;
1389 	case ALTQT_CODEL:
1390 		pa->pq_u.codel_opts.target = opts->data.codel_opts.target;
1391 		pa->pq_u.codel_opts.interval = opts->data.codel_opts.interval;
1392 		pa->pq_u.codel_opts.ecn = opts->data.codel_opts.ecn;
1393 		break;
1394 	default:
1395 		warnx("eval_queue_opts: unknown scheduler type %u",
1396 		    opts->qtype);
1397 		errors++;
1398 		break;
1399 	}
1400 
1401 	return (errors);
1402 }
1403 
1404 /*
1405  * If the absolute bandwidth is set, return the lesser of that value and the
1406  * reference bandwidth.  Limiting to the reference bandwidth allows simple
1407  * limiting of configured bandwidth parameters for schedulers that are
1408  * 32-bit limited, as the root/interface bandwidth (top-level reference
1409  * bandwidth) will be properly limited in that case.
1410  *
1411  * Otherwise, if the absolute bandwidth is not set, return the given
1412  * percentage of the reference bandwidth.
1413  */
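/*
 * For example, bw_percent = 25 with ref_bw = 1Gbit/s yields 250Mbit/s,
 * while bw_absolute = 2Gbit/s with the same ref_bw is clamped to 1Gbit/s.
 */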
1414 u_int64_t
1415 eval_bwspec(struct node_queue_bw *bw, u_int64_t ref_bw)
1416 {
1417 	if (bw->bw_absolute > 0)
1418 		return (MIN(bw->bw_absolute, ref_bw));
1419 
1420 	if (bw->bw_percent > 0)
1421 		return (ref_bw / 100 * bw->bw_percent);
1422 
1423 	return (0);
1424 }
1425 
1426 void
1427 print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
1428     const struct node_hfsc_sc *sc)
1429 {
1430 	printf(" %s", scname);
1431 
1432 	if (d != 0) {
1433 		printf("(");
1434 		if (sc != NULL && sc->m1.bw_percent > 0)
1435 			printf("%u%%", sc->m1.bw_percent);
1436 		else
1437 			printf("%s", rate2str((double)m1));
1438 		printf(" %u", d);
1439 	}
1440 
1441 	if (sc != NULL && sc->m2.bw_percent > 0)
1442 		printf(" %u%%", sc->m2.bw_percent);
1443 	else
1444 		printf(" %s", rate2str((double)m2));
1445 
1446 	if (d != 0)
1447 		printf(")");
1448 }
1449 
1450 void
1451 print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
1452     const struct node_fairq_sc *sc)
1453 {
1454 	printf(" %s", scname);
1455 
1456 	if (d != 0) {
1457 		printf("(");
1458 		if (sc != NULL && sc->m1.bw_percent > 0)
1459 			printf("%u%%", sc->m1.bw_percent);
1460 		else
1461 			printf("%s", rate2str((double)m1));
1462 		printf(" %u", d);
1463 	}
1464 
1465 	if (sc != NULL && sc->m2.bw_percent > 0)
1466 		printf(" %u%%", sc->m2.bw_percent);
1467 	else
1468 		printf(" %s", rate2str((double)m2));
1469 
1470 	if (d != 0)
1471 		printf(")");
1472 }
1473