xref: /freebsd/sbin/pfctl/pfctl_altq.c (revision 1d34c9dac8624c5c315ae39ad3ae8e5879b23256)
1 /*	$OpenBSD: pfctl_altq.c,v 1.93 2007/10/15 02:16:35 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2002
5  *	Sony Computer Science Laboratories Inc.
6  * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/cdefs.h>
22 __FBSDID("$FreeBSD$");
23 
24 #define PFIOC_USE_LATEST
25 
26 #include <sys/types.h>
27 #include <sys/bitset.h>
28 #include <sys/ioctl.h>
29 #include <sys/socket.h>
30 
31 #include <net/if.h>
32 #include <netinet/in.h>
33 #include <net/pfvar.h>
34 
35 #include <err.h>
36 #include <errno.h>
37 #include <inttypes.h>
38 #include <limits.h>
39 #include <math.h>
40 #include <search.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <string.h>
44 #include <unistd.h>
45 
46 #include <net/altq/altq.h>
47 #include <net/altq/altq_cbq.h>
48 #include <net/altq/altq_codel.h>
49 #include <net/altq/altq_priq.h>
50 #include <net/altq/altq_hfsc.h>
51 #include <net/altq/altq_fairq.h>
52 
53 #include "pfctl_parser.h"
54 #include "pfctl.h"
55 
56 #define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))
57 
58 static STAILQ_HEAD(interfaces, pfctl_altq) interfaces = STAILQ_HEAD_INITIALIZER(interfaces);
59 static struct hsearch_data queue_map;
60 static struct hsearch_data if_map;
61 static struct hsearch_data qid_map;
62 
63 static struct pfctl_altq *pfaltq_lookup(char *ifname);
64 static struct pfctl_altq *qname_to_pfaltq(const char *, const char *);
65 static u_int32_t	 qname_to_qid(char *);
66 
67 static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *,
68 		    struct pfctl_altq *);
69 static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
70 static int	check_commit_cbq(int, int, struct pfctl_altq *);
71 static int	print_cbq_opts(const struct pf_altq *);
72 
73 static int	print_codel_opts(const struct pf_altq *,
74 		    const struct node_queue_opt *);
75 
76 static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *,
77 		    struct pfctl_altq *);
78 static int	check_commit_priq(int, int, struct pfctl_altq *);
79 static int	print_priq_opts(const struct pf_altq *);
80 
81 static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *,
82 		    struct pfctl_altq *, struct pfctl_altq *);
83 static int	check_commit_hfsc(int, int, struct pfctl_altq *);
84 static int	print_hfsc_opts(const struct pf_altq *,
85 		    const struct node_queue_opt *);
86 
87 static int	eval_pfqueue_fairq(struct pfctl *, struct pf_altq *,
88 		    struct pfctl_altq *, struct pfctl_altq *);
89 static int	print_fairq_opts(const struct pf_altq *,
90 		    const struct node_queue_opt *);
91 static int	check_commit_fairq(int, int, struct pfctl_altq *);
92 
93 static void		 gsc_add_sc(struct gen_sc *, struct service_curve *);
94 static int		 is_gsc_under_sc(struct gen_sc *,
95 			     struct service_curve *);
96 static void		 gsc_destroy(struct gen_sc *);
97 static struct segment	*gsc_getentry(struct gen_sc *, double);
98 static int		 gsc_add_seg(struct gen_sc *, double, double, double,
99 			     double);
100 static double		 sc_x2y(struct service_curve *, double);
101 
102 #ifdef __FreeBSD__
103 u_int64_t	getifspeed(int, char *);
104 #else
105 u_int32_t	 getifspeed(char *);
106 #endif
107 u_long		 getifmtu(char *);
108 int		 eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
109 		     u_int64_t);
110 u_int64_t	 eval_bwspec(struct node_queue_bw *, u_int64_t);
111 void		 print_hfsc_sc(const char *, u_int, u_int, u_int,
112 		     const struct node_hfsc_sc *);
113 void		 print_fairq_sc(const char *, u_int, u_int, u_int,
114 		     const struct node_fairq_sc *);
115 
116 static __attribute__((constructor)) void
117 pfctl_altq_init(void)
118 {
119 	/*
120 	 * As hdestroy() will never be called on these tables, it will be
121 	 * safe to use references into the stored data as keys.
122 	 */
123 	if (hcreate_r(0, &queue_map) == 0)
124 		err(1, "Failed to create altq queue map");
125 	if (hcreate_r(0, &if_map) == 0)
126 		err(1, "Failed to create altq interface map");
127 	if (hcreate_r(0, &qid_map) == 0)
128 		err(1, "Failed to create altq queue id map");
129 }
130 
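/*
 * Illustrative keying (interface and queue names are hypothetical): storing
 * "altq on em0" adds an if_map entry keyed "em0"; storing "queue std on em0"
 * adds a queue_map entry keyed "em0:std" and a qid_map entry keyed "std", so
 * a queue named "std" on any interface resolves to the same qid.
 */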
131 void
132 pfaltq_store(struct pf_altq *a)
133 {
134 	struct pfctl_altq	*altq;
135 	ENTRY 			 item;
136 	ENTRY			*ret_item;
137 	size_t			 key_size;
138 
139 	if ((altq = malloc(sizeof(*altq))) == NULL)
140 		err(1, "queue malloc");
141 	memcpy(&altq->pa, a, sizeof(struct pf_altq));
142 	memset(&altq->meta, 0, sizeof(altq->meta));
143 
144 	if (a->qname[0] == 0) {
145 		item.key = altq->pa.ifname;
146 		item.data = altq;
147 		if (hsearch_r(item, ENTER, &ret_item, &if_map) == 0)
148 			err(1, "interface map insert");
149 		STAILQ_INSERT_TAIL(&interfaces, altq, meta.link);
150 	} else {
151 		key_size = sizeof(a->ifname) + sizeof(a->qname);
152 		if ((item.key = malloc(key_size)) == NULL)
153 			err(1, "queue map key malloc");
154 		snprintf(item.key, key_size, "%s:%s", a->ifname, a->qname);
155 		item.data = altq;
156 		if (hsearch_r(item, ENTER, &ret_item, &queue_map) == 0)
157 			err(1, "queue map insert");
158 
159 		item.key = altq->pa.qname;
160 		item.data = &altq->pa.qid;
161 		if (hsearch_r(item, ENTER, &ret_item, &qid_map) == 0)
162 			err(1, "qid map insert");
163 	}
164 }
165 
166 static struct pfctl_altq *
167 pfaltq_lookup(char *ifname)
168 {
169 	ENTRY	 item;
170 	ENTRY	*ret_item;
171 
172 	item.key = ifname;
173 	if (hsearch_r(item, FIND, &ret_item, &if_map) == 0)
174 		return (NULL);
175 
176 	return (ret_item->data);
177 }
178 
179 static struct pfctl_altq *
180 qname_to_pfaltq(const char *qname, const char *ifname)
181 {
182 	ENTRY	 item;
183 	ENTRY	*ret_item;
184 	char	 key[IFNAMSIZ + PF_QNAME_SIZE];
185 
186 	item.key = key;
187 	snprintf(item.key, sizeof(key), "%s:%s", ifname, qname);
188 	if (hsearch_r(item, FIND, &ret_item, &queue_map) == 0)
189 		return (NULL);
190 
191 	return (ret_item->data);
192 }
193 
194 static u_int32_t
195 qname_to_qid(char *qname)
196 {
197 	ENTRY	 item;
198 	ENTRY	*ret_item;
199 	uint32_t qid;
200 
201 	/*
202 	 * We guarantee that queues with the same name on different
203 	 * interfaces have the same qid.
204 	 */
205 	item.key = qname;
206 	if (hsearch_r(item, FIND, &ret_item, &qid_map) == 0)
207 		return (0);
208 
209 	qid = *(uint32_t *)ret_item->data;
210 	return (qid);
211 }
212 
213 void
214 print_altq(const struct pf_altq *a, unsigned int level,
215     struct node_queue_bw *bw, struct node_queue_opt *qopts)
216 {
217 	if (a->qname[0] != 0) {
218 		print_queue(a, level, bw, 1, qopts);
219 		return;
220 	}
221 
222 #ifdef __FreeBSD__
223 	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
224 		printf("INACTIVE ");
225 #endif
226 
227 	printf("altq on %s ", a->ifname);
228 
229 	switch (a->scheduler) {
230 	case ALTQT_CBQ:
231 		if (!print_cbq_opts(a))
232 			printf("cbq ");
233 		break;
234 	case ALTQT_PRIQ:
235 		if (!print_priq_opts(a))
236 			printf("priq ");
237 		break;
238 	case ALTQT_HFSC:
239 		if (!print_hfsc_opts(a, qopts))
240 			printf("hfsc ");
241 		break;
242 	case ALTQT_FAIRQ:
243 		if (!print_fairq_opts(a, qopts))
244 			printf("fairq ");
245 		break;
246 	case ALTQT_CODEL:
247 		if (!print_codel_opts(a, qopts))
248 			printf("codel ");
249 		break;
250 	}
251 
252 	if (bw != NULL && bw->bw_percent > 0) {
253 		if (bw->bw_percent < 100)
254 			printf("bandwidth %u%% ", bw->bw_percent);
255 	} else
256 		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));
257 
258 	if (a->qlimit != DEFAULT_QLIMIT)
259 		printf("qlimit %u ", a->qlimit);
260 	printf("tbrsize %u ", a->tbrsize);
261 }
262 
263 void
264 print_queue(const struct pf_altq *a, unsigned int level,
265     struct node_queue_bw *bw, int print_interface,
266     struct node_queue_opt *qopts)
267 {
268 	unsigned int	i;
269 
270 #ifdef __FreeBSD__
271 	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
272 		printf("INACTIVE ");
273 #endif
274 	printf("queue ");
275 	for (i = 0; i < level; ++i)
276 		printf(" ");
277 	printf("%s ", a->qname);
278 	if (print_interface)
279 		printf("on %s ", a->ifname);
280 	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
281 		a->scheduler == ALTQT_FAIRQ) {
282 		if (bw != NULL && bw->bw_percent > 0) {
283 			if (bw->bw_percent < 100)
284 				printf("bandwidth %u%% ", bw->bw_percent);
285 		} else
286 			printf("bandwidth %s ", rate2str((double)a->bandwidth));
287 	}
288 	if (a->priority != DEFAULT_PRIORITY)
289 		printf("priority %u ", a->priority);
290 	if (a->qlimit != DEFAULT_QLIMIT)
291 		printf("qlimit %u ", a->qlimit);
292 	switch (a->scheduler) {
293 	case ALTQT_CBQ:
294 		print_cbq_opts(a);
295 		break;
296 	case ALTQT_PRIQ:
297 		print_priq_opts(a);
298 		break;
299 	case ALTQT_HFSC:
300 		print_hfsc_opts(a, qopts);
301 		break;
302 	case ALTQT_FAIRQ:
303 		print_fairq_opts(a, qopts);
304 		break;
305 	}
306 }
307 
308 /*
309  * eval_pfaltq computes the discipline parameters.
310  */
311 int
312 eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
313     struct node_queue_opt *opts)
314 {
315 	u_int64_t	rate;
316 	u_int		size, errors = 0;
317 
318 	if (bw->bw_absolute > 0)
319 		pa->ifbandwidth = bw->bw_absolute;
320 	else
321 #ifdef __FreeBSD__
322 		if ((rate = getifspeed(pf->dev, pa->ifname)) == 0) {
323 #else
324 		if ((rate = getifspeed(pa->ifname)) == 0) {
325 #endif
326 			fprintf(stderr, "interface %s does not know its bandwidth, "
327 			    "please specify an absolute bandwidth\n",
328 			    pa->ifname);
329 			errors++;
330 		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
331 			pa->ifbandwidth = rate;
332 
333 	/*
334 	 * Limit bandwidth to UINT_MAX for schedulers that aren't 64-bit ready.
335 	 */
336 	if ((pa->scheduler != ALTQT_HFSC) && (pa->ifbandwidth > UINT_MAX)) {
337 		pa->ifbandwidth = UINT_MAX;
338 		warnx("interface %s bandwidth limited to %" PRIu64 " bps "
339 		    "because the selected scheduler is 32-bit limited", pa->ifname,
340 		    pa->ifbandwidth);
341 	}
342 	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);
343 
344 	/* if tbrsize is not specified, use heuristics */
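	/*
	 * For example (illustrative numbers): a 1Gbps interface falls into the
	 * "<= 2500Mbps" bucket below, so with a 1500-byte MTU the heuristic
	 * picks tbrsize = 24 * 1500 = 36000 bytes.
	 */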
345 	if (pa->tbrsize == 0) {
346 		rate = pa->ifbandwidth;
347 		if (rate <= 1 * 1000 * 1000)
348 			size = 1;
349 		else if (rate <= 10 * 1000 * 1000)
350 			size = 4;
351 		else if (rate <= 200 * 1000 * 1000)
352 			size = 8;
353 		else if (rate <= 2500 * 1000 * 1000ULL)
354 			size = 24;
355 		else
356 			size = 128;
357 		size = size * getifmtu(pa->ifname);
358 		pa->tbrsize = size;
359 	}
360 	return (errors);
361 }
362 
363 /*
364  * check_commit_altq does consistency check for each interface
365  */
366 int
367 check_commit_altq(int dev, int opts)
368 {
369 	struct pfctl_altq	*if_ppa;
370 	int			 error = 0;
371 
372 	/* call the discipline check for each interface. */
373 	STAILQ_FOREACH(if_ppa, &interfaces, meta.link) {
374 		switch (if_ppa->pa.scheduler) {
375 		case ALTQT_CBQ:
376 			error = check_commit_cbq(dev, opts, if_ppa);
377 			break;
378 		case ALTQT_PRIQ:
379 			error = check_commit_priq(dev, opts, if_ppa);
380 			break;
381 		case ALTQT_HFSC:
382 			error = check_commit_hfsc(dev, opts, if_ppa);
383 			break;
384 		case ALTQT_FAIRQ:
385 			error = check_commit_fairq(dev, opts, if_ppa);
386 			break;
387 		default:
388 			break;
389 		}
390 	}
391 	return (error);
392 }
393 
394 /*
395  * eval_pfqueue computes the queue parameters.
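 *
 * For example (queue names and numbers are hypothetical): with a parent
 * queue of 100Mb on em0 and two child queues of 60Mb each, the second
 * child raises the parent's meta.bwsum to 120Mb and triggers the
 * over-commit warning below.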
396  */
397 int
398 eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
399     struct node_queue_opt *opts)
400 {
401 	/* should be merged with expand_queue */
402 	struct pfctl_altq	*if_ppa, *parent;
403 	int		 	 error = 0;
404 
405 	/* find the corresponding interface and copy fields used by queues */
406 	if ((if_ppa = pfaltq_lookup(pa->ifname)) == NULL) {
407 		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
408 		return (1);
409 	}
410 	pa->scheduler = if_ppa->pa.scheduler;
411 	pa->ifbandwidth = if_ppa->pa.ifbandwidth;
412 
413 	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
414 		fprintf(stderr, "queue %s already exists on interface %s\n",
415 		    pa->qname, pa->ifname);
416 		return (1);
417 	}
418 	pa->qid = qname_to_qid(pa->qname);
419 
420 	parent = NULL;
421 	if (pa->parent[0] != 0) {
422 		parent = qname_to_pfaltq(pa->parent, pa->ifname);
423 		if (parent == NULL) {
424 			fprintf(stderr, "parent %s not found for %s\n",
425 			    pa->parent, pa->qname);
426 			return (1);
427 		}
428 		pa->parent_qid = parent->pa.qid;
429 	}
430 	if (pa->qlimit == 0)
431 		pa->qlimit = DEFAULT_QLIMIT;
432 
433 	if (eval_queue_opts(pa, opts,
434 		parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth))
435 		return (1);
436 
437 	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
438 		pa->scheduler == ALTQT_FAIRQ) {
439 		pa->bandwidth = eval_bwspec(bw,
440 		    parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth);
441 
442 		/*
443 		 * For HFSC, if the linkshare service curve m2 parameter is
444 		 * set, it overrides the provided queue bandwidth parameter,
445 		 * so adjust the queue bandwidth parameter accordingly here
446 		 * to avoid false positives in the total child bandwidth
447 		 * check below.
448 		 */
449 		if ((pa->scheduler == ALTQT_HFSC) &&
450 		    (pa->pq_u.hfsc_opts.lssc_m2 != 0)) {
451 			pa->bandwidth = pa->pq_u.hfsc_opts.lssc_m2;
452 		}
453 
454 		if (pa->bandwidth > pa->ifbandwidth) {
455 			fprintf(stderr, "bandwidth for %s higher than "
456 			    "interface\n", pa->qname);
457 			return (1);
458 		}
459 		/* check that the sum of the child bandwidths is under the parent's */
460 		if (parent != NULL) {
461 			if (pa->bandwidth > parent->pa.bandwidth) {
462 				warnx("bandwidth for %s higher than parent",
463 				    pa->qname);
464 				return (1);
465 			}
466 			parent->meta.bwsum += pa->bandwidth;
467 			if (parent->meta.bwsum > parent->pa.bandwidth) {
468 				warnx("the sum of the child bandwidths (%" PRIu64
469 				    ") is higher than parent \"%s\" (%" PRIu64 ")",
470 				    parent->meta.bwsum, parent->pa.qname,
471 				    parent->pa.bandwidth);
472 			}
473 		}
474 	}
475 
476 	if (parent != NULL)
477 		parent->meta.children++;
478 
479 	switch (pa->scheduler) {
480 	case ALTQT_CBQ:
481 		error = eval_pfqueue_cbq(pf, pa, if_ppa);
482 		break;
483 	case ALTQT_PRIQ:
484 		error = eval_pfqueue_priq(pf, pa, if_ppa);
485 		break;
486 	case ALTQT_HFSC:
487 		error = eval_pfqueue_hfsc(pf, pa, if_ppa, parent);
488 		break;
489 	case ALTQT_FAIRQ:
490 		error = eval_pfqueue_fairq(pf, pa, if_ppa, parent);
491 		break;
492 	default:
493 		break;
494 	}
495 	return (error);
496 }
497 
498 /*
499  * CBQ support functions
500  */
501 #define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
502 #define	RM_NS_PER_SEC	(1000000000)
503 
504 static int
505 eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
506 {
507 	struct cbq_opts	*opts;
508 	u_int		 ifmtu;
509 
510 	if (pa->priority >= CBQ_MAXPRI) {
511 		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
512 		return (-1);
513 	}
514 
515 	ifmtu = getifmtu(pa->ifname);
516 	opts = &pa->pq_u.cbq_opts;
517 
518 	if (opts->pktsize == 0) {	/* use default */
519 		opts->pktsize = ifmtu;
520 		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
521 			opts->pktsize &= ~(MCLBYTES - 1);
522 	} else if (opts->pktsize > ifmtu)
523 		opts->pktsize = ifmtu;
524 	if (opts->maxpktsize == 0)	/* use default */
525 		opts->maxpktsize = ifmtu;
526 	else if (opts->maxpktsize > ifmtu)
527 		opts->maxpktsize = ifmtu;
528 
529 	if (opts->pktsize > opts->maxpktsize)
530 		opts->pktsize = opts->maxpktsize;
531 
532 	if (pa->parent[0] == 0)
533 		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);
534 
535 	if (pa->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
536 		if_ppa->meta.root_classes++;
537 	if (pa->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
538 		if_ppa->meta.default_classes++;
539 
540 	cbq_compute_idletime(pf, pa);
541 	return (0);
542 }
543 
544 /*
545  * compute ns_per_byte, maxidle, minidle, and offtime
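 *
 * Illustrative numbers (not taken from the source): on a 100Mbps interface
 * ifnsPerByte = (1 / 100e6) * 1e9 * 8 = 80 ns/byte; a 10Mbps queue gives
 * f = 0.1 and nsPerByte = 800 ns/byte.  With a 1500-byte pktsize,
 * ptime = 1500 * 80 = 120000 ns and cptime = ptime * (1 - f) / f = 1080000 ns,
 * which then feed the maxidle/offtime computations.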
546  */
547 static int
548 cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
549 {
550 	struct cbq_opts	*opts;
551 	double		 maxidle_s, maxidle, minidle;
552 	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
553 	double		 z, g, f, gton, gtom;
554 	u_int		 minburst, maxburst;
555 
556 	opts = &pa->pq_u.cbq_opts;
557 	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
558 	minburst = opts->minburst;
559 	maxburst = opts->maxburst;
560 
561 	if (pa->bandwidth == 0)
562 		f = 0.0001;	/* small enough? */
563 	else
564 		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);
565 
566 	nsPerByte = ifnsPerByte / f;
567 	ptime = (double)opts->pktsize * ifnsPerByte;
568 	cptime = ptime * (1.0 - f) / f;
569 
570 	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
571 		/*
572 		 * this causes integer overflow in kernel!
573 		 * (bandwidth < 6Kbps when max_pkt_size=1500)
574 		 */
575 		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
576 			warnx("queue bandwidth must be larger than %s",
577 			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
578 			    (double)INT_MAX * (double)pa->ifbandwidth));
579 			fprintf(stderr, "cbq: queue %s is too slow!\n",
580 			    pa->qname);
581 		}
582 		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
583 	}
584 
585 	if (maxburst == 0) {  /* use default */
586 		if (cptime > 10.0 * 1000000)
587 			maxburst = 4;
588 		else
589 			maxburst = 16;
590 	}
591 	if (minburst == 0)  /* use default */
592 		minburst = 2;
593 	if (minburst > maxburst)
594 		minburst = maxburst;
595 
596 	z = (double)(1 << RM_FILTER_GAIN);
597 	g = (1.0 - 1.0 / z);
598 	gton = pow(g, (double)maxburst);
599 	gtom = pow(g, (double)(minburst-1));
600 	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
601 	maxidle_s = (1.0 - g);
602 	if (maxidle > maxidle_s)
603 		maxidle = ptime * maxidle;
604 	else
605 		maxidle = ptime * maxidle_s;
606 	offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
607 	minidle = -((double)opts->maxpktsize * (double)nsPerByte);
608 
609 	/* scale parameters */
610 	maxidle = ((maxidle * 8.0) / nsPerByte) *
611 	    pow(2.0, (double)RM_FILTER_GAIN);
612 	offtime = (offtime * 8.0) / nsPerByte *
613 	    pow(2.0, (double)RM_FILTER_GAIN);
614 	minidle = ((minidle * 8.0) / nsPerByte) *
615 	    pow(2.0, (double)RM_FILTER_GAIN);
616 
617 	maxidle = maxidle / 1000.0;
618 	offtime = offtime / 1000.0;
619 	minidle = minidle / 1000.0;
620 
621 	opts->minburst = minburst;
622 	opts->maxburst = maxburst;
623 	opts->ns_per_byte = (u_int)nsPerByte;
624 	opts->maxidle = (u_int)fabs(maxidle);
625 	opts->minidle = (int)minidle;
626 	opts->offtime = (u_int)fabs(offtime);
627 
628 	return (0);
629 }
630 
631 static int
632 check_commit_cbq(int dev, int opts, struct pfctl_altq *if_ppa)
633 {
634 	int	error = 0;
635 
636 	/*
637 	 * check if cbq has one root queue and one default queue
638 	 * for this interface
639 	 */
640 	if (if_ppa->meta.root_classes != 1) {
641 		warnx("should have one root queue on %s", if_ppa->pa.ifname);
642 		error++;
643 	}
644 	if (if_ppa->meta.default_classes != 1) {
645 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
646 		error++;
647 	}
648 	return (error);
649 }
650 
651 static int
652 print_cbq_opts(const struct pf_altq *a)
653 {
654 	const struct cbq_opts	*opts;
655 
656 	opts = &a->pq_u.cbq_opts;
657 	if (opts->flags) {
658 		printf("cbq(");
659 		if (opts->flags & CBQCLF_RED)
660 			printf(" red");
661 		if (opts->flags & CBQCLF_ECN)
662 			printf(" ecn");
663 		if (opts->flags & CBQCLF_RIO)
664 			printf(" rio");
665 		if (opts->flags & CBQCLF_CODEL)
666 			printf(" codel");
667 		if (opts->flags & CBQCLF_CLEARDSCP)
668 			printf(" cleardscp");
669 		if (opts->flags & CBQCLF_FLOWVALVE)
670 			printf(" flowvalve");
671 		if (opts->flags & CBQCLF_BORROW)
672 			printf(" borrow");
673 		if (opts->flags & CBQCLF_WRR)
674 			printf(" wrr");
675 		if (opts->flags & CBQCLF_EFFICIENT)
676 			printf(" efficient");
677 		if (opts->flags & CBQCLF_ROOTCLASS)
678 			printf(" root");
679 		if (opts->flags & CBQCLF_DEFCLASS)
680 			printf(" default");
681 		printf(" ) ");
682 
683 		return (1);
684 	} else
685 		return (0);
686 }
687 
688 /*
689  * PRIQ support functions
690  */
691 static int
692 eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
693 {
694 
695 	if (pa->priority >= PRIQ_MAXPRI) {
696 		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
697 		return (-1);
698 	}
699 	if (BIT_ISSET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris)) {
700 		warnx("%s does not have a unique priority on interface %s",
701 		    pa->qname, pa->ifname);
702 		return (-1);
703 	} else
704 		BIT_SET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris);
705 
706 	if (pa->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
707 		if_ppa->meta.default_classes++;
708 	return (0);
709 }
710 
711 static int
712 check_commit_priq(int dev, int opts, struct pfctl_altq *if_ppa)
713 {
714 
715 	/*
716 	 * check if priq has one default class for this interface
717 	 */
718 	if (if_ppa->meta.default_classes != 1) {
719 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
720 		return (1);
721 	}
722 	return (0);
723 }
724 
725 static int
726 print_priq_opts(const struct pf_altq *a)
727 {
728 	const struct priq_opts	*opts;
729 
730 	opts = &a->pq_u.priq_opts;
731 
732 	if (opts->flags) {
733 		printf("priq(");
734 		if (opts->flags & PRCF_RED)
735 			printf(" red");
736 		if (opts->flags & PRCF_ECN)
737 			printf(" ecn");
738 		if (opts->flags & PRCF_RIO)
739 			printf(" rio");
740 		if (opts->flags & PRCF_CODEL)
741 			printf(" codel");
742 		if (opts->flags & PRCF_CLEARDSCP)
743 			printf(" cleardscp");
744 		if (opts->flags & PRCF_DEFAULTCLASS)
745 			printf(" default");
746 		printf(" ) ");
747 
748 		return (1);
749 	} else
750 		return (0);
751 }
752 
753 /*
754  * HFSC support functions
755  */
756 static int
757 eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa,
758     struct pfctl_altq *parent)
759 {
760 	struct hfsc_opts_v1	*opts;
761 	struct service_curve	 sc;
762 
763 	opts = &pa->pq_u.hfsc_opts;
764 
765 	if (parent == NULL) {
766 		/* root queue */
767 		opts->lssc_m1 = pa->ifbandwidth;
768 		opts->lssc_m2 = pa->ifbandwidth;
769 		opts->lssc_d = 0;
770 		return (0);
771 	}
772 
773 	/* First child initializes the parent's service curve accumulators. */
774 	if (parent->meta.children == 1) {
775 		LIST_INIT(&parent->meta.rtsc);
776 		LIST_INIT(&parent->meta.lssc);
777 	}
778 
779 	if (parent->pa.pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
780 		warnx("adding %s would make default queue %s not a leaf",
781 		    pa->qname, pa->parent);
782 		return (-1);
783 	}
784 
785 	if (pa->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS)
786 		if_ppa->meta.default_classes++;
787 
788 	/* if link_share is not specified, use bandwidth */
789 	if (opts->lssc_m2 == 0)
790 		opts->lssc_m2 = pa->bandwidth;
791 
792 	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
793 	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
794 	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
795 		warnx("m2 is zero for %s", pa->qname);
796 		return (-1);
797 	}
798 
799 	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
800 	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
801 	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
802 		warnx("m1 must be zero for convex curve: %s", pa->qname);
803 		return (-1);
804 	}
805 
806 	/*
807 	 * admission control:
808 	 * for the real-time service curve, the sum of the service curves
809 	 * should not exceed 80% of the interface bandwidth.  20% is reserved
810 	 * so as not to over-commit the actual interface bandwidth.
811 	 * for the linkshare service curve, the sum of the child service
812 	 * curves should not exceed the parent service curve.
813 	 * for the upper-limit service curve, the assigned bandwidth should
814 	 * be smaller than the interface bandwidth, and the upper-limit should
815 	 * be larger than the real-time service curve when both are defined.
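	 *
	 * For example (numbers are illustrative only): on a 100Mbps interface
	 * the real-time m2 values of all queues may sum to at most 80Mbps,
	 * since the check below compares the accumulated curve against
	 * ifbandwidth / 100 * 80.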
816 	 */
817 
818 	/* check the real-time service curve.  reserve 20% of interface bw */
819 	if (opts->rtsc_m2 != 0) {
820 		/* add this queue to the sum */
821 		sc.m1 = opts->rtsc_m1;
822 		sc.d = opts->rtsc_d;
823 		sc.m2 = opts->rtsc_m2;
824 		gsc_add_sc(&parent->meta.rtsc, &sc);
825 		/* compare the sum with 80% of the interface */
826 		sc.m1 = 0;
827 		sc.d = 0;
828 		sc.m2 = pa->ifbandwidth / 100 * 80;
829 		if (!is_gsc_under_sc(&parent->meta.rtsc, &sc)) {
830 			warnx("real-time sc exceeds 80%% of the interface "
831 			    "bandwidth (%s)", rate2str((double)sc.m2));
832 			return (-1);
833 		}
834 	}
835 
836 	/* check the linkshare service curve. */
837 	if (opts->lssc_m2 != 0) {
838 		/* add this queue to the child sum */
839 		sc.m1 = opts->lssc_m1;
840 		sc.d = opts->lssc_d;
841 		sc.m2 = opts->lssc_m2;
842 		gsc_add_sc(&parent->meta.lssc, &sc);
843 		/* compare the sum of the children with parent's sc */
844 		sc.m1 = parent->pa.pq_u.hfsc_opts.lssc_m1;
845 		sc.d = parent->pa.pq_u.hfsc_opts.lssc_d;
846 		sc.m2 = parent->pa.pq_u.hfsc_opts.lssc_m2;
847 		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
848 			warnx("linkshare sc exceeds parent's sc");
849 			return (-1);
850 		}
851 	}
852 
853 	/* check the upper-limit service curve. */
854 	if (opts->ulsc_m2 != 0) {
855 		if (opts->ulsc_m1 > pa->ifbandwidth ||
856 		    opts->ulsc_m2 > pa->ifbandwidth) {
857 			warnx("upper-limit larger than interface bandwidth");
858 			return (-1);
859 		}
860 		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
861 			warnx("upper-limit sc smaller than real-time sc");
862 			return (-1);
863 		}
864 	}
865 
866 	return (0);
867 }
868 
869 /*
870  * FAIRQ support functions
871  */
872 static int
873 eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa,
874     struct pfctl_altq *if_ppa, struct pfctl_altq *parent)
875 {
876 	struct fairq_opts	*opts;
877 	struct service_curve	 sc;
878 
879 	opts = &pa->pq_u.fairq_opts;
880 
881 	if (parent == NULL) {
882 		/* root queue */
883 		opts->lssc_m1 = pa->ifbandwidth;
884 		opts->lssc_m2 = pa->ifbandwidth;
885 		opts->lssc_d = 0;
886 		return (0);
887 	}
888 
889 	/* First child initializes the parent's service curve accumulator. */
890 	if (parent->meta.children == 1)
891 		LIST_INIT(&parent->meta.lssc);
892 
893 	if (parent->pa.pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
894 		warnx("adding %s would make default queue %s not a leaf",
895 		    pa->qname, pa->parent);
896 		return (-1);
897 	}
898 
899 	if (pa->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS)
900 		if_ppa->meta.default_classes++;
901 
902 	/* if link_share is not specified, use bandwidth */
903 	if (opts->lssc_m2 == 0)
904 		opts->lssc_m2 = pa->bandwidth;
905 
906 	/*
907 	 * admission control:
908 	 * for the real-time service curve, the sum of the service curves
909 	 * should not exceed 80% of the interface bandwidth.  20% is reserved
910 	 * so as not to over-commit the actual interface bandwidth.
911 	 * for the link-sharing service curve, the sum of the child service
912 	 * curves should not exceed the parent service curve.
913 	 * for the upper-limit service curve, the assigned bandwidth should
914 	 * be smaller than the interface bandwidth, and the upper-limit should
915 	 * be larger than the real-time service curve when both are defined.
916 	 */
917 
918 	/* check the linkshare service curve. */
919 	if (opts->lssc_m2 != 0) {
920 		/* add this queue to the child sum */
921 		sc.m1 = opts->lssc_m1;
922 		sc.d = opts->lssc_d;
923 		sc.m2 = opts->lssc_m2;
924 		gsc_add_sc(&parent->meta.lssc, &sc);
925 		/* compare the sum of the children with parent's sc */
926 		sc.m1 = parent->pa.pq_u.fairq_opts.lssc_m1;
927 		sc.d = parent->pa.pq_u.fairq_opts.lssc_d;
928 		sc.m2 = parent->pa.pq_u.fairq_opts.lssc_m2;
929 		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
930 			warnx("link-sharing sc exceeds parent's sc");
931 			return (-1);
932 		}
933 	}
934 
935 	return (0);
936 }
937 
938 static int
939 check_commit_hfsc(int dev, int opts, struct pfctl_altq *if_ppa)
940 {
941 
942 	/* check if hfsc has one default queue for this interface */
943 	if (if_ppa->meta.default_classes != 1) {
944 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
945 		return (1);
946 	}
947 	return (0);
948 }
949 
950 static int
951 check_commit_fairq(int dev __unused, int opts __unused, struct pfctl_altq *if_ppa)
952 {
953 
954 	/* check if fairq has one default queue for this interface */
955 	if (if_ppa->meta.default_classes != 1) {
956 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
957 		return (1);
958 	}
959 	return (0);
960 }
961 
962 static int
963 print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
964 {
965 	const struct hfsc_opts_v1	*opts;
966 	const struct node_hfsc_sc	*rtsc, *lssc, *ulsc;
967 
968 	opts = &a->pq_u.hfsc_opts;
969 	if (qopts == NULL)
970 		rtsc = lssc = ulsc = NULL;
971 	else {
972 		rtsc = &qopts->data.hfsc_opts.realtime;
973 		lssc = &qopts->data.hfsc_opts.linkshare;
974 		ulsc = &qopts->data.hfsc_opts.upperlimit;
975 	}
976 
977 	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
978 	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
979 	    opts->lssc_d != 0))) {
980 		printf("hfsc(");
981 		if (opts->flags & HFCF_RED)
982 			printf(" red");
983 		if (opts->flags & HFCF_ECN)
984 			printf(" ecn");
985 		if (opts->flags & HFCF_RIO)
986 			printf(" rio");
987 		if (opts->flags & HFCF_CODEL)
988 			printf(" codel");
989 		if (opts->flags & HFCF_CLEARDSCP)
990 			printf(" cleardscp");
991 		if (opts->flags & HFCF_DEFAULTCLASS)
992 			printf(" default");
993 		if (opts->rtsc_m2 != 0)
994 			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
995 			    opts->rtsc_m2, rtsc);
996 		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
997 		    opts->lssc_d != 0))
998 			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
999 			    opts->lssc_m2, lssc);
1000 		if (opts->ulsc_m2 != 0)
1001 			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
1002 			    opts->ulsc_m2, ulsc);
1003 		printf(" ) ");
1004 
1005 		return (1);
1006 	} else
1007 		return (0);
1008 }
1009 
1010 static int
1011 print_codel_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
1012 {
1013 	const struct codel_opts *opts;
1014 
1015 	opts = &a->pq_u.codel_opts;
1016 	if (opts->target || opts->interval || opts->ecn) {
1017 		printf("codel(");
1018 		if (opts->target)
1019 			printf(" target %d", opts->target);
1020 		if (opts->interval)
1021 			printf(" interval %d", opts->interval);
1022 		if (opts->ecn)
1023 			printf(" ecn");
1024 		printf(" ) ");
1025 
1026 		return (1);
1027 	}
1028 
1029 	return (0);
1030 }
1031 
1032 static int
1033 print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
1034 {
1035 	const struct fairq_opts		*opts;
1036 	const struct node_fairq_sc	*loc_lssc;
1037 
1038 	opts = &a->pq_u.fairq_opts;
1039 	if (qopts == NULL)
1040 		loc_lssc = NULL;
1041 	else
1042 		loc_lssc = &qopts->data.fairq_opts.linkshare;
1043 
1044 	if (opts->flags ||
1045 	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
1046 	    opts->lssc_d != 0))) {
1047 		printf("fairq(");
1048 		if (opts->flags & FARF_RED)
1049 			printf(" red");
1050 		if (opts->flags & FARF_ECN)
1051 			printf(" ecn");
1052 		if (opts->flags & FARF_RIO)
1053 			printf(" rio");
1054 		if (opts->flags & FARF_CODEL)
1055 			printf(" codel");
1056 		if (opts->flags & FARF_CLEARDSCP)
1057 			printf(" cleardscp");
1058 		if (opts->flags & FARF_DEFAULTCLASS)
1059 			printf(" default");
1060 		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
1061 		    opts->lssc_d != 0))
1062 			print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
1063 			    opts->lssc_m2, loc_lssc);
1064 		printf(" ) ");
1065 
1066 		return (1);
1067 	} else
1068 		return (0);
1069 }
1070 
1071 /*
1072  * admission control using generalized service curve
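 *
 * A gen_sc is a piecewise-linear curve stored as a sorted list of segments.
 * gsc_add_sc() below folds a two-piece service curve {m1, d, m2} into it as
 * slope m1 over [0, d) plus slope m2 over [d, INFINITY); e.g. (numbers are
 * purely illustrative) {m1 = 2Mb, d = 100, m2 = 1Mb} becomes exactly those
 * two segments, and the summed curve is then checked against a parent or
 * interface curve by is_gsc_under_sc().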
1073  */
1074 
1075 /* add a new service curve to a generalized service curve */
1076 static void
1077 gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
1078 {
1079 	if (is_sc_null(sc))
1080 		return;
1081 	if (sc->d != 0)
1082 		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
1083 	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
1084 }
1085 
1086 /*
1087  * check whether all points of a generalized service curve have
1088  * their y-coordinates no larger than a given two-piece linear
1089  * service curve.
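 *
 * Beyond the last stored point only slopes matter, so in addition to
 * checking every stored point the code below compares the final segment's
 * slope against m2 and, when that segment starts before d and its slope
 * exceeds m1, extrapolates it to d and compares against the two-piece
 * curve's value there.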
1090  */
1091 static int
1092 is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
1093 {
1094 	struct segment	*s, *last, *end;
1095 	double		 y;
1096 
1097 	if (is_sc_null(sc)) {
1098 		if (LIST_EMPTY(gsc))
1099 			return (1);
1100 		LIST_FOREACH(s, gsc, _next) {
1101 			if (s->m != 0)
1102 				return (0);
1103 		}
1104 		return (1);
1105 	}
1106 	/*
1107 	 * gsc has a dummy entry at the end with x = INFINITY.
1108 	 * loop through the entries up to this dummy one.
1109 	 */
1110 	end = gsc_getentry(gsc, INFINITY);
1111 	if (end == NULL)
1112 		return (1);
1113 	last = NULL;
1114 	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
1115 		if (s->y > sc_x2y(sc, s->x))
1116 			return (0);
1117 		last = s;
1118 	}
1119 	/* last now holds the real last segment */
1120 	if (last == NULL)
1121 		return (1);
1122 	if (last->m > sc->m2)
1123 		return (0);
1124 	if (last->x < sc->d && last->m > sc->m1) {
1125 		y = last->y + (sc->d - last->x) * last->m;
1126 		if (y > sc_x2y(sc, sc->d))
1127 			return (0);
1128 	}
1129 	return (1);
1130 }
1131 
1132 static void
1133 gsc_destroy(struct gen_sc *gsc)
1134 {
1135 	struct segment	*s;
1136 
1137 	while ((s = LIST_FIRST(gsc)) != NULL) {
1138 		LIST_REMOVE(s, _next);
1139 		free(s);
1140 	}
1141 }
1142 
1143 /*
1144  * return a segment entry starting at x.
1145  * if gsc has no entry starting at x, a new entry is created at x.
1146  */
1147 static struct segment *
1148 gsc_getentry(struct gen_sc *gsc, double x)
1149 {
1150 	struct segment	*new, *prev, *s;
1151 
1152 	prev = NULL;
1153 	LIST_FOREACH(s, gsc, _next) {
1154 		if (s->x == x)
1155 			return (s);	/* matching entry found */
1156 		else if (s->x < x)
1157 			prev = s;
1158 		else
1159 			break;
1160 	}
1161 
1162 	/* we have to create a new entry */
1163 	if ((new = calloc(1, sizeof(struct segment))) == NULL)
1164 		return (NULL);
1165 
1166 	new->x = x;
1167 	if (x == INFINITY || s == NULL)
1168 		new->d = 0;
1169 	else if (s->x == INFINITY)
1170 		new->d = INFINITY;
1171 	else
1172 		new->d = s->x - x;
1173 	if (prev == NULL) {
1174 		/* insert the new entry at the head of the list */
1175 		new->y = 0;
1176 		new->m = 0;
1177 		LIST_INSERT_HEAD(gsc, new, _next);
1178 	} else {
1179 		/*
1180 		 * the start point falls within the segment pointed to by
1181 		 * prev.  divide prev into 2 segments
1182 		 */
1183 		if (x == INFINITY) {
1184 			prev->d = INFINITY;
1185 			if (prev->m == 0)
1186 				new->y = prev->y;
1187 			else
1188 				new->y = INFINITY;
1189 		} else {
1190 			prev->d = x - prev->x;
1191 			new->y = prev->d * prev->m + prev->y;
1192 		}
1193 		new->m = prev->m;
1194 		LIST_INSERT_AFTER(prev, new, _next);
1195 	}
1196 	return (new);
1197 }
1198 
1199 /* add a segment to a generalized service curve */
1200 static int
1201 gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
1202 {
1203 	struct segment	*start, *end, *s;
1204 	double		 x2;
1205 
1206 	if (d == INFINITY)
1207 		x2 = INFINITY;
1208 	else
1209 		x2 = x + d;
1210 	start = gsc_getentry(gsc, x);
1211 	end = gsc_getentry(gsc, x2);
1212 	if (start == NULL || end == NULL)
1213 		return (-1);
1214 
1215 	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
1216 		s->m += m;
1217 		s->y += y + (s->x - x) * m;
1218 	}
1219 
1220 	end = gsc_getentry(gsc, INFINITY);
1221 	for (; s != end; s = LIST_NEXT(s, _next)) {
1222 		s->y += m * d;
1223 	}
1224 
1225 	return (0);
1226 }
1227 
1228 /* get y-projection of a service curve */
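/* i.e., y(x) = m1 * x for x <= d, and y(x) = m1 * d + m2 * (x - d) for x > d */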
1229 static double
1230 sc_x2y(struct service_curve *sc, double x)
1231 {
1232 	double	y;
1233 
1234 	if (x <= (double)sc->d)
1235 		/* y belongs to the 1st segment */
1236 		y = x * (double)sc->m1;
1237 	else
1238 		/* y belongs to the 2nd segment */
1239 		y = (double)sc->d * (double)sc->m1
1240 			+ (x - (double)sc->d) * (double)sc->m2;
1241 	return (y);
1242 }
1243 
1244 /*
1245  * misc utilities
1246  */
1247 #define	R2S_BUFS	8
1248 #define	RATESTR_MAX	16
1249 
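/*
 * Illustrative behaviour (example inputs only): rate2str(1500000.0) prints
 * as "1.50Mb" and rate2str(2e9) as "2Gb".  The small ring of static buffers
 * (R2S_BUFS) lets several results be used in a single printf() call.
 */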
1250 char *
1251 rate2str(double rate)
1252 {
1253 	char		*buf;
1254 	static char	 r2sbuf[R2S_BUFS][RATESTR_MAX];  /* ring buffer */
1255 	static int	 idx = 0;
1256 	int		 i;
1257 	static const char unit[] = " KMG";
1258 
1259 	buf = r2sbuf[idx++];
1260 	if (idx == R2S_BUFS)
1261 		idx = 0;
1262 
1263 	for (i = 0; rate >= 1000 && i <= 3; i++)
1264 		rate /= 1000;
1265 
1266 	if ((int)(rate * 100) % 100)
1267 		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
1268 	else
1269 		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);
1270 
1271 	return (buf);
1272 }
1273 
1274 #ifdef __FreeBSD__
1275 /*
1276  * XXX
1277  * FreeBSD does not have SIOCGIFDATA.
1278  * To emulate it, the DIOCGIFSPEED ioctl was added to pf.
1279  */
1280 u_int64_t
1281 getifspeed(int pfdev, char *ifname)
1282 {
1283 	struct pf_ifspeed io;
1284 
1285 	bzero(&io, sizeof io);
1286 	if (strlcpy(io.ifname, ifname, IFNAMSIZ) >=
1287 	    sizeof(io.ifname))
1288 		errx(1, "getifspeed: strlcpy");
1289 	if (ioctl(pfdev, DIOCGIFSPEED, &io) == -1)
1290 		err(1, "DIOCGIFSPEED");
1291 	return (io.baudrate);
1292 }
1293 #else
1294 u_int32_t
1295 getifspeed(char *ifname)
1296 {
1297 	int		s;
1298 	struct ifreq	ifr;
1299 	struct if_data	ifrdat;
1300 
1301 	s = get_query_socket();
1302 	bzero(&ifr, sizeof(ifr));
1303 	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1304 	    sizeof(ifr.ifr_name))
1305 		errx(1, "getifspeed: strlcpy");
1306 	ifr.ifr_data = (caddr_t)&ifrdat;
1307 	if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
1308 		err(1, "SIOCGIFDATA");
1309 	return ((u_int32_t)ifrdat.ifi_baudrate);
1310 }
1311 #endif
1312 
1313 u_long
1314 getifmtu(char *ifname)
1315 {
1316 	int		s;
1317 	struct ifreq	ifr;
1318 
1319 	s = get_query_socket();
1320 	bzero(&ifr, sizeof(ifr));
1321 	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1322 	    sizeof(ifr.ifr_name))
1323 		errx(1, "getifmtu: strlcpy");
1324 	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
1325 #ifdef __FreeBSD__
1326 		ifr.ifr_mtu = 1500;
1327 #else
1328 		err(1, "SIOCGIFMTU");
1329 #endif
1330 	if (ifr.ifr_mtu > 0)
1331 		return (ifr.ifr_mtu);
1332 	else {
1333 		warnx("could not get mtu for %s, assuming 1500", ifname);
1334 		return (1500);
1335 	}
1336 }
1337 
1338 int
1339 eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
1340     u_int64_t ref_bw)
1341 {
1342 	int	errors = 0;
1343 
1344 	switch (pa->scheduler) {
1345 	case ALTQT_CBQ:
1346 		pa->pq_u.cbq_opts = opts->data.cbq_opts;
1347 		break;
1348 	case ALTQT_PRIQ:
1349 		pa->pq_u.priq_opts = opts->data.priq_opts;
1350 		break;
1351 	case ALTQT_HFSC:
1352 		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
1353 		if (opts->data.hfsc_opts.linkshare.used) {
1354 			pa->pq_u.hfsc_opts.lssc_m1 =
1355 			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
1356 			    ref_bw);
1357 			pa->pq_u.hfsc_opts.lssc_m2 =
1358 			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
1359 			    ref_bw);
1360 			pa->pq_u.hfsc_opts.lssc_d =
1361 			    opts->data.hfsc_opts.linkshare.d;
1362 		}
1363 		if (opts->data.hfsc_opts.realtime.used) {
1364 			pa->pq_u.hfsc_opts.rtsc_m1 =
1365 			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
1366 			    ref_bw);
1367 			pa->pq_u.hfsc_opts.rtsc_m2 =
1368 			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
1369 			    ref_bw);
1370 			pa->pq_u.hfsc_opts.rtsc_d =
1371 			    opts->data.hfsc_opts.realtime.d;
1372 		}
1373 		if (opts->data.hfsc_opts.upperlimit.used) {
1374 			pa->pq_u.hfsc_opts.ulsc_m1 =
1375 			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
1376 			    ref_bw);
1377 			pa->pq_u.hfsc_opts.ulsc_m2 =
1378 			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
1379 			    ref_bw);
1380 			pa->pq_u.hfsc_opts.ulsc_d =
1381 			    opts->data.hfsc_opts.upperlimit.d;
1382 		}
1383 		break;
1384 	case ALTQT_FAIRQ:
1385 		pa->pq_u.fairq_opts.flags = opts->data.fairq_opts.flags;
1386 		pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
1387 		pa->pq_u.fairq_opts.hogs_m1 =
1388 			eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);
1389 
1390 		if (opts->data.fairq_opts.linkshare.used) {
1391 			pa->pq_u.fairq_opts.lssc_m1 =
1392 			    eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
1393 			    ref_bw);
1394 			pa->pq_u.fairq_opts.lssc_m2 =
1395 			    eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
1396 			    ref_bw);
1397 			pa->pq_u.fairq_opts.lssc_d =
1398 			    opts->data.fairq_opts.linkshare.d;
1399 		}
1400 		break;
1401 	case ALTQT_CODEL:
1402 		pa->pq_u.codel_opts.target = opts->data.codel_opts.target;
1403 		pa->pq_u.codel_opts.interval = opts->data.codel_opts.interval;
1404 		pa->pq_u.codel_opts.ecn = opts->data.codel_opts.ecn;
1405 		break;
1406 	default:
1407 		warnx("eval_queue_opts: unknown scheduler type %u",
1408 		    opts->qtype);
1409 		errors++;
1410 		break;
1411 	}
1412 
1413 	return (errors);
1414 }
1415 
1416 /*
1417  * If the absolute bandwidth is set, return the lesser of that value and the
1418  * reference bandwidth.  Limiting to the reference bandwidth allows simple
1419  * limiting of configured bandwidth parameters for schedulers that are
1420  * 32-bit limited, as the root/interface bandwidth (top-level reference
1421  * bandwidth) will be properly limited in that case.
1422  *
1423  * Otherwise, if the absolute bandwidth is not set, return the given
1424  * percentage of the reference bandwidth.
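 *
 * For example (illustrative values): bw_percent = 25 against a reference
 * bandwidth of 100,000,000 bps yields 25,000,000 bps.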
1425  */
1426 u_int64_t
1427 eval_bwspec(struct node_queue_bw *bw, u_int64_t ref_bw)
1428 {
1429 	if (bw->bw_absolute > 0)
1430 		return (MIN(bw->bw_absolute, ref_bw));
1431 
1432 	if (bw->bw_percent > 0)
1433 		return (ref_bw / 100 * bw->bw_percent);
1434 
1435 	return (0);
1436 }
1437 
1438 void
1439 print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
1440     const struct node_hfsc_sc *sc)
1441 {
1442 	printf(" %s", scname);
1443 
1444 	if (d != 0) {
1445 		printf("(");
1446 		if (sc != NULL && sc->m1.bw_percent > 0)
1447 			printf("%u%%", sc->m1.bw_percent);
1448 		else
1449 			printf("%s", rate2str((double)m1));
1450 		printf(" %u", d);
1451 	}
1452 
1453 	if (sc != NULL && sc->m2.bw_percent > 0)
1454 		printf(" %u%%", sc->m2.bw_percent);
1455 	else
1456 		printf(" %s", rate2str((double)m2));
1457 
1458 	if (d != 0)
1459 		printf(")");
1460 }
1461 
1462 void
1463 print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
1464     const struct node_fairq_sc *sc)
1465 {
1466 	printf(" %s", scname);
1467 
1468 	if (d != 0) {
1469 		printf("(");
1470 		if (sc != NULL && sc->m1.bw_percent > 0)
1471 			printf("%u%%", sc->m1.bw_percent);
1472 		else
1473 			printf("%s", rate2str((double)m1));
1474 		printf(" %u", d);
1475 	}
1476 
1477 	if (sc != NULL && sc->m2.bw_percent > 0)
1478 		printf(" %u%%", sc->m2.bw_percent);
1479 	else
1480 		printf(" %s", rate2str((double)m2));
1481 
1482 	if (d != 0)
1483 		printf(")");
1484 }
1485