xref: /freebsd/sbin/pfctl/pfctl_altq.c (revision ebacd8013fe5f7fdf9f6a5b286f6680dd2891036)
1 /*	$OpenBSD: pfctl_altq.c,v 1.93 2007/10/15 02:16:35 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2002
5  *	Sony Computer Science Laboratories Inc.
6  * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/cdefs.h>
22 __FBSDID("$FreeBSD$");
23 
24 #define PFIOC_USE_LATEST
25 #define _WANT_FREEBSD_BITSET
26 
27 #include <sys/types.h>
28 #include <sys/bitset.h>
29 #include <sys/ioctl.h>
30 #include <sys/socket.h>
31 
32 #include <net/if.h>
33 #include <netinet/in.h>
34 #include <net/pfvar.h>
35 
36 #include <err.h>
37 #include <errno.h>
38 #include <inttypes.h>
39 #include <limits.h>
40 #include <math.h>
41 #include <search.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #include <string.h>
45 #include <unistd.h>
46 
47 #include <net/altq/altq.h>
48 #include <net/altq/altq_cbq.h>
49 #include <net/altq/altq_codel.h>
50 #include <net/altq/altq_priq.h>
51 #include <net/altq/altq_hfsc.h>
52 #include <net/altq/altq_fairq.h>
53 
54 #include "pfctl_parser.h"
55 #include "pfctl.h"
56 
57 #define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))
58 
59 static STAILQ_HEAD(interfaces, pfctl_altq) interfaces = STAILQ_HEAD_INITIALIZER(interfaces);
60 static struct hsearch_data queue_map;
61 static struct hsearch_data if_map;
62 static struct hsearch_data qid_map;
63 
64 static struct pfctl_altq *pfaltq_lookup(char *ifname);
65 static struct pfctl_altq *qname_to_pfaltq(const char *, const char *);
66 static u_int32_t	 qname_to_qid(char *);
67 
68 static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *,
69 		    struct pfctl_altq *);
70 static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
71 static int	check_commit_cbq(int, int, struct pfctl_altq *);
72 static int	print_cbq_opts(const struct pf_altq *);
73 
74 static int	print_codel_opts(const struct pf_altq *,
75 		    const struct node_queue_opt *);
76 
77 static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *,
78 		    struct pfctl_altq *);
79 static int	check_commit_priq(int, int, struct pfctl_altq *);
80 static int	print_priq_opts(const struct pf_altq *);
81 
82 static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *,
83 		    struct pfctl_altq *, struct pfctl_altq *);
84 static int	check_commit_hfsc(int, int, struct pfctl_altq *);
85 static int	print_hfsc_opts(const struct pf_altq *,
86 		    const struct node_queue_opt *);
87 
88 static int	eval_pfqueue_fairq(struct pfctl *, struct pf_altq *,
89 		    struct pfctl_altq *, struct pfctl_altq *);
90 static int	print_fairq_opts(const struct pf_altq *,
91 		    const struct node_queue_opt *);
92 static int	check_commit_fairq(int, int, struct pfctl_altq *);
93 
94 static void		 gsc_add_sc(struct gen_sc *, struct service_curve *);
95 static int		 is_gsc_under_sc(struct gen_sc *,
96 			     struct service_curve *);
97 static struct segment	*gsc_getentry(struct gen_sc *, double);
98 static int		 gsc_add_seg(struct gen_sc *, double, double, double,
99 			     double);
100 static double		 sc_x2y(struct service_curve *, double);
101 
102 u_int32_t	 getifspeed(char *);
103 u_long		 getifmtu(char *);
104 int		 eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
105 		     u_int64_t);
106 u_int64_t	 eval_bwspec(struct node_queue_bw *, u_int64_t);
107 void		 print_hfsc_sc(const char *, u_int, u_int, u_int,
108 		     const struct node_hfsc_sc *);
109 void		 print_fairq_sc(const char *, u_int, u_int, u_int,
110 		     const struct node_fairq_sc *);
111 
112 static __attribute__((constructor)) void
113 pfctl_altq_init(void)
114 {
115 	/*
116 	 * As hdestroy() will never be called on these tables, it will be
117 	 * safe to use references into the stored data as keys.
118 	 */
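	/*
	 * For example, pfaltq_store() below uses altq->pa.ifname and
	 * altq->pa.qname (pointers into the stored records themselves) as
	 * keys, which is only safe because these tables live for the whole
	 * lifetime of the process.  The size hint of 0 should also be fine:
	 * FreeBSD's hsearch_r(3) grows its table on demand (an assumption
	 * about the libc implementation, not something this file enforces).
	 */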
119 	if (hcreate_r(0, &queue_map) == 0)
120 		err(1, "Failed to create altq queue map");
121 	if (hcreate_r(0, &if_map) == 0)
122 		err(1, "Failed to create altq interface map");
123 	if (hcreate_r(0, &qid_map) == 0)
124 		err(1, "Failed to create altq queue id map");
125 }
126 
127 void
128 pfaltq_store(struct pf_altq *a)
129 {
130 	struct pfctl_altq	*altq;
131 	ENTRY 			 item;
132 	ENTRY			*ret_item;
133 	size_t			 key_size;
134 
135 	if ((altq = malloc(sizeof(*altq))) == NULL)
136 		err(1, "queue malloc");
137 	memcpy(&altq->pa, a, sizeof(struct pf_altq));
138 	memset(&altq->meta, 0, sizeof(altq->meta));
139 
140 	if (a->qname[0] == 0) {
141 		item.key = altq->pa.ifname;
142 		item.data = altq;
143 		if (hsearch_r(item, ENTER, &ret_item, &if_map) == 0)
144 			err(1, "interface map insert");
145 		STAILQ_INSERT_TAIL(&interfaces, altq, meta.link);
146 	} else {
147 		key_size = sizeof(a->ifname) + sizeof(a->qname);
148 		if ((item.key = malloc(key_size)) == NULL)
149 			err(1, "queue map key malloc");
150 		snprintf(item.key, key_size, "%s:%s", a->ifname, a->qname);
151 		item.data = altq;
152 		if (hsearch_r(item, ENTER, &ret_item, &queue_map) == 0)
153 			err(1, "queue map insert");
154 
155 		item.key = altq->pa.qname;
156 		item.data = &altq->pa.qid;
157 		if (hsearch_r(item, ENTER, &ret_item, &qid_map) == 0)
158 			err(1, "qid map insert");
159 	}
160 }
161 
162 static struct pfctl_altq *
163 pfaltq_lookup(char *ifname)
164 {
165 	ENTRY	 item;
166 	ENTRY	*ret_item;
167 
168 	item.key = ifname;
169 	if (hsearch_r(item, FIND, &ret_item, &if_map) == 0)
170 		return (NULL);
171 
172 	return (ret_item->data);
173 }
174 
175 static struct pfctl_altq *
176 qname_to_pfaltq(const char *qname, const char *ifname)
177 {
178 	ENTRY	 item;
179 	ENTRY	*ret_item;
180 	char	 key[IFNAMSIZ + PF_QNAME_SIZE];
181 
182 	item.key = key;
183 	snprintf(item.key, sizeof(key), "%s:%s", ifname, qname);
184 	if (hsearch_r(item, FIND, &ret_item, &queue_map) == 0)
185 		return (NULL);
186 
187 	return (ret_item->data);
188 }
189 
190 static u_int32_t
191 qname_to_qid(char *qname)
192 {
193 	ENTRY	 item;
194 	ENTRY	*ret_item;
195 	uint32_t qid;
196 
197 	/*
198 	 * We guarantee that same named queues on different interfaces
199 	 * have the same qid.
200 	 */
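	/*
	 * This holds because qid_map is keyed by the queue name alone (see
	 * pfaltq_store()): the first queue registered under a given name
	 * fixes the qid that lookups on any interface will return.
	 */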
201 	item.key = qname;
202 	if (hsearch_r(item, FIND, &ret_item, &qid_map) == 0)
203 		return (0);
204 
205 	qid = *(uint32_t *)ret_item->data;
206 	return (qid);
207 }
208 
209 void
210 print_altq(const struct pf_altq *a, unsigned int level,
211     struct node_queue_bw *bw, struct node_queue_opt *qopts)
212 {
213 	if (a->qname[0] != 0) {
214 		print_queue(a, level, bw, 1, qopts);
215 		return;
216 	}
217 
218 #ifdef __FreeBSD__
219 	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
220 		printf("INACTIVE ");
221 #endif
222 
223 	printf("altq on %s ", a->ifname);
224 
225 	switch (a->scheduler) {
226 	case ALTQT_CBQ:
227 		if (!print_cbq_opts(a))
228 			printf("cbq ");
229 		break;
230 	case ALTQT_PRIQ:
231 		if (!print_priq_opts(a))
232 			printf("priq ");
233 		break;
234 	case ALTQT_HFSC:
235 		if (!print_hfsc_opts(a, qopts))
236 			printf("hfsc ");
237 		break;
238 	case ALTQT_FAIRQ:
239 		if (!print_fairq_opts(a, qopts))
240 			printf("fairq ");
241 		break;
242 	case ALTQT_CODEL:
243 		if (!print_codel_opts(a, qopts))
244 			printf("codel ");
245 		break;
246 	}
247 
248 	if (bw != NULL && bw->bw_percent > 0) {
249 		if (bw->bw_percent < 100)
250 			printf("bandwidth %u%% ", bw->bw_percent);
251 	} else
252 		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));
253 
254 	if (a->qlimit != DEFAULT_QLIMIT)
255 		printf("qlimit %u ", a->qlimit);
256 	printf("tbrsize %u ", a->tbrsize);
257 }
258 
259 void
260 print_queue(const struct pf_altq *a, unsigned int level,
261     struct node_queue_bw *bw, int print_interface,
262     struct node_queue_opt *qopts)
263 {
264 	unsigned int	i;
265 
266 #ifdef __FreeBSD__
267 	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
268 		printf("INACTIVE ");
269 #endif
270 	printf("queue ");
271 	for (i = 0; i < level; ++i)
272 		printf(" ");
273 	printf("%s ", a->qname);
274 	if (print_interface)
275 		printf("on %s ", a->ifname);
276 	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
277 		a->scheduler == ALTQT_FAIRQ) {
278 		if (bw != NULL && bw->bw_percent > 0) {
279 			if (bw->bw_percent < 100)
280 				printf("bandwidth %u%% ", bw->bw_percent);
281 		} else
282 			printf("bandwidth %s ", rate2str((double)a->bandwidth));
283 	}
284 	if (a->priority != DEFAULT_PRIORITY)
285 		printf("priority %u ", a->priority);
286 	if (a->qlimit != DEFAULT_QLIMIT)
287 		printf("qlimit %u ", a->qlimit);
288 	switch (a->scheduler) {
289 	case ALTQT_CBQ:
290 		print_cbq_opts(a);
291 		break;
292 	case ALTQT_PRIQ:
293 		print_priq_opts(a);
294 		break;
295 	case ALTQT_HFSC:
296 		print_hfsc_opts(a, qopts);
297 		break;
298 	case ALTQT_FAIRQ:
299 		print_fairq_opts(a, qopts);
300 		break;
301 	}
302 }
303 
304 /*
305  * eval_pfaltq computes the discipline parameters.
306  */
307 int
308 eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
309     struct node_queue_opt *opts)
310 {
311 	u_int64_t	rate;
312 	u_int		size, errors = 0;
313 
314 	if (bw->bw_absolute > 0)
315 		pa->ifbandwidth = bw->bw_absolute;
316 	else
317 		if ((rate = getifspeed(pa->ifname)) == 0) {
318 			fprintf(stderr, "interface %s does not know its bandwidth, "
319 			    "please specify an absolute bandwidth\n",
320 			    pa->ifname);
321 			errors++;
322 		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
323 			pa->ifbandwidth = rate;
324 
325 	/*
326 	 * Limit bandwidth to UINT_MAX for schedulers that aren't 64-bit ready.
327 	 */
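	/*
	 * Illustrative numbers: UINT_MAX is roughly 4.29e9, so e.g. a
	 * 10 Gbps interface running cbq or priq is clamped to about
	 * 4.29 Gbps by the check below.
	 */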
328 	if ((pa->scheduler != ALTQT_HFSC) && (pa->ifbandwidth > UINT_MAX)) {
329 		pa->ifbandwidth = UINT_MAX;
330 		warnx("interface %s bandwidth limited to %" PRIu64 " bps "
331 		    "because selected scheduler is 32-bit limited\n", pa->ifname,
332 		    pa->ifbandwidth);
333 	}
334 	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);
335 
336 	/* if tbrsize is not specified, use heuristics */
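	/*
	 * Worked example (illustrative): a 1 Gbps link with a 1500 byte MTU
	 * falls into the 200 Mbps - 2.5 Gbps bucket below, giving size = 24
	 * and tbrsize = 24 * 1500 = 36000 bytes.
	 */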
337 	if (pa->tbrsize == 0) {
338 		rate = pa->ifbandwidth;
339 		if (rate <= 1 * 1000 * 1000)
340 			size = 1;
341 		else if (rate <= 10 * 1000 * 1000)
342 			size = 4;
343 		else if (rate <= 200 * 1000 * 1000)
344 			size = 8;
345 		else if (rate <= 2500 * 1000 * 1000ULL)
346 			size = 24;
347 		else
348 			size = 128;
349 		size = size * getifmtu(pa->ifname);
350 		pa->tbrsize = size;
351 	}
352 	return (errors);
353 }
354 
355 /*
356  * check_commit_altq does consistency check for each interface
357  */
358 int
359 check_commit_altq(int dev, int opts)
360 {
361 	struct pfctl_altq	*if_ppa;
362 	int			 error = 0;
363 
364 	/* call the discipline check for each interface. */
365 	STAILQ_FOREACH(if_ppa, &interfaces, meta.link) {
366 		switch (if_ppa->pa.scheduler) {
367 		case ALTQT_CBQ:
368 			error = check_commit_cbq(dev, opts, if_ppa);
369 			break;
370 		case ALTQT_PRIQ:
371 			error = check_commit_priq(dev, opts, if_ppa);
372 			break;
373 		case ALTQT_HFSC:
374 			error = check_commit_hfsc(dev, opts, if_ppa);
375 			break;
376 		case ALTQT_FAIRQ:
377 			error = check_commit_fairq(dev, opts, if_ppa);
378 			break;
379 		default:
380 			break;
381 		}
382 	}
383 	return (error);
384 }
385 
386 /*
387  * eval_pfqueue computes the queue parameters.
388  */
389 int
390 eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
391     struct node_queue_opt *opts)
392 {
393 	/* should be merged with expand_queue */
394 	struct pfctl_altq	*if_ppa, *parent;
395 	int		 	 error = 0;
396 
397 	/* find the corresponding interface and copy fields used by queues */
398 	if ((if_ppa = pfaltq_lookup(pa->ifname)) == NULL) {
399 		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
400 		return (1);
401 	}
402 	pa->scheduler = if_ppa->pa.scheduler;
403 	pa->ifbandwidth = if_ppa->pa.ifbandwidth;
404 
405 	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
406 		fprintf(stderr, "queue %s already exists on interface %s\n",
407 		    pa->qname, pa->ifname);
408 		return (1);
409 	}
410 	pa->qid = qname_to_qid(pa->qname);
411 
412 	parent = NULL;
413 	if (pa->parent[0] != 0) {
414 		parent = qname_to_pfaltq(pa->parent, pa->ifname);
415 		if (parent == NULL) {
416 			fprintf(stderr, "parent %s not found for %s\n",
417 			    pa->parent, pa->qname);
418 			return (1);
419 		}
420 		pa->parent_qid = parent->pa.qid;
421 	}
422 	if (pa->qlimit == 0)
423 		pa->qlimit = DEFAULT_QLIMIT;
424 
425 	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
426 		pa->scheduler == ALTQT_FAIRQ) {
427 		pa->bandwidth = eval_bwspec(bw,
428 		    parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth);
429 
430 		if (pa->bandwidth > pa->ifbandwidth) {
431 			fprintf(stderr, "bandwidth for %s higher than "
432 			    "interface\n", pa->qname);
433 			return (1);
434 		}
435 		/*
436 		 * If not HFSC, then check that the sum of the child
437 		 * bandwidths is less than the parent's bandwidth.  For
438 		 * HFSC, the equivalent concept is to check that the sum of
439 		 * the child linkshare service curves are under the parent's
440 		 * linkshare service curve, and that check is performed by
441 		 * eval_pfqueue_hfsc().
442 		 */
443 		if ((parent != NULL) && (pa->scheduler != ALTQT_HFSC)) {
444 			if (pa->bandwidth > parent->pa.bandwidth) {
445 				warnx("bandwidth for %s higher than parent",
446 				    pa->qname);
447 				return (1);
448 			}
449 			parent->meta.bwsum += pa->bandwidth;
450 			if (parent->meta.bwsum > parent->pa.bandwidth) {
451 				warnx("the sum of the child bandwidths (%" PRIu64
452 				    ") is higher than parent \"%s\" (%" PRIu64 ")",
453 				    parent->meta.bwsum, parent->pa.qname,
454 				    parent->pa.bandwidth);
455 			}
456 		}
457 	}
458 
459 	if (eval_queue_opts(pa, opts,
460 		parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth))
461 		return (1);
462 
463 	if (parent != NULL)
464 		parent->meta.children++;
465 
466 	switch (pa->scheduler) {
467 	case ALTQT_CBQ:
468 		error = eval_pfqueue_cbq(pf, pa, if_ppa);
469 		break;
470 	case ALTQT_PRIQ:
471 		error = eval_pfqueue_priq(pf, pa, if_ppa);
472 		break;
473 	case ALTQT_HFSC:
474 		error = eval_pfqueue_hfsc(pf, pa, if_ppa, parent);
475 		break;
476 	case ALTQT_FAIRQ:
477 		error = eval_pfqueue_fairq(pf, pa, if_ppa, parent);
478 		break;
479 	default:
480 		break;
481 	}
482 	return (error);
483 }
484 
485 /*
486  * CBQ support functions
487  */
488 #define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
489 #define	RM_NS_PER_SEC	(1000000000)
490 
491 static int
492 eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
493 {
494 	struct cbq_opts	*opts;
495 	u_int		 ifmtu;
496 
497 	if (pa->priority >= CBQ_MAXPRI) {
498 		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
499 		return (-1);
500 	}
501 
502 	ifmtu = getifmtu(pa->ifname);
503 	opts = &pa->pq_u.cbq_opts;
504 
505 	if (opts->pktsize == 0) {	/* use default */
506 		opts->pktsize = ifmtu;
507 		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
508 			opts->pktsize &= ~MCLBYTES;
509 	} else if (opts->pktsize > ifmtu)
510 		opts->pktsize = ifmtu;
511 	if (opts->maxpktsize == 0)	/* use default */
512 		opts->maxpktsize = ifmtu;
513 	else if (opts->maxpktsize > ifmtu)
514 		opts->maxpktsize = ifmtu;
515 
516 	if (opts->pktsize > opts->maxpktsize)
517 		opts->pktsize = opts->maxpktsize;
518 
519 	if (pa->parent[0] == 0)
520 		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);
521 
522 	if (pa->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
523 		if_ppa->meta.root_classes++;
524 	if (pa->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
525 		if_ppa->meta.default_classes++;
526 
527 	cbq_compute_idletime(pf, pa);
528 	return (0);
529 }
530 
531 /*
532  * compute ns_per_byte, maxidle, minidle, and offtime
533  */
534 static int
535 cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
536 {
537 	struct cbq_opts	*opts;
538 	double		 maxidle_s, maxidle, minidle;
539 	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
540 	double		 z, g, f, gton, gtom;
541 	u_int		 minburst, maxburst;
542 
543 	opts = &pa->pq_u.cbq_opts;
544 	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
545 	minburst = opts->minburst;
546 	maxburst = opts->maxburst;
547 
548 	if (pa->bandwidth == 0)
549 		f = 0.0001;	/* small enough? */
550 	else
551 		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);
552 
553 	nsPerByte = ifnsPerByte / f;
554 	ptime = (double)opts->pktsize * ifnsPerByte;
555 	cptime = ptime * (1.0 - f) / f;
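	/*
	 * Illustrative numbers: on a 100 Mbps interface, ifnsPerByte is
	 * 8e9 / 1e8 = 80 ns per byte; a queue granted 10% of the link has
	 * f = 0.1 and therefore nsPerByte = 800.
	 */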
556 
557 	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
558 		/*
559 		 * this causes integer overflow in kernel!
560 		 * (bandwidth < 6Kbps when max_pkt_size=1500)
561 		 */
562 		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
563 			warnx("queue bandwidth must be larger than %s",
564 			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
565 			    (double)INT_MAX * (double)pa->ifbandwidth));
566 			fprintf(stderr, "cbq: queue %s is too slow!\n",
567 			    pa->qname);
568 		}
569 		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
570 	}
571 
572 	if (maxburst == 0) {  /* use default */
573 		if (cptime > 10.0 * 1000000)
574 			maxburst = 4;
575 		else
576 			maxburst = 16;
577 	}
578 	if (minburst == 0)  /* use default */
579 		minburst = 2;
580 	if (minburst > maxburst)
581 		minburst = maxburst;
582 
583 	z = (double)(1 << RM_FILTER_GAIN);
584 	g = (1.0 - 1.0 / z);
585 	gton = pow(g, (double)maxburst);
586 	gtom = pow(g, (double)(minburst-1));
587 	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
588 	maxidle_s = (1.0 - g);
589 	if (maxidle > maxidle_s)
590 		maxidle = ptime * maxidle;
591 	else
592 		maxidle = ptime * maxidle_s;
593 	offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
594 	minidle = -((double)opts->maxpktsize * (double)nsPerByte);
595 
596 	/* scale parameters */
597 	maxidle = ((maxidle * 8.0) / nsPerByte) *
598 	    pow(2.0, (double)RM_FILTER_GAIN);
599 	offtime = (offtime * 8.0) / nsPerByte *
600 	    pow(2.0, (double)RM_FILTER_GAIN);
601 	minidle = ((minidle * 8.0) / nsPerByte) *
602 	    pow(2.0, (double)RM_FILTER_GAIN);
603 
604 	maxidle = maxidle / 1000.0;
605 	offtime = offtime / 1000.0;
606 	minidle = minidle / 1000.0;
607 
608 	opts->minburst = minburst;
609 	opts->maxburst = maxburst;
610 	opts->ns_per_byte = (u_int)nsPerByte;
611 	opts->maxidle = (u_int)fabs(maxidle);
612 	opts->minidle = (int)minidle;
613 	opts->offtime = (u_int)fabs(offtime);
614 
615 	return (0);
616 }
617 
618 static int
619 check_commit_cbq(int dev, int opts, struct pfctl_altq *if_ppa)
620 {
621 	int	error = 0;
622 
623 	/*
624 	 * check if cbq has one root queue and one default queue
625 	 * for this interface
626 	 */
627 	if (if_ppa->meta.root_classes != 1) {
628 		warnx("should have one root queue on %s", if_ppa->pa.ifname);
629 		error++;
630 	}
631 	if (if_ppa->meta.default_classes != 1) {
632 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
633 		error++;
634 	}
635 	return (error);
636 }
637 
638 static int
639 print_cbq_opts(const struct pf_altq *a)
640 {
641 	const struct cbq_opts	*opts;
642 
643 	opts = &a->pq_u.cbq_opts;
644 	if (opts->flags) {
645 		printf("cbq(");
646 		if (opts->flags & CBQCLF_RED)
647 			printf(" red");
648 		if (opts->flags & CBQCLF_ECN)
649 			printf(" ecn");
650 		if (opts->flags & CBQCLF_RIO)
651 			printf(" rio");
652 		if (opts->flags & CBQCLF_CODEL)
653 			printf(" codel");
654 		if (opts->flags & CBQCLF_CLEARDSCP)
655 			printf(" cleardscp");
656 		if (opts->flags & CBQCLF_FLOWVALVE)
657 			printf(" flowvalve");
658 		if (opts->flags & CBQCLF_BORROW)
659 			printf(" borrow");
660 		if (opts->flags & CBQCLF_WRR)
661 			printf(" wrr");
662 		if (opts->flags & CBQCLF_EFFICIENT)
663 			printf(" efficient");
664 		if (opts->flags & CBQCLF_ROOTCLASS)
665 			printf(" root");
666 		if (opts->flags & CBQCLF_DEFCLASS)
667 			printf(" default");
668 		printf(" ) ");
669 
670 		return (1);
671 	} else
672 		return (0);
673 }
674 
675 /*
676  * PRIQ support functions
677  */
678 static int
679 eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
680 {
681 
682 	if (pa->priority >= PRIQ_MAXPRI) {
683 		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
684 		return (-1);
685 	}
686 	if (BIT_ISSET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris)) {
687 		warnx("%s does not have a unique priority on interface %s",
688 		    pa->qname, pa->ifname);
689 		return (-1);
690 	} else
691 		BIT_SET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris);
692 
693 	if (pa->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
694 		if_ppa->meta.default_classes++;
695 	return (0);
696 }
697 
698 static int
699 check_commit_priq(int dev, int opts, struct pfctl_altq *if_ppa)
700 {
701 
702 	/*
703 	 * check if priq has one default class for this interface
704 	 */
705 	if (if_ppa->meta.default_classes != 1) {
706 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
707 		return (1);
708 	}
709 	return (0);
710 }
711 
712 static int
713 print_priq_opts(const struct pf_altq *a)
714 {
715 	const struct priq_opts	*opts;
716 
717 	opts = &a->pq_u.priq_opts;
718 
719 	if (opts->flags) {
720 		printf("priq(");
721 		if (opts->flags & PRCF_RED)
722 			printf(" red");
723 		if (opts->flags & PRCF_ECN)
724 			printf(" ecn");
725 		if (opts->flags & PRCF_RIO)
726 			printf(" rio");
727 		if (opts->flags & PRCF_CODEL)
728 			printf(" codel");
729 		if (opts->flags & PRCF_CLEARDSCP)
730 			printf(" cleardscp");
731 		if (opts->flags & PRCF_DEFAULTCLASS)
732 			printf(" default");
733 		printf(" ) ");
734 
735 		return (1);
736 	} else
737 		return (0);
738 }
739 
740 /*
741  * HFSC support functions
742  */
743 static int
744 eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa,
745     struct pfctl_altq *parent)
746 {
747 	struct hfsc_opts_v1	*opts;
748 	struct service_curve	 sc;
749 
750 	opts = &pa->pq_u.hfsc_opts;
751 
752 	if (parent == NULL) {
753 		/* root queue */
754 		opts->lssc_m1 = pa->ifbandwidth;
755 		opts->lssc_m2 = pa->ifbandwidth;
756 		opts->lssc_d = 0;
757 		return (0);
758 	}
759 
760 	/* First child initializes the parent's service curve accumulators. */
761 	if (parent->meta.children == 1) {
762 		LIST_INIT(&parent->meta.rtsc);
763 		LIST_INIT(&parent->meta.lssc);
764 	}
765 
766 	if (parent->pa.pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
767 		warnx("adding %s would make default queue %s not a leaf",
768 		    pa->qname, pa->parent);
769 		return (-1);
770 	}
771 
772 	if (pa->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS)
773 		if_ppa->meta.default_classes++;
774 
775 	/* if link_share is not specified, use bandwidth */
776 	if (opts->lssc_m2 == 0)
777 		opts->lssc_m2 = pa->bandwidth;
778 
779 	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
780 	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
781 	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
782 		warnx("m2 is zero for %s", pa->qname);
783 		return (-1);
784 	}
785 
786 	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
787 	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
788 	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
789 		warnx("m1 must be zero for convex curve: %s", pa->qname);
790 		return (-1);
791 	}
792 
793 	/*
794 	 * admission control:
795 	 * for the real-time service curve, the sum of the service curves
796 	 * should not exceed 80% of the interface bandwidth.  20% is reserved
797 	 * not to over-commit the actual interface bandwidth.
798 	 * for the linkshare service curve, the sum of the child service
799 	 * curves should not exceed the parent service curve.
800 	 * for the upper-limit service curve, the assigned bandwidth should
801 	 * be smaller than the interface bandwidth, and the upper-limit should
802 	 * be larger than the real-time service curve when both are defined.
803 	 */
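	/*
	 * Illustrative numbers: on a 100 Mbps interface the accumulated
	 * real-time curves of the queues under one parent must stay below a
	 * flat 80 Mbps curve (80% of the link), and the accumulated
	 * linkshare curves must stay below the parent's own linkshare curve
	 * at every point.
	 */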
804 
805 	/* check the real-time service curve.  reserve 20% of interface bw */
806 	if (opts->rtsc_m2 != 0) {
807 		/* add this queue to the sum */
808 		sc.m1 = opts->rtsc_m1;
809 		sc.d = opts->rtsc_d;
810 		sc.m2 = opts->rtsc_m2;
811 		gsc_add_sc(&parent->meta.rtsc, &sc);
812 		/* compare the sum with 80% of the interface */
813 		sc.m1 = 0;
814 		sc.d = 0;
815 		sc.m2 = pa->ifbandwidth / 100 * 80;
816 		if (!is_gsc_under_sc(&parent->meta.rtsc, &sc)) {
817 			warnx("real-time sc exceeds 80%% of the interface "
818 			    "bandwidth (%s)", rate2str((double)sc.m2));
819 			return (-1);
820 		}
821 	}
822 
823 	/* check the linkshare service curve. */
824 	if (opts->lssc_m2 != 0) {
825 		/* add this queue to the child sum */
826 		sc.m1 = opts->lssc_m1;
827 		sc.d = opts->lssc_d;
828 		sc.m2 = opts->lssc_m2;
829 		gsc_add_sc(&parent->meta.lssc, &sc);
830 		/* compare the sum of the children with parent's sc */
831 		sc.m1 = parent->pa.pq_u.hfsc_opts.lssc_m1;
832 		sc.d = parent->pa.pq_u.hfsc_opts.lssc_d;
833 		sc.m2 = parent->pa.pq_u.hfsc_opts.lssc_m2;
834 		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
835 			warnx("linkshare sc exceeds parent's sc");
836 			return (-1);
837 		}
838 	}
839 
840 	/* check the upper-limit service curve. */
841 	if (opts->ulsc_m2 != 0) {
842 		if (opts->ulsc_m1 > pa->ifbandwidth ||
843 		    opts->ulsc_m2 > pa->ifbandwidth) {
844 			warnx("upper-limit larger than interface bandwidth");
845 			return (-1);
846 		}
847 		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
848 			warnx("upper-limit sc smaller than real-time sc");
849 			return (-1);
850 		}
851 	}
852 
853 	return (0);
854 }
855 
856 /*
857  * FAIRQ support functions
858  */
859 static int
860 eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa,
861     struct pfctl_altq *if_ppa, struct pfctl_altq *parent)
862 {
863 	struct fairq_opts	*opts;
864 	struct service_curve	 sc;
865 
866 	opts = &pa->pq_u.fairq_opts;
867 
868 	if (parent == NULL) {
869 		/* root queue */
870 		opts->lssc_m1 = pa->ifbandwidth;
871 		opts->lssc_m2 = pa->ifbandwidth;
872 		opts->lssc_d = 0;
873 		return (0);
874 	}
875 
876 	/* First child initializes the parent's service curve accumulator. */
877 	if (parent->meta.children == 1)
878 		LIST_INIT(&parent->meta.lssc);
879 
880 	if (parent->pa.pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
881 		warnx("adding %s would make default queue %s not a leaf",
882 		    pa->qname, pa->parent);
883 		return (-1);
884 	}
885 
886 	if (pa->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS)
887 		if_ppa->meta.default_classes++;
888 
889 	/* if link_share is not specified, use bandwidth */
890 	if (opts->lssc_m2 == 0)
891 		opts->lssc_m2 = pa->bandwidth;
892 
893 	/*
894 	 * admission control:
895 	 * for the link-sharing service curve, the sum of the child service
896 	 * curves should not exceed the parent service curve.
897 	 * fairq, unlike hfsc, defines neither a real-time nor an upper-limit
898 	 * service curve, so the link-sharing check below is the only
899 	 * admission-control check performed here.
900 	 */
904 
905 	/* check the linkshare service curve. */
906 	if (opts->lssc_m2 != 0) {
907 		/* add this queue to the child sum */
908 		sc.m1 = opts->lssc_m1;
909 		sc.d = opts->lssc_d;
910 		sc.m2 = opts->lssc_m2;
911 		gsc_add_sc(&parent->meta.lssc, &sc);
912 		/* compare the sum of the children with parent's sc */
913 		sc.m1 = parent->pa.pq_u.fairq_opts.lssc_m1;
914 		sc.d = parent->pa.pq_u.fairq_opts.lssc_d;
915 		sc.m2 = parent->pa.pq_u.fairq_opts.lssc_m2;
916 		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
917 			warnx("link-sharing sc exceeds parent's sc");
918 			return (-1);
919 		}
920 	}
921 
922 	return (0);
923 }
924 
925 static int
926 check_commit_hfsc(int dev, int opts, struct pfctl_altq *if_ppa)
927 {
928 
929 	/* check if hfsc has one default queue for this interface */
930 	if (if_ppa->meta.default_classes != 1) {
931 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
932 		return (1);
933 	}
934 	return (0);
935 }
936 
937 static int
938 check_commit_fairq(int dev __unused, int opts __unused, struct pfctl_altq *if_ppa)
939 {
940 
941 	/* check if fairq has one default queue for this interface */
942 	if (if_ppa->meta.default_classes != 1) {
943 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
944 		return (1);
945 	}
946 	return (0);
947 }
948 
949 static int
950 print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
951 {
952 	const struct hfsc_opts_v1	*opts;
953 	const struct node_hfsc_sc	*rtsc, *lssc, *ulsc;
954 
955 	opts = &a->pq_u.hfsc_opts;
956 	if (qopts == NULL)
957 		rtsc = lssc = ulsc = NULL;
958 	else {
959 		rtsc = &qopts->data.hfsc_opts.realtime;
960 		lssc = &qopts->data.hfsc_opts.linkshare;
961 		ulsc = &qopts->data.hfsc_opts.upperlimit;
962 	}
963 
964 	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
965 	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
966 	    opts->lssc_d != 0))) {
967 		printf("hfsc(");
968 		if (opts->flags & HFCF_RED)
969 			printf(" red");
970 		if (opts->flags & HFCF_ECN)
971 			printf(" ecn");
972 		if (opts->flags & HFCF_RIO)
973 			printf(" rio");
974 		if (opts->flags & HFCF_CODEL)
975 			printf(" codel");
976 		if (opts->flags & HFCF_CLEARDSCP)
977 			printf(" cleardscp");
978 		if (opts->flags & HFCF_DEFAULTCLASS)
979 			printf(" default");
980 		if (opts->rtsc_m2 != 0)
981 			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
982 			    opts->rtsc_m2, rtsc);
983 		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
984 		    opts->lssc_d != 0))
985 			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
986 			    opts->lssc_m2, lssc);
987 		if (opts->ulsc_m2 != 0)
988 			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
989 			    opts->ulsc_m2, ulsc);
990 		printf(" ) ");
991 
992 		return (1);
993 	} else
994 		return (0);
995 }
996 
997 static int
998 print_codel_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
999 {
1000 	const struct codel_opts *opts;
1001 
1002 	opts = &a->pq_u.codel_opts;
1003 	if (opts->target || opts->interval || opts->ecn) {
1004 		printf("codel(");
1005 		if (opts->target)
1006 			printf(" target %d", opts->target);
1007 		if (opts->interval)
1008 			printf(" interval %d", opts->interval);
1009 		if (opts->ecn)
1010 			printf(" ecn");
1011 		printf(" ) ");
1012 
1013 		return (1);
1014 	}
1015 
1016 	return (0);
1017 }
1018 
1019 static int
1020 print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
1021 {
1022 	const struct fairq_opts		*opts;
1023 	const struct node_fairq_sc	*loc_lssc;
1024 
1025 	opts = &a->pq_u.fairq_opts;
1026 	if (qopts == NULL)
1027 		loc_lssc = NULL;
1028 	else
1029 		loc_lssc = &qopts->data.fairq_opts.linkshare;
1030 
1031 	if (opts->flags ||
1032 	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
1033 	    opts->lssc_d != 0))) {
1034 		printf("fairq(");
1035 		if (opts->flags & FARF_RED)
1036 			printf(" red");
1037 		if (opts->flags & FARF_ECN)
1038 			printf(" ecn");
1039 		if (opts->flags & FARF_RIO)
1040 			printf(" rio");
1041 		if (opts->flags & FARF_CODEL)
1042 			printf(" codel");
1043 		if (opts->flags & FARF_CLEARDSCP)
1044 			printf(" cleardscp");
1045 		if (opts->flags & FARF_DEFAULTCLASS)
1046 			printf(" default");
1047 		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
1048 		    opts->lssc_d != 0))
1049 			print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
1050 			    opts->lssc_m2, loc_lssc);
1051 		printf(" ) ");
1052 
1053 		return (1);
1054 	} else
1055 		return (0);
1056 }
1057 
1058 /*
1059  * admission control using generalized service curve
1060  */
1061 
1062 /* add a new service curve to a generalized service curve */
1063 static void
1064 gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
1065 {
1066 	if (is_sc_null(sc))
1067 		return;
1068 	if (sc->d != 0)
1069 		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
1070 	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
1071 }
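/*
 * For example (illustrative values): adding sc = {m1 = 10M, d = 20, m2 = 5M}
 * contributes one segment of slope 10M over [0, 20) and one segment of slope
 * 5M over [20, INFINITY) to the generalized curve.
 */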
1072 
1073 /*
1074  * check whether all points of a generalized service curve have
1075  * their y-coordinates no larger than a given two-piece linear
1076  * service curve.
1077  */
1078 static int
1079 is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
1080 {
1081 	struct segment	*s, *last, *end;
1082 	double		 y;
1083 
1084 	if (is_sc_null(sc)) {
1085 		if (LIST_EMPTY(gsc))
1086 			return (1);
1087 		LIST_FOREACH(s, gsc, _next) {
1088 			if (s->m != 0)
1089 				return (0);
1090 		}
1091 		return (1);
1092 	}
1093 	/*
1094 	 * gsc has a dummy entry at the end with x = INFINITY.
1095 	 * loop through up to this dummy entry.
1096 	 */
1097 	end = gsc_getentry(gsc, INFINITY);
1098 	if (end == NULL)
1099 		return (1);
1100 	last = NULL;
1101 	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
1102 		if (s->y > sc_x2y(sc, s->x))
1103 			return (0);
1104 		last = s;
1105 	}
1106 	/* last now holds the real last segment */
1107 	if (last == NULL)
1108 		return (1);
1109 	if (last->m > sc->m2)
1110 		return (0);
1111 	if (last->x < sc->d && last->m > sc->m1) {
1112 		y = last->y + (sc->d - last->x) * last->m;
1113 		if (y > sc_x2y(sc, sc->d))
1114 			return (0);
1115 	}
1116 	return (1);
1117 }
1118 
1119 /*
1120  * return a segment entry starting at x.
1121  * if gsc has no entry starting at x, a new entry is created at x.
1122  */
1123 static struct segment *
1124 gsc_getentry(struct gen_sc *gsc, double x)
1125 {
1126 	struct segment	*new, *prev, *s;
1127 
1128 	prev = NULL;
1129 	LIST_FOREACH(s, gsc, _next) {
1130 		if (s->x == x)
1131 			return (s);	/* matching entry found */
1132 		else if (s->x < x)
1133 			prev = s;
1134 		else
1135 			break;
1136 	}
1137 
1138 	/* we have to create a new entry */
1139 	if ((new = calloc(1, sizeof(struct segment))) == NULL)
1140 		return (NULL);
1141 
1142 	new->x = x;
1143 	if (x == INFINITY || s == NULL)
1144 		new->d = 0;
1145 	else if (s->x == INFINITY)
1146 		new->d = INFINITY;
1147 	else
1148 		new->d = s->x - x;
1149 	if (prev == NULL) {
1150 		/* insert the new entry at the head of the list */
1151 		new->y = 0;
1152 		new->m = 0;
1153 		LIST_INSERT_HEAD(gsc, new, _next);
1154 	} else {
1155 		/*
1156 		 * the start point intersects with the segment pointed by
1157 		 * prev.  divide prev into 2 segments
1158 		 */
1159 		if (x == INFINITY) {
1160 			prev->d = INFINITY;
1161 			if (prev->m == 0)
1162 				new->y = prev->y;
1163 			else
1164 				new->y = INFINITY;
1165 		} else {
1166 			prev->d = x - prev->x;
1167 			new->y = prev->d * prev->m + prev->y;
1168 		}
1169 		new->m = prev->m;
1170 		LIST_INSERT_AFTER(prev, new, _next);
1171 	}
1172 	return (new);
1173 }
1174 
1175 /* add a segment to a generalized service curve */
1176 static int
1177 gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
1178 {
1179 	struct segment	*start, *end, *s;
1180 	double		 x2;
1181 
1182 	if (d == INFINITY)
1183 		x2 = INFINITY;
1184 	else
1185 		x2 = x + d;
1186 	start = gsc_getentry(gsc, x);
1187 	end = gsc_getentry(gsc, x2);
1188 	if (start == NULL || end == NULL)
1189 		return (-1);
1190 
1191 	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
1192 		s->m += m;
1193 		s->y += y + (s->x - x) * m;
1194 	}
1195 
1196 	end = gsc_getentry(gsc, INFINITY);
1197 	for (; s != end; s = LIST_NEXT(s, _next)) {
1198 		s->y += m * d;
1199 	}
1200 
1201 	return (0);
1202 }
1203 
1204 /* get y-projection of a service curve */
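/*
 * In other words: y(x) = m1 * x for x <= d, and
 * y(x) = m1 * d + m2 * (x - d) for x > d.
 */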
1205 static double
1206 sc_x2y(struct service_curve *sc, double x)
1207 {
1208 	double	y;
1209 
1210 	if (x <= (double)sc->d)
1211 		/* y belongs to the 1st segment */
1212 		y = x * (double)sc->m1;
1213 	else
1214 		/* y belongs to the 2nd segment */
1215 		y = (double)sc->d * (double)sc->m1
1216 			+ (x - (double)sc->d) * (double)sc->m2;
1217 	return (y);
1218 }
1219 
1220 /*
1221  * misc utilities
1222  */
1223 #define	R2S_BUFS	8
1224 #define	RATESTR_MAX	16
1225 
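/*
 * rate2str() hands back one of R2S_BUFS static buffers in rotation, so up to
 * eight results can be live in a single printf() call before an earlier
 * return value is overwritten.
 */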
1226 char *
1227 rate2str(double rate)
1228 {
1229 	char		*buf;
1230 	static char	 r2sbuf[R2S_BUFS][RATESTR_MAX];  /* ring buffer */
1231 	static int	 idx = 0;
1232 	int		 i;
1233 	static const char unit[] = " KMG";
1234 
1235 	buf = r2sbuf[idx++];
1236 	if (idx == R2S_BUFS)
1237 		idx = 0;
1238 
1239 	for (i = 0; rate >= 1000 && i <= 3; i++)
1240 		rate /= 1000;
1241 
1242 	if ((int)(rate * 100) % 100)
1243 		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
1244 	else
1245 		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);
1246 
1247 	return (buf);
1248 }
1249 
1250 u_int32_t
1251 getifspeed(char *ifname)
1252 {
1253 	int		s;
1254 	struct ifreq	ifr;
1255 	struct if_data	ifrdat;
1256 
1257 	s = get_query_socket();
1258 	bzero(&ifr, sizeof(ifr));
1259 	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1260 	    sizeof(ifr.ifr_name))
1261 		errx(1, "getifspeed: strlcpy");
1262 	ifr.ifr_data = (caddr_t)&ifrdat;
1263 	if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
1264 		err(1, "SIOCGIFDATA");
1265 	return ((u_int32_t)ifrdat.ifi_baudrate);
1266 }
1267 
1268 u_long
1269 getifmtu(char *ifname)
1270 {
1271 	int		s;
1272 	struct ifreq	ifr;
1273 
1274 	s = get_query_socket();
1275 	bzero(&ifr, sizeof(ifr));
1276 	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1277 	    sizeof(ifr.ifr_name))
1278 		errx(1, "getifmtu: strlcpy");
1279 	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
1280 #ifdef __FreeBSD__
1281 		ifr.ifr_mtu = 1500;
1282 #else
1283 		err(1, "SIOCGIFMTU");
1284 #endif
1285 	if (ifr.ifr_mtu > 0)
1286 		return (ifr.ifr_mtu);
1287 	else {
1288 		warnx("could not get mtu for %s, assuming 1500", ifname);
1289 		return (1500);
1290 	}
1291 }
1292 
1293 int
1294 eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
1295     u_int64_t ref_bw)
1296 {
1297 	int	errors = 0;
1298 
1299 	switch (pa->scheduler) {
1300 	case ALTQT_CBQ:
1301 		pa->pq_u.cbq_opts = opts->data.cbq_opts;
1302 		break;
1303 	case ALTQT_PRIQ:
1304 		pa->pq_u.priq_opts = opts->data.priq_opts;
1305 		break;
1306 	case ALTQT_HFSC:
1307 		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
1308 		if (opts->data.hfsc_opts.linkshare.used) {
1309 			pa->pq_u.hfsc_opts.lssc_m1 =
1310 			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
1311 			    ref_bw);
1312 			pa->pq_u.hfsc_opts.lssc_m2 =
1313 			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
1314 			    ref_bw);
1315 			pa->pq_u.hfsc_opts.lssc_d =
1316 			    opts->data.hfsc_opts.linkshare.d;
1317 		}
1318 		if (opts->data.hfsc_opts.realtime.used) {
1319 			pa->pq_u.hfsc_opts.rtsc_m1 =
1320 			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
1321 			    ref_bw);
1322 			pa->pq_u.hfsc_opts.rtsc_m2 =
1323 			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
1324 			    ref_bw);
1325 			pa->pq_u.hfsc_opts.rtsc_d =
1326 			    opts->data.hfsc_opts.realtime.d;
1327 		}
1328 		if (opts->data.hfsc_opts.upperlimit.used) {
1329 			pa->pq_u.hfsc_opts.ulsc_m1 =
1330 			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
1331 			    ref_bw);
1332 			pa->pq_u.hfsc_opts.ulsc_m2 =
1333 			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
1334 			    ref_bw);
1335 			pa->pq_u.hfsc_opts.ulsc_d =
1336 			    opts->data.hfsc_opts.upperlimit.d;
1337 		}
1338 		break;
1339 	case ALTQT_FAIRQ:
1340 		pa->pq_u.fairq_opts.flags = opts->data.fairq_opts.flags;
1341 		pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
1342 		pa->pq_u.fairq_opts.hogs_m1 =
1343 			eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);
1344 
1345 		if (opts->data.fairq_opts.linkshare.used) {
1346 			pa->pq_u.fairq_opts.lssc_m1 =
1347 			    eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
1348 			    ref_bw);
1349 			pa->pq_u.fairq_opts.lssc_m2 =
1350 			    eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
1351 			    ref_bw);
1352 			pa->pq_u.fairq_opts.lssc_d =
1353 			    opts->data.fairq_opts.linkshare.d;
1354 		}
1355 		break;
1356 	case ALTQT_CODEL:
1357 		pa->pq_u.codel_opts.target = opts->data.codel_opts.target;
1358 		pa->pq_u.codel_opts.interval = opts->data.codel_opts.interval;
1359 		pa->pq_u.codel_opts.ecn = opts->data.codel_opts.ecn;
1360 		break;
1361 	default:
1362 		warnx("eval_queue_opts: unknown scheduler type %u",
1363 		    opts->qtype);
1364 		errors++;
1365 		break;
1366 	}
1367 
1368 	return (errors);
1369 }
1370 
1371 /*
1372  * If absolute bandwidth is set, return the lesser of that value and the
1373  * reference bandwidth.  Limiting to the reference bandwidth allows simple
1374  * limiting of configured bandwidth parameters for schedulers that are
1375  * 32-bit limited, as the root/interface bandwidth (top-level reference
1376  * bandwidth) will be properly limited in that case.
1377  *
1378  * Otherwise, if the absolute bandwidth is not set, return given percentage
1379  * of reference bandwidth.
1380  */
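/*
 * For example (illustrative numbers): with ref_bw = 100 Mbps, a 25%
 * specification yields 25 Mbps, while an absolute specification of 200 Mbps
 * is clamped down to the 100 Mbps reference.
 */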
1381 u_int64_t
1382 eval_bwspec(struct node_queue_bw *bw, u_int64_t ref_bw)
1383 {
1384 	if (bw->bw_absolute > 0)
1385 		return (MIN(bw->bw_absolute, ref_bw));
1386 
1387 	if (bw->bw_percent > 0)
1388 		return (ref_bw / 100 * bw->bw_percent);
1389 
1390 	return (0);
1391 }
1392 
1393 void
1394 print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
1395     const struct node_hfsc_sc *sc)
1396 {
1397 	printf(" %s", scname);
1398 
1399 	if (d != 0) {
1400 		printf("(");
1401 		if (sc != NULL && sc->m1.bw_percent > 0)
1402 			printf("%u%%", sc->m1.bw_percent);
1403 		else
1404 			printf("%s", rate2str((double)m1));
1405 		printf(" %u", d);
1406 	}
1407 
1408 	if (sc != NULL && sc->m2.bw_percent > 0)
1409 		printf(" %u%%", sc->m2.bw_percent);
1410 	else
1411 		printf(" %s", rate2str((double)m2));
1412 
1413 	if (d != 0)
1414 		printf(")");
1415 }
1416 
1417 void
1418 print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
1419     const struct node_fairq_sc *sc)
1420 {
1421 	printf(" %s", scname);
1422 
1423 	if (d != 0) {
1424 		printf("(");
1425 		if (sc != NULL && sc->m1.bw_percent > 0)
1426 			printf("%u%%", sc->m1.bw_percent);
1427 		else
1428 			printf("%s", rate2str((double)m1));
1429 		printf(" %u", d);
1430 	}
1431 
1432 	if (sc != NULL && sc->m2.bw_percent > 0)
1433 		printf(" %u%%", sc->m2.bw_percent);
1434 	else
1435 		printf(" %s", rate2str((double)m2));
1436 
1437 	if (d != 0)
1438 		printf(")");
1439 }
1440