xref: /freebsd/sys/netgraph/ng_pipe.c (revision 674d86bf9177ff80b5f38f7191951f303a816cac)
1 /*-
2  * Copyright (c) 2004-2010 University of Zagreb
3  * Copyright (c) 2007-2008 FreeBSD Foundation
4  *
5  * This software was developed by the University of Zagreb and the
6  * FreeBSD Foundation under sponsorship by the Stichting NLnet and the
7  * FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * $FreeBSD$
31  */
32 
33 /*
34  * This node permits simple traffic shaping by emulating bandwidth
35  * and delay, as well as random packet losses.
36  * The node has two hooks, upper and lower. Traffic flowing from upper to
37  * lower hook is referenced as downstream, and vice versa. Parameters for
38  * both directions can be set separately, except for delay.
39  */
40 
41 
42 #include <sys/param.h>
43 #include <sys/errno.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/time.h>
49 
50 #include <vm/uma.h>
51 
52 #include <net/vnet.h>
53 
54 #include <netinet/in.h>
55 #include <netinet/in_systm.h>
56 #include <netinet/ip.h>
57 
58 #include <netgraph/ng_message.h>
59 #include <netgraph/netgraph.h>
60 #include <netgraph/ng_parse.h>
61 #include <netgraph/ng_pipe.h>
62 
63 static MALLOC_DEFINE(M_NG_PIPE, "ng_pipe", "ng_pipe");
64 
/* Packet header struct */
struct ngp_hdr {
	TAILQ_ENTRY(ngp_hdr)	ngp_link;	/* next pkt in queue */
	struct timeval		when;		/* this packet's due time */
	struct mbuf		*m;		/* ptr to the packet data */
};
TAILQ_HEAD(p_head, ngp_hdr);

/* FIFO queue struct */
struct ngp_fifo {
	TAILQ_ENTRY(ngp_fifo)	fifo_le;	/* list of active queues only */
	struct p_head		packet_head;	/* FIFO queue head */
	u_int32_t		hash;		/* flow signature */
	struct timeval		vtime;		/* virtual time, for WFQ */
	u_int32_t		rr_deficit;	/* for DRR */
	u_int32_t		packets;	/* # of packets in this queue */
};

/* Per hook info */
struct hookinfo {
	hook_p			hook;		/* netgraph hook backpointer */
	int			noqueue;	/* bypass any processing */
	TAILQ_HEAD(, ngp_fifo)	fifo_head;	/* FIFO queues */
	TAILQ_HEAD(, ngp_hdr)	qout_head;	/* delay queue head */
	struct timeval		qin_utime;	/* inbound service deadline */
	struct ng_pipe_hookcfg	cfg;		/* hook configuration */
	struct ng_pipe_hookrun	run;		/* current runtime counters */
	struct ng_pipe_hookstat	stats;		/* cumulative statistics */
	uint64_t		*ber_p;		/* loss_p(BER,psize) map */
};

/* Per node info */
struct node_priv {
	u_int64_t		delay;		/* propagation delay, in us */
	u_int32_t		overhead;	/* per-frame overhead, bytes */
	u_int32_t		header_offset;	/* offset to IP header, bytes */
	struct hookinfo		lower;
	struct hookinfo		upper;
	struct callout		timer;		/* drives queue polling */
	int			timer_scheduled; /* nonzero if timer pending */
};
typedef struct node_priv *priv_p;
107 
/*
 * Macro for calculating the virtual time for packet dequeueing in WFQ.
 *
 * When WFQ is enabled and a bandwidth limit is configured, the queue's
 * virtual finish time is advanced by (plen + overhead) * active_queues
 * * 8000000 / bandwidth microseconds past "now", and the queue is
 * inserted into the active list sorted by ascending virtual time.
 * Otherwise (plain FIFO, or no bandwidth limit) the queue simply goes
 * to the tail of the active list.
 *
 * Expects "hinfo", "priv", "now", "ngp_f" and "ngp_f1" in scope.
 */
#define FIFO_VTIME_SORT(plen)						\
	if (hinfo->cfg.wfq && hinfo->cfg.bandwidth) {			\
		ngp_f->vtime.tv_usec = now->tv_usec + ((uint64_t) (plen) \
			+ priv->overhead ) * hinfo->run.fifo_queues *	\
			8000000 / hinfo->cfg.bandwidth;			\
		ngp_f->vtime.tv_sec = now->tv_sec +			\
			ngp_f->vtime.tv_usec / 1000000;			\
		ngp_f->vtime.tv_usec = ngp_f->vtime.tv_usec % 1000000;	\
		TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)	\
			if (ngp_f1->vtime.tv_sec > ngp_f->vtime.tv_sec || \
			    (ngp_f1->vtime.tv_sec == ngp_f->vtime.tv_sec && \
			    ngp_f1->vtime.tv_usec > ngp_f->vtime.tv_usec)) \
				break;					\
		if (ngp_f1 == NULL)					\
			TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le); \
		else							\
			TAILQ_INSERT_BEFORE(ngp_f1, ngp_f, fifo_le);	\
	} else								\
		TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le);	\

129 
130 static void	parse_cfg(struct ng_pipe_hookcfg *, struct ng_pipe_hookcfg *,
131 			struct hookinfo *, priv_p);
132 static void	pipe_dequeue(struct hookinfo *, struct timeval *);
133 static void	ngp_callout(node_p, hook_p, void *, int);
134 static int	ngp_modevent(module_t, int, void *);
135 
136 /* zone for storing ngp_hdr-s */
137 static uma_zone_t ngp_zone;
138 
139 /* Netgraph methods */
140 static ng_constructor_t	ngp_constructor;
141 static ng_rcvmsg_t	ngp_rcvmsg;
142 static ng_shutdown_t	ngp_shutdown;
143 static ng_newhook_t	ngp_newhook;
144 static ng_rcvdata_t	ngp_rcvdata;
145 static ng_disconnect_t	ngp_disconnect;
146 
/* Parse type for struct ng_pipe_hookstat */
static const struct ng_parse_struct_field
	ng_pipe_hookstat_type_fields[] = NG_PIPE_HOOKSTAT_INFO;
static const struct ng_parse_type ng_pipe_hookstat_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookstat_type_fields
};

/* Parse type for struct ng_pipe_stats */
static const struct ng_parse_struct_field ng_pipe_stats_type_fields[] =
	NG_PIPE_STATS_INFO(&ng_pipe_hookstat_type);
static const struct ng_parse_type ng_pipe_stats_type = {
	&ng_parse_struct_type,
	&ng_pipe_stats_type_fields
};

/* Parse type for struct ng_pipe_hookrun */
static const struct ng_parse_struct_field
	ng_pipe_hookrun_type_fields[] = NG_PIPE_HOOKRUN_INFO;
static const struct ng_parse_type ng_pipe_hookrun_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookrun_type_fields
};

/* Parse type for struct ng_pipe_run */
static const struct ng_parse_struct_field
	ng_pipe_run_type_fields[] = NG_PIPE_RUN_INFO(&ng_pipe_hookrun_type);
static const struct ng_parse_type ng_pipe_run_type = {
	&ng_parse_struct_type,
	&ng_pipe_run_type_fields
};

/* Parse type for struct ng_pipe_hookcfg */
static const struct ng_parse_struct_field
	ng_pipe_hookcfg_type_fields[] = NG_PIPE_HOOKCFG_INFO;
static const struct ng_parse_type ng_pipe_hookcfg_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookcfg_type_fields
};

/* Parse type for struct ng_pipe_cfg */
static const struct ng_parse_struct_field
	ng_pipe_cfg_type_fields[] = NG_PIPE_CFG_INFO(&ng_pipe_hookcfg_type);
static const struct ng_parse_type ng_pipe_cfg_type = {
	&ng_parse_struct_type,
	&ng_pipe_cfg_type_fields
};

/* List of commands and how to convert arguments to/from ASCII */
static const struct ng_cmdlist ngp_cmds[] = {
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_STATS,
		.name = 	"getstats",
		.respType =	 &ng_pipe_stats_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_CLR_STATS,
		.name =		"clrstats"
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GETCLR_STATS,
		.name =		"getclrstats",
		.respType =	&ng_pipe_stats_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_RUN,
		.name =		"getrun",
		.respType =	&ng_pipe_run_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_CFG,
		.name =		"getcfg",
		.respType =	&ng_pipe_cfg_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_SET_CFG,
		.name =		"setcfg",
		.mesgType =	&ng_pipe_cfg_type,
	},
	{ 0 }	/* list terminator */
};

/* Netgraph type descriptor - wires the ngp_* methods into the framework */
static struct ng_type ng_pipe_typestruct = {
	.version =	NG_ABI_VERSION,
	.name =		NG_PIPE_NODE_TYPE,
	.mod_event =	ngp_modevent,
	.constructor =	ngp_constructor,
	.shutdown =	ngp_shutdown,
	.rcvmsg =	ngp_rcvmsg,
	.newhook =	ngp_newhook,
	.rcvdata =	ngp_rcvdata,
	.disconnect =	ngp_disconnect,
	.cmdlist =	ngp_cmds
};
NETGRAPH_INIT(pipe, &ng_pipe_typestruct);
249 
250 /* Node constructor */
251 static int
252 ngp_constructor(node_p node)
253 {
254 	priv_p priv;
255 
256 	priv = malloc(sizeof(*priv), M_NG_PIPE, M_ZERO | M_WAITOK);
257 	NG_NODE_SET_PRIVATE(node, priv);
258 
259 	/* Mark node as single-threaded */
260 	NG_NODE_FORCE_WRITER(node);
261 
262 	ng_callout_init(&priv->timer);
263 
264 	return (0);
265 }
266 
267 /* Add a hook */
268 static int
269 ngp_newhook(node_p node, hook_p hook, const char *name)
270 {
271 	const priv_p priv = NG_NODE_PRIVATE(node);
272 	struct hookinfo *hinfo;
273 
274 	if (strcmp(name, NG_PIPE_HOOK_UPPER) == 0) {
275 		bzero(&priv->upper, sizeof(priv->upper));
276 		priv->upper.hook = hook;
277 		NG_HOOK_SET_PRIVATE(hook, &priv->upper);
278 	} else if (strcmp(name, NG_PIPE_HOOK_LOWER) == 0) {
279 		bzero(&priv->lower, sizeof(priv->lower));
280 		priv->lower.hook = hook;
281 		NG_HOOK_SET_PRIVATE(hook, &priv->lower);
282 	} else
283 		return (EINVAL);
284 
285 	/* Load non-zero initial cfg values */
286 	hinfo = NG_HOOK_PRIVATE(hook);
287 	hinfo->cfg.qin_size_limit = 50;
288 	hinfo->cfg.fifo = 1;
289 	hinfo->cfg.droptail = 1;
290 	TAILQ_INIT(&hinfo->fifo_head);
291 	TAILQ_INIT(&hinfo->qout_head);
292 	return (0);
293 }
294 
/*
 * Receive a control message.  Dispatches on the NGM_PIPE_COOKIE
 * command set: statistics get/clear, runtime state query, and
 * configuration get/set.  Anything else is rejected with EINVAL.
 * Note: "upper" hook counters map to the downstream direction and
 * "lower" hook counters to upstream, per the node description above.
 */
static int
ngp_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ng_mesg *resp = NULL;
	struct ng_mesg *msg;
	struct ng_pipe_stats *stats;
	struct ng_pipe_run *run;
	struct ng_pipe_cfg *cfg;
	int error = 0;

	NGI_GET_MSG(item, msg);
	switch (msg->header.typecookie) {
	case NGM_PIPE_COOKIE:
		switch (msg->header.cmd) {
		case NGM_PIPE_GET_STATS:
		case NGM_PIPE_CLR_STATS:
		case NGM_PIPE_GETCLR_STATS:
			/* GET and GETCLR return a snapshot... */
			if (msg->header.cmd != NGM_PIPE_CLR_STATS) {
				NG_MKRESPONSE(resp, msg,
				    sizeof(*stats), M_NOWAIT);
				if (resp == NULL) {
					error = ENOMEM;
					break;
				}
				stats = (struct ng_pipe_stats *) resp->data;
				bcopy(&priv->upper.stats, &stats->downstream,
				    sizeof(stats->downstream));
				bcopy(&priv->lower.stats, &stats->upstream,
				    sizeof(stats->upstream));
			}
			/* ...CLR and GETCLR zero the live counters. */
			if (msg->header.cmd != NGM_PIPE_GET_STATS) {
				bzero(&priv->upper.stats,
				    sizeof(priv->upper.stats));
				bzero(&priv->lower.stats,
				    sizeof(priv->lower.stats));
			}
			break;
		case NGM_PIPE_GET_RUN:
			NG_MKRESPONSE(resp, msg, sizeof(*run), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			run = (struct ng_pipe_run *) resp->data;
			bcopy(&priv->upper.run, &run->downstream,
				sizeof(run->downstream));
			bcopy(&priv->lower.run, &run->upstream,
				sizeof(run->upstream));
			break;
		case NGM_PIPE_GET_CFG:
			NG_MKRESPONSE(resp, msg, sizeof(*cfg), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			cfg = (struct ng_pipe_cfg *) resp->data;
			bcopy(&priv->upper.cfg, &cfg->downstream,
				sizeof(cfg->downstream));
			bcopy(&priv->lower.cfg, &cfg->upstream,
				sizeof(cfg->upstream));
			cfg->delay = priv->delay;
			cfg->overhead = priv->overhead;
			cfg->header_offset = priv->header_offset;
			/*
			 * When both directions share one bandwidth, report
			 * it via the symmetric "bandwidth" field only.
			 */
			if (cfg->upstream.bandwidth ==
			    cfg->downstream.bandwidth) {
				cfg->bandwidth = cfg->upstream.bandwidth;
				cfg->upstream.bandwidth = 0;
				cfg->downstream.bandwidth = 0;
			} else
				cfg->bandwidth = 0;
			break;
		case NGM_PIPE_SET_CFG:
			cfg = (struct ng_pipe_cfg *) msg->data;
			if (msg->header.arglen != sizeof(*cfg)) {
				error = EINVAL;
				break;
			}

			/* Convention: -1 resets a field to its default. */
			if (cfg->delay == -1)
				priv->delay = 0;
			else if (cfg->delay > 0 && cfg->delay < 10000000)
				priv->delay = cfg->delay;

			if (cfg->bandwidth == -1) {
				priv->upper.cfg.bandwidth = 0;
				priv->lower.cfg.bandwidth = 0;
				priv->overhead = 0;
			} else if (cfg->bandwidth >= 100 &&
			    cfg->bandwidth <= 1000000000) {
				/* Symmetric limit applies to both hooks. */
				priv->upper.cfg.bandwidth = cfg->bandwidth;
				priv->lower.cfg.bandwidth = cfg->bandwidth;
				if (cfg->bandwidth >= 10000000)
					priv->overhead = 8+4+12; /* Ethernet */
				else
					priv->overhead = 10; /* HDLC */
			}

			if (cfg->overhead == -1)
				priv->overhead = 0;
			else if (cfg->overhead > 0 &&
			    cfg->overhead < MAX_OHSIZE)
				priv->overhead = cfg->overhead;

			if (cfg->header_offset == -1)
				priv->header_offset = 0;
			else if (cfg->header_offset > 0 &&
			    cfg->header_offset < 64)
				priv->header_offset = cfg->header_offset;

			/* Apply the per-direction hook configurations. */
			parse_cfg(&priv->upper.cfg, &cfg->downstream,
			    &priv->upper, priv);
			parse_cfg(&priv->lower.cfg, &cfg->upstream,
			    &priv->lower, priv);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	NG_RESPOND_MSG(error, node, item, resp);
	NG_FREE_MSG(msg);

	return (error);
}
425 
426 static void
427 parse_cfg(struct ng_pipe_hookcfg *current, struct ng_pipe_hookcfg *new,
428 	struct hookinfo *hinfo, priv_p priv)
429 {
430 
431 	if (new->ber == -1) {
432 		current->ber = 0;
433 		if (hinfo->ber_p) {
434 			free(hinfo->ber_p, M_NG_PIPE);
435 			hinfo->ber_p = NULL;
436 		}
437 	} else if (new->ber >= 1 && new->ber <= 1000000000000) {
438 		static const uint64_t one = 0x1000000000000; /* = 2^48 */
439 		uint64_t p0, p;
440 		uint32_t fsize, i;
441 
442 		if (hinfo->ber_p == NULL)
443 			hinfo->ber_p =
444 			    malloc((MAX_FSIZE + MAX_OHSIZE) * sizeof(uint64_t),
445 			    M_NG_PIPE, M_NOWAIT);
446 		current->ber = new->ber;
447 
448 		/*
449 		 * For given BER and each frame size N (in bytes) calculate
450 		 * the probability P_OK that the frame is clean:
451 		 *
452 		 * P_OK(BER,N) = (1 - 1/BER)^(N*8)
453 		 *
454 		 * We use a 64-bit fixed-point format with decimal point
455 		 * positioned between bits 47 and 48.
456 		 */
457 		p0 = one - one / new->ber;
458 		p = one;
459 		for (fsize = 0; fsize < MAX_FSIZE + MAX_OHSIZE; fsize++) {
460 			hinfo->ber_p[fsize] = p;
461 			for (i = 0; i < 8; i++)
462 				p = (p * (p0 & 0xffff) >> 48) +
463 				    (p * ((p0 >> 16) & 0xffff) >> 32) +
464 				    (p * (p0 >> 32) >> 16);
465 		}
466 	}
467 
468 	if (new->qin_size_limit == -1)
469 		current->qin_size_limit = 0;
470 	else if (new->qin_size_limit >= 5)
471 		current->qin_size_limit = new->qin_size_limit;
472 
473 	if (new->qout_size_limit == -1)
474 		current->qout_size_limit = 0;
475 	else if (new->qout_size_limit >= 5)
476 		current->qout_size_limit = new->qout_size_limit;
477 
478 	if (new->duplicate == -1)
479 		current->duplicate = 0;
480 	else if (new->duplicate > 0 && new->duplicate <= 50)
481 		current->duplicate = new->duplicate;
482 
483 	if (new->fifo) {
484 		current->fifo = 1;
485 		current->wfq = 0;
486 		current->drr = 0;
487 	}
488 
489 	if (new->wfq) {
490 		current->fifo = 0;
491 		current->wfq = 1;
492 		current->drr = 0;
493 	}
494 
495 	if (new->drr) {
496 		current->fifo = 0;
497 		current->wfq = 0;
498 		/* DRR quantum */
499 		if (new->drr >= 32)
500 			current->drr = new->drr;
501 		else
502 			current->drr = 2048;		/* default quantum */
503 	}
504 
505 	if (new->droptail) {
506 		current->droptail = 1;
507 		current->drophead = 0;
508 	}
509 
510 	if (new->drophead) {
511 		current->droptail = 0;
512 		current->drophead = 1;
513 	}
514 
515 	if (new->bandwidth == -1) {
516 		current->bandwidth = 0;
517 		current->fifo = 1;
518 		current->wfq = 0;
519 		current->drr = 0;
520 	} else if (new->bandwidth >= 100 && new->bandwidth <= 1000000000)
521 		current->bandwidth = new->bandwidth;
522 
523 	if (current->bandwidth | priv->delay |
524 	    current->duplicate | current->ber)
525 		hinfo->noqueue = 0;
526 	else
527 		hinfo->noqueue = 1;
528 }
529 
530 /*
531  * Compute a hash signature for a packet. This function suffers from the
532  * NIH sindrome, so probably it would be wise to look around what other
533  * folks have found out to be a good and efficient IP hash function...
534  */
535 static int
536 ip_hash(struct mbuf *m, int offset)
537 {
538 	u_int64_t i;
539 	struct ip *ip = (struct ip *)(mtod(m, u_char *) + offset);
540 
541 	if (m->m_len < sizeof(struct ip) + offset ||
542 	    ip->ip_v != 4 || ip->ip_hl << 2 != sizeof(struct ip))
543 		return 0;
544 
545 	i = ((u_int64_t) ip->ip_src.s_addr ^
546 	    ((u_int64_t) ip->ip_src.s_addr << 13) ^
547 	    ((u_int64_t) ip->ip_dst.s_addr << 7) ^
548 	    ((u_int64_t) ip->ip_dst.s_addr << 19));
549 	return (i ^ (i >> 32));
550 }
551 
552 /*
553  * Receive data on a hook - both in upstream and downstream direction.
554  * We put the frame on the inbound queue, and try to initiate dequeuing
555  * sequence immediately. If inbound queue is full, discard one frame
556  * depending on dropping policy (from the head or from the tail of the
557  * queue).
558  */
559 static int
560 ngp_rcvdata(hook_p hook, item_p item)
561 {
562 	struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
563 	const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
564 	struct timeval uuptime;
565 	struct timeval *now = &uuptime;
566 	struct ngp_fifo *ngp_f = NULL, *ngp_f1;
567 	struct ngp_hdr *ngp_h = NULL;
568 	struct mbuf *m;
569 	int hash, plen;
570 	int error = 0;
571 
572 	/*
573 	 * Shortcut from inbound to outbound hook when neither of
574 	 * bandwidth, delay, BER or duplication probability is
575 	 * configured, nor we have queued frames to drain.
576 	 */
577 	if (hinfo->run.qin_frames == 0 && hinfo->run.qout_frames == 0 &&
578 	    hinfo->noqueue) {
579 		struct hookinfo *dest;
580 		if (hinfo == &priv->lower)
581 			dest = &priv->upper;
582 		else
583 			dest = &priv->lower;
584 
585 		/* Send the frame. */
586 		plen = NGI_M(item)->m_pkthdr.len;
587 		NG_FWD_ITEM_HOOK(error, item, dest->hook);
588 
589 		/* Update stats. */
590 		if (error) {
591 			hinfo->stats.out_disc_frames++;
592 			hinfo->stats.out_disc_octets += plen;
593 		} else {
594 			hinfo->stats.fwd_frames++;
595 			hinfo->stats.fwd_octets += plen;
596 		}
597 
598 		return (error);
599 	}
600 
601 	microuptime(now);
602 
603 	/*
604 	 * If this was an empty queue, update service deadline time.
605 	 */
606 	if (hinfo->run.qin_frames == 0) {
607 		struct timeval *when = &hinfo->qin_utime;
608 		if (when->tv_sec < now->tv_sec || (when->tv_sec == now->tv_sec
609 		    && when->tv_usec < now->tv_usec)) {
610 			when->tv_sec = now->tv_sec;
611 			when->tv_usec = now->tv_usec;
612 		}
613 	}
614 
615 	/* Populate the packet header */
616 	ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
617 	KASSERT((ngp_h != NULL), ("ngp_h zalloc failed (1)"));
618 	NGI_GET_M(item, m);
619 	KASSERT(m != NULL, ("NGI_GET_M failed"));
620 	ngp_h->m = m;
621 	NG_FREE_ITEM(item);
622 
623 	if (hinfo->cfg.fifo)
624 		hash = 0;	/* all packets go into a single FIFO queue */
625 	else
626 		hash = ip_hash(m, priv->header_offset);
627 
628 	/* Find the appropriate FIFO queue for the packet and enqueue it*/
629 	TAILQ_FOREACH(ngp_f, &hinfo->fifo_head, fifo_le)
630 		if (hash == ngp_f->hash)
631 			break;
632 	if (ngp_f == NULL) {
633 		ngp_f = uma_zalloc(ngp_zone, M_NOWAIT);
634 		KASSERT(ngp_h != NULL, ("ngp_h zalloc failed (2)"));
635 		TAILQ_INIT(&ngp_f->packet_head);
636 		ngp_f->hash = hash;
637 		ngp_f->packets = 1;
638 		ngp_f->rr_deficit = hinfo->cfg.drr;	/* DRR quantum */
639 		hinfo->run.fifo_queues++;
640 		TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
641 		FIFO_VTIME_SORT(m->m_pkthdr.len);
642 	} else {
643 		TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
644 		ngp_f->packets++;
645 	}
646 	hinfo->run.qin_frames++;
647 	hinfo->run.qin_octets += m->m_pkthdr.len;
648 
649 	/* Discard a frame if inbound queue limit has been reached */
650 	if (hinfo->run.qin_frames > hinfo->cfg.qin_size_limit) {
651 		struct mbuf *m1;
652 		int longest = 0;
653 
654 		/* Find the longest queue */
655 		TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)
656 			if (ngp_f1->packets > longest) {
657 				longest = ngp_f1->packets;
658 				ngp_f = ngp_f1;
659 			}
660 
661 		/* Drop a frame from the queue head/tail, depending on cfg */
662 		if (hinfo->cfg.drophead)
663 			ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
664 		else
665 			ngp_h = TAILQ_LAST(&ngp_f->packet_head, p_head);
666 		TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
667 		m1 = ngp_h->m;
668 		uma_zfree(ngp_zone, ngp_h);
669 		hinfo->run.qin_octets -= m1->m_pkthdr.len;
670 		hinfo->stats.in_disc_octets += m1->m_pkthdr.len;
671 		m_freem(m1);
672 		if (--(ngp_f->packets) == 0) {
673 			TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
674 			uma_zfree(ngp_zone, ngp_f);
675 			hinfo->run.fifo_queues--;
676 		}
677 		hinfo->run.qin_frames--;
678 		hinfo->stats.in_disc_frames++;
679 	} else if (hinfo->run.qin_frames > hinfo->cfg.qin_size_limit) {
680 		struct mbuf *m1;
681 		int longest = 0;
682 
683 		/* Find the longest queue */
684 		TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)
685 			if (ngp_f1->packets > longest) {
686 				longest = ngp_f1->packets;
687 				ngp_f = ngp_f1;
688 			}
689 
690 		/* Drop a frame from the queue head/tail, depending on cfg */
691 		if (hinfo->cfg.drophead)
692 			ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
693 		else
694 			ngp_h = TAILQ_LAST(&ngp_f->packet_head, p_head);
695 		TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
696 		m1 = ngp_h->m;
697 		uma_zfree(ngp_zone, ngp_h);
698 		hinfo->run.qin_octets -= m1->m_pkthdr.len;
699 		hinfo->stats.in_disc_octets += m1->m_pkthdr.len;
700 		m_freem(m1);
701 		if (--(ngp_f->packets) == 0) {
702 			TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
703 			uma_zfree(ngp_zone, ngp_f);
704 			hinfo->run.fifo_queues--;
705 		}
706 		hinfo->run.qin_frames--;
707 		hinfo->stats.in_disc_frames++;
708 	}
709 
710 	/*
711 	 * Try to start the dequeuing process immediately.
712 	 */
713 	pipe_dequeue(hinfo, now);
714 
715 	return (0);
716 }
717 
718 
/*
 * Dequeueing sequence - we basically do the following:
 *  1) Try to extract the frame from the inbound (bandwidth) queue;
 *  2) In accordance to BER specified, discard the frame randomly;
 *  3) If the frame survives BER, prepend it with delay info and move it
 *     to outbound (delay) queue;
 *  4) Loop to 2) until bandwidth quota for this timeslice is reached, or
 *     inbound queue is flushed completely;
 *  5) Dequeue frames from the outbound queue and send them downstream until
 *     outbound queue is flushed completely, or the next frame in the queue
 *     is not due to be dequeued yet
 */
static void
pipe_dequeue(struct hookinfo *hinfo, struct timeval *now) {
	static uint64_t rand, oldrand;	/* persistent PRNG state for BER */
	const node_p node = NG_HOOK_NODE(hinfo->hook);
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct hookinfo *dest;
	struct ngp_fifo *ngp_f, *ngp_f1;
	struct ngp_hdr *ngp_h;
	struct timeval *when;
	struct mbuf *m;
	int plen, error = 0;

	/* Which one is the destination hook? */
	if (hinfo == &priv->lower)
		dest = &priv->upper;
	else
		dest = &priv->lower;

	/* Bandwidth queue processing */
	while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
		/* Stop when the service deadline is still in the future. */
		when = &hinfo->qin_utime;
		if (when->tv_sec > now->tv_sec || (when->tv_sec == now->tv_sec
		    && when->tv_usec > now->tv_usec))
			break;

		ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
		m = ngp_h->m;

		/* Deficit Round Robin (DRR) processing */
		if (hinfo->cfg.drr) {
			if (ngp_f->rr_deficit >= m->m_pkthdr.len) {
				ngp_f->rr_deficit -= m->m_pkthdr.len;
			} else {
				/* Out of quantum: replenish and rotate. */
				ngp_f->rr_deficit += hinfo->cfg.drr;
				TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
				TAILQ_INSERT_TAIL(&hinfo->fifo_head,
				    ngp_f, fifo_le);
				continue;
			}
		}

		/*
		 * Either create a duplicate and pass it on, or dequeue
		 * the original packet...
		 */
		if (hinfo->cfg.duplicate &&
		    random() % 100 <= hinfo->cfg.duplicate) {
			ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
			KASSERT(ngp_h != NULL, ("ngp_h zalloc failed (3)"));
			m = m_dup(m, M_NOWAIT);
			KASSERT(m != NULL, ("m_dup failed"));
			ngp_h->m = m;
		} else {
			TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
			hinfo->run.qin_frames--;
			hinfo->run.qin_octets -= m->m_pkthdr.len;
			ngp_f->packets--;
		}

		/* Calculate the serialization delay */
		if (hinfo->cfg.bandwidth) {
			hinfo->qin_utime.tv_usec +=
			    ((uint64_t) m->m_pkthdr.len + priv->overhead ) *
			    8000000 / hinfo->cfg.bandwidth;
			hinfo->qin_utime.tv_sec +=
			    hinfo->qin_utime.tv_usec / 1000000;
			hinfo->qin_utime.tv_usec =
			    hinfo->qin_utime.tv_usec % 1000000;
		}
		/* Stamp the frame with its (so far) earliest delivery time. */
		when = &ngp_h->when;
		when->tv_sec = hinfo->qin_utime.tv_sec;
		when->tv_usec = hinfo->qin_utime.tv_usec;

		/* Sort / rearrange inbound queues */
		if (ngp_f->packets) {
			if (hinfo->cfg.wfq) {
				/* Re-sort by the next packet's finish time. */
				TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
				FIFO_VTIME_SORT(TAILQ_FIRST(
				    &ngp_f->packet_head)->m->m_pkthdr.len)
			}
		} else {
			/* Queue drained - dismantle it. */
			TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
			uma_zfree(ngp_zone, ngp_f);
			hinfo->run.fifo_queues--;
		}

		/* Randomly discard the frame, according to BER setting */
		if (hinfo->cfg.ber) {
			oldrand = rand;
			rand = random();
			if (((oldrand ^ rand) << 17) >=
			    hinfo->ber_p[priv->overhead + m->m_pkthdr.len]) {
				hinfo->stats.out_disc_frames++;
				hinfo->stats.out_disc_octets += m->m_pkthdr.len;
				uma_zfree(ngp_zone, ngp_h);
				m_freem(m);
				continue;
			}
		}

		/* Discard frame if outbound queue size limit exceeded */
		if (hinfo->cfg.qout_size_limit &&
		    hinfo->run.qout_frames>=hinfo->cfg.qout_size_limit) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += m->m_pkthdr.len;
			uma_zfree(ngp_zone, ngp_h);
			m_freem(m);
			continue;
		}

		/* Calculate the propagation delay */
		when->tv_usec += priv->delay;
		when->tv_sec += when->tv_usec / 1000000;
		when->tv_usec = when->tv_usec % 1000000;

		/* Put the frame into the delay queue */
		TAILQ_INSERT_TAIL(&hinfo->qout_head, ngp_h, ngp_link);
		hinfo->run.qout_frames++;
		hinfo->run.qout_octets += m->m_pkthdr.len;
	}

	/* Delay queue processing */
	while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
		when = &ngp_h->when;
		m = ngp_h->m;
		/* Frames are time-ordered; stop at the first not yet due. */
		if (when->tv_sec > now->tv_sec ||
		    (when->tv_sec == now->tv_sec &&
		    when->tv_usec > now->tv_usec))
			break;

		/* Update outbound queue stats */
		plen = m->m_pkthdr.len;
		hinfo->run.qout_frames--;
		hinfo->run.qout_octets -= plen;

		/* Dequeue the packet from qout */
		TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
		uma_zfree(ngp_zone, ngp_h);

		/*
		 * NOTE(review): "meta" is not declared here - it appears to
		 * be a legacy dummy argument that NG_SEND_DATA() no longer
		 * expands; confirm against netgraph.h.
		 */
		NG_SEND_DATA(error, dest->hook, m, meta);
		if (error) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += plen;
		} else {
			hinfo->stats.fwd_frames++;
			hinfo->stats.fwd_octets += plen;
		}
	}

	/* Re-arm the tick callout while any queue still holds frames. */
	if ((hinfo->run.qin_frames != 0 || hinfo->run.qout_frames != 0) &&
	    !priv->timer_scheduled) {
		ng_callout(&priv->timer, node, NULL, 1, ngp_callout, NULL, 0);
		priv->timer_scheduled = 1;
	}
}
886 
887 /*
888  * This routine is called on every clock tick.  We poll connected hooks
889  * for queued frames by calling pipe_dequeue().
890  */
891 static void
892 ngp_callout(node_p node, hook_p hook, void *arg1, int arg2)
893 {
894 	const priv_p priv = NG_NODE_PRIVATE(node);
895 	struct timeval now;
896 
897 	priv->timer_scheduled = 0;
898 	microuptime(&now);
899 	if (priv->upper.hook != NULL)
900 		pipe_dequeue(&priv->upper, &now);
901 	if (priv->lower.hook != NULL)
902 		pipe_dequeue(&priv->lower, &now);
903 }
904 
905 /*
906  * Shutdown processing
907  *
908  * This is tricky. If we have both a lower and upper hook, then we
909  * probably want to extricate ourselves and leave the two peers
910  * still linked to each other. Otherwise we should just shut down as
911  * a normal node would.
912  */
913 static int
914 ngp_shutdown(node_p node)
915 {
916 	const priv_p priv = NG_NODE_PRIVATE(node);
917 
918 	if (priv->timer_scheduled)
919 		ng_uncallout(&priv->timer, node);
920 	if (priv->lower.hook && priv->upper.hook)
921 		ng_bypass(priv->lower.hook, priv->upper.hook);
922 	else {
923 		if (priv->upper.hook != NULL)
924 			ng_rmhook_self(priv->upper.hook);
925 		if (priv->lower.hook != NULL)
926 			ng_rmhook_self(priv->lower.hook);
927 	}
928 	NG_NODE_UNREF(node);
929 	free(priv, M_NG_PIPE);
930 	return (0);
931 }
932 
933 
934 /*
935  * Hook disconnection
936  */
937 static int
938 ngp_disconnect(hook_p hook)
939 {
940 	struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
941 	struct ngp_fifo *ngp_f;
942 	struct ngp_hdr *ngp_h;
943 
944 	KASSERT(hinfo != NULL, ("%s: null info", __FUNCTION__));
945 	hinfo->hook = NULL;
946 
947 	/* Flush all fifo queues associated with the hook */
948 	while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
949 		while ((ngp_h = TAILQ_FIRST(&ngp_f->packet_head))) {
950 			TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
951 			m_freem(ngp_h->m);
952 			uma_zfree(ngp_zone, ngp_h);
953 		}
954 		TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
955 		uma_zfree(ngp_zone, ngp_f);
956 	}
957 
958 	/* Flush the delay queue */
959 	while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
960 		TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
961 		m_freem(ngp_h->m);
962 		uma_zfree(ngp_zone, ngp_h);
963 	}
964 
965 	/* Release the packet loss probability table (BER) */
966 	if (hinfo->ber_p)
967 		free(hinfo->ber_p, M_NG_PIPE);
968 
969 	return (0);
970 }
971 
972 static int
973 ngp_modevent(module_t mod, int type, void *unused)
974 {
975 	int error = 0;
976 
977 	switch (type) {
978 	case MOD_LOAD:
979 		ngp_zone = uma_zcreate("ng_pipe", max(sizeof(struct ngp_hdr),
980 		    sizeof (struct ngp_fifo)), NULL, NULL, NULL, NULL,
981 		    UMA_ALIGN_PTR, 0);
982 		if (ngp_zone == NULL)
983 			panic("ng_pipe: couldn't allocate descriptor zone");
984 		break;
985 	case MOD_UNLOAD:
986 		uma_zdestroy(ngp_zone);
987 		break;
988 	default:
989 		error = EOPNOTSUPP;
990 		break;
991 	}
992 
993 	return (error);
994 }
995