/*-
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Dummynet portions related to packet handling.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/sysctl.h>

#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>		/* ip_len, ip_off */
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netinet/if_ether.h>	/* various ether_* routines */
#include <netinet/ip6.h>	/* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#ifdef NEW_AQM
#include <netpfil/ipfw/dn_aqm.h>
#endif
#include <netpfil/ipfw/dn_sched.h>

/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
 * instead of dn_cfg.curr_time
 */

struct dn_parms dn_cfg;
//VNET_DEFINE(struct dn_parms, _base_dn_cfg);

static long tick_last;		/* Last tick duration (usec). */
static long tick_delta;		/* Last vs standard tick diff (usec). */
static long tick_delta_sum;	/* Accumulated tick difference (usec).*/
static long tick_adjustment;	/* Tick adjustments done. */
static long tick_lost;		/* Number of lost (coalesced) ticks. */
/* Adjusted vs non-adjusted curr_time difference (ticks). */
static long tick_diff;

static unsigned long	io_pkt;
static unsigned long	io_pkt_fast;

#ifdef NEW_AQM
unsigned long	io_pkt_drop;
#else
static unsigned long	io_pkt_drop;
#endif
/*
 * We use a heap to store entities for which we have pending timer events.
 * The heap is checked at every tick and all entities with expired events
 * are extracted.
 */

MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);

#ifdef SYSCTL_NODE

/*
 * Because of the way the SYSBEGIN/SYSEND macros work on other
 * platforms, there should not be functions between them.
 * So keep the handlers outside the block.
 */
static int
sysctl_hash_size(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = dn_cfg.hash_size;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < 16 || value > 65536)
		return (EINVAL);
	dn_cfg.hash_size = value;
	return (0);
}

static int
sysctl_limits(SYSCTL_HANDLER_ARGS)
{
	int error;
	long value;

	if (arg2 != 0)
		value = dn_cfg.slot_limit;
	else
		value = dn_cfg.byte_limit;
	error = sysctl_handle_long(oidp, &value, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);
	if (arg2 != 0) {
		if (value < 1)
			return (EINVAL);
		dn_cfg.slot_limit = value;
	} else {
		if (value < 1500)
			return (EINVAL);
		dn_cfg.byte_limit = value;
	}
	return (0);
}
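
/*
 * For reference, the two handlers above back the net.inet.ip.dummynet.*
 * sysctl tree declared below; e.g. (illustrative shell usage, values are
 * arbitrary examples within the accepted ranges):
 *
 *	sysctl net.inet.ip.dummynet.hash_size=1024	# 16..65536
 *	sysctl net.inet.ip.dummynet.pipe_slot_limit=200	# >= 1
 *	sysctl net.inet.ip.dummynet.pipe_byte_limit=1048576 # >= 1500
 */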

SYSBEGIN(f4)

SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
#ifdef NEW_AQM
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
#else
static SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
#endif

/* wrapper to pass dn_cfg fields to SYSCTL_* */
//#define DC(x)	(&(VNET_NAME(_base_dn_cfg).x))
#define DC(x)	(&(dn_cfg.x))
/* parameters */

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLTYPE_INT | CTLFLAG_RW, 0, 0, sysctl_hash_size,
    "I", "Default hash table size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
    CTLTYPE_LONG | CTLFLAG_RW, 0, 1, sysctl_limits,
    "L", "Upper limit in slots for pipe queue.");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
    CTLTYPE_LONG | CTLFLAG_RW, 0, 0, sysctl_limits,
    "L", "Upper limit in bytes for pipe queue.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
    CTLFLAG_RW, DC(io_fast), 0, "Enable fast dummynet io.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug,
    CTLFLAG_RW, DC(debug), 0, "Dummynet debug level");

/* RED parameters */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD, DC(red_lookup_depth), 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD, DC(red_avg_pkt_size), 0, "RED Medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD, DC(red_max_pkt_size), 0, "RED Max packet size");

/* time adjustment */
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
    CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
    CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
    CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
    CTLFLAG_RD, &tick_diff, 0,
    "Adjusted vs non-adjusted curr_time difference (ticks).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
    CTLFLAG_RD, &tick_lost, 0,
    "Number of ticks coalesced by dummynet taskqueue.");

/* Drain parameters */
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW, DC(expire), 0, "Expire empty queues/pipes");
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle,
    CTLFLAG_RD, DC(expire_cycle), 0, "Expire cycle for queues/pipes");

/* statistics */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count,
    CTLFLAG_RD, DC(schk_count), 0, "Number of schedulers");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count,
    CTLFLAG_RD, DC(si_count), 0, "Number of scheduler instances");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count,
    CTLFLAG_RD, DC(fsk_count), 0, "Number of flowsets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count,
    CTLFLAG_RD, DC(queue_count), 0, "Number of queues");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
    CTLFLAG_RD, &io_pkt, 0,
    "Number of packets passed to dummynet.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
    CTLFLAG_RD, &io_pkt_fast, 0,
    "Number of packets that bypassed the dummynet scheduler.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
    CTLFLAG_RD, &io_pkt_drop, 0,
    "Number of packets dropped by dummynet.");
#undef DC
SYSEND

#endif

static void	dummynet_send(struct mbuf *);

/*
 * Return the mbuf tag holding the dummynet state (it should
 * be the first one on the list).
 */
struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);
#ifdef NEW_AQM
	/* XXX: skip the timestamp m_tag; for debugging only. */
	if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) {
		m_tag_delete(m, mtag);
		mtag = m_tag_first(m);
		D("skip TS tag");
	}
#endif
	KASSERT(mtag != NULL &&
	    mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
	    mtag->m_tag_id == PACKET_TAG_DUMMYNET,
	    ("packet on dummynet queue w/o dummynet tag!"));
	return (struct dn_pkt_tag *)(mtag + 1);
}

#ifndef NEW_AQM
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
#ifdef USERSPACE
	// buffers from netmap need to be copied
	// XXX note that the routine is not expected to fail
	ND("append %p to %p", m, q);
	if (m->m_flags & M_STACK) {
		struct mbuf *m_new;
		void *p;
		int l, ofs;

		ofs = m->m_data - m->__m_extbuf;
		// XXX allocate
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p",
			m, m->__m_extbuf, m->__m_extlen, ofs, m_new);
		p = m_new->__m_extbuf;	/* new pointer */
		l = m_new->__m_extlen;	/* new len */
		if (l <= m->__m_extlen) {
			panic("extlen too large");
		}

		*m_new = *m;	// copy
		m_new->m_flags &= ~M_STACK;
		m_new->__m_extbuf = p; // point to new buffer
		_pkt_copy(m->__m_extbuf, p, m->__m_extlen);
		m_new->m_data = p + ofs;
		m = m_new;
	}
#endif /* USERSPACE */
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->count++;
	q->tail = m;
	m->m_nextpkt = NULL;
}
#endif

/*
 * Dispose of a list of packets. Use a function so that, if we need to do
 * more work, this is a central point to do it.
 */
void
dn_free_pkts(struct mbuf *mnext)
{
	struct mbuf *m;

	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		FREE_PKT(m);
	}
}

static int
red_drops(struct dn_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass filter
	 * with an exponentially weighted (w_q) moving average:
	 * 	avg  <-  (1-w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *	avg = avg * (1 - w_q)^(idle/s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with a probability p_b that is a function of avg.
	 */
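	/*
	 * Illustrative numbers (not from the original source): with
	 * w_q = 0.002 and a current avg of 10 packets, a sampled q_size
	 * of 20 packets moves the average only to
	 * 10 + 0.002 * (20 - 10) = 10.02, so avg tracks the queue slowly
	 * and transient bursts do not immediately trigger drops.
	 */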

	struct dn_fsk *fs = q->fs;
	int64_t p_b = 0;

	/* Queue in bytes or packets? */
	uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ?
	    q->ni.len_bytes : q->ni.length;

	/* Average queue size estimation. */
	if (q_size != 0) {
		/* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * Queue is empty: determine for how long the queue has been
		 * empty and use a lookup table for computing
		 * (1 - w_q)^(idle_time/s) where s is the time to send a
		 * (small) packet.
		 * XXX check wraps...
		 */
		if (q->avg) {
			u_int t = div64((dn_cfg.curr_time - q->q_time), fs->lookup_step);

			q->avg = (t < fs->lookup_depth) ?
			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
		}
	}
	/* Should I drop? */
	if (q->avg < fs->min_th) {
		q->count = -1;
		return (0);	/* accept packet */
	}
	if (q->avg >= fs->max_th) {	/* average queue >= max threshold */
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		if (fs->fs.flags & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than
			 * max_th the packet is dropped with a probability
			 *	 p_b = c_3 * avg - c_4
			 * where c_3 = (1 - max_p) / max_th
			 *       c_4 = 1 - 2 * max_p
			 */
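			/*
			 * Sanity check (added note): at avg == max_th this
			 * gives p_b = (1 - max_p) - (1 - 2 * max_p) = max_p,
			 * matching the endpoint of the linear region below,
			 * and p_b reaches 1 at avg == 2 * max_th.
			 */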
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
			    fs->c_4;
		} else {
			q->count = -1;
			return (1);
		}
	} else if (q->avg > fs->min_th) {
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		/*
		 * We compute p_b using the linear dropping function
		 *	 p_b = c_1 * avg - c_2
		 * where c_1 = max_p / (max_th - min_th)
		 * 	 c_2 = max_p * min_th / (max_th - min_th)
		 */
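		/*
		 * Added note: this line rises from p_b = 0 at
		 * avg == min_th to p_b = max_p at avg == max_th,
		 * as in the original RED paper.
		 */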
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}

	if (fs->fs.flags & DN_QSIZE_BYTES)
		p_b = div64((p_b * len), fs->max_pkt_size);
	if (++q->count == 0)
		q->random = random() & 0xffff;
	else {
		/*
		 * q->count counts packets arrived since last drop, so a greater
		 * value of q->count means a greater packet drop probability.
		 */
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			/* After a drop we calculate a new random value. */
			q->random = random() & 0xffff;
			return (1);	/* drop */
		}
	}
	/* End of RED algorithm. */

	return (0);	/* accept */
}

/*
 * ECN/ECT Processing (partially adopted from altq)
 */
#ifndef NEW_AQM
static
#endif
int
ecn_mark(struct mbuf *m)
{
	struct ip *ip;
	ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off);

	switch (ip->ip_v) {
	case IPVERSION:
	{
		uint16_t old;

		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
			return (0);	/* not-ECT */
		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
			return (1);	/* already marked */

		/*
		 * ECN-capable but not yet marked:
		 * mark CE and update the checksum incrementally.
		 */
		old = *(uint16_t *)ip;
		ip->ip_tos |= IPTOS_ECN_CE;
		ip->ip_sum = cksum_adjust(ip->ip_sum, old, *(uint16_t *)ip);
		return (1);
	}
#ifdef INET6
	case (IPV6_VERSION >> 4):
	{
		struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
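		/*
		 * Added note: in the host-order ip6_flow word the version
		 * occupies bits 28-31 and the traffic class bits 20-27;
		 * the ECN field is the low two bits of the traffic class,
		 * hence the << 20 shifts below.
		 */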
		if ((flowlabel >> 28) != 6)
			return (0);	/* version mismatch! */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_NOTECT << 20))
			return (0);	/* not-ECT */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_CE << 20))
			return (1);	/* already marked */
		/*
		 * ecn-capable but not marked, mark CE
		 */
		flowlabel |= (IPTOS_ECN_CE << 20);
		ip6->ip6_flow = htonl(flowlabel);
		return (1);
	}
#endif
	}
	return (0);
}

/*
 * Enqueue a packet in q, subject to space and queue management policy
 * (whose parameters are in q->fs).
 * Update stats for the queue and the scheduler.
 * Return 0 on success, 1 on drop. The packet is consumed anyway.
 */
int
dn_enqueue(struct dn_queue *q, struct mbuf *m, int drop)
{
	struct dn_fs *f;
	struct dn_flow *ni;	/* stats for scheduler instance */
	uint64_t len;

	if (q->fs == NULL || q->_si == NULL) {
		printf("%s fs %p si %p, dropping\n",
			__FUNCTION__, q->fs, q->_si);
		FREE_PKT(m);
		return 1;
	}
	f = &(q->fs->fs);
	ni = &q->_si->ni;
	len = m->m_pkthdr.len;
	/* Update statistics, then check reasons to drop pkt. */
	q->ni.tot_bytes += len;
	q->ni.tot_pkts++;
	ni->tot_bytes += len;
	ni->tot_pkts++;
	if (drop)
		goto drop;
	if (f->plr && random() < f->plr)
		goto drop;
#ifdef NEW_AQM
	/* Call AQM enqueue function */
	if (q->fs->aqmfp)
		return q->fs->aqmfp->enqueue(q, m);
#endif
	if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) {
		if (!(f->flags & DN_IS_ECN) || !ecn_mark(m))
			goto drop;
	}
	if (f->flags & DN_QSIZE_BYTES) {
		if (q->ni.len_bytes > f->qsize)
			goto drop;
	} else if (q->ni.length >= f->qsize) {
		goto drop;
	}
	mq_append(&q->mq, m);
	q->ni.length++;
	q->ni.len_bytes += len;
	ni->length++;
	ni->len_bytes += len;
	return (0);

drop:
	io_pkt_drop++;
	q->ni.drops++;
	ni->drops++;
	FREE_PKT(m);
	return (1);
}

/*
 * Fetch packets from the delay line which are due now. If there are
 * leftover packets, reinsert the delay line in the heap.
 * Runs under scheduler lock.
 */
static void
transmit_event(struct mq *q, struct delay_line *dline, uint64_t now)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;

	dline->oid.subtype = 0; /* not in heap */
	while ((m = dline->mq.head) != NULL) {
		pkt = dn_tag_get(m);
		if (!DN_KEY_LEQ(pkt->output_time, now))
			break;
		dline->mq.head = m->m_nextpkt;
		dline->mq.count--;
		mq_append(q, m);
	}
	if (m != NULL) {
		dline->oid.subtype = 1; /* in heap */
		heap_insert(&dn_cfg.evheap, pkt->output_time, dline);
	}
}

/*
 * Convert the additional MAC overheads/delays into an equivalent
 * number of bits for the given data rate. The samples are
 * in milliseconds so we need to divide by 1000.
 */
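/*
 * For example (illustrative numbers only): a 2 ms overhead sample on a
 * 1 Mbit/s link charges the packet an extra 2 * 1000000 / 1000 = 2000
 * bits of transmission time.
 */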
static uint64_t
extra_bits(struct mbuf *m, struct dn_schk *s)
{
	int index;
	uint64_t bits;
	struct dn_profile *pf = s->profile;

	if (!pf || pf->samples_no == 0)
		return 0;
	index = random() % pf->samples_no;
	bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000);
	if (index >= pf->loss_level) {
		struct dn_pkt_tag *dt = dn_tag_get(m);
		if (dt)
			dt->dn_dir = DIR_DROP;
	}
	return bits;
}

/*
 * Send traffic from a scheduler instance due by 'now'.
 * Return a pointer to the head of the queue.
 */
static struct mbuf *
serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
{
	struct mq def_q;
	struct dn_schk *s = si->sched;
	struct mbuf *m = NULL;
	int delay_line_idle = (si->dline.mq.head == NULL);
	int done, bw;

	if (q == NULL) {
		q = &def_q;
		q->head = NULL;
	}

	bw = s->link.bandwidth;
	si->kflags &= ~DN_ACTIVE;

	if (bw > 0)
		si->credit += (now - si->sched_time) * bw;
	else
		si->credit = 0;
	si->sched_time = now;
	done = 0;
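	/*
	 * Added note on units: bandwidth is in bits/s and time in ticks,
	 * so credit accrues bw units per tick and a packet of B bits
	 * costs hz * B units, i.e. B / bw seconds of link time.
	 */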
	while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
		uint64_t len_scaled;

		done++;
		len_scaled = (bw == 0) ? 0 : hz *
			(m->m_pkthdr.len * 8 + extra_bits(m, s));
		si->credit -= len_scaled;
		/* Move packet in the delay line */
		dn_tag_get(m)->output_time = dn_cfg.curr_time + s->link.delay;
		mq_append(&si->dline.mq, m);
	}

	/*
	 * If credit >= 0 the instance is idle, mark time.
	 * Otherwise put back in the heap, and adjust the output
	 * time of the last inserted packet, m, which was too early.
	 */
	if (si->credit >= 0) {
		si->idle_time = now;
	} else {
		uint64_t t;
		KASSERT(bw > 0, ("bw=0 and credit<0 ?"));
		t = div64(bw - 1 - si->credit, bw);
		if (m)
			dn_tag_get(m)->output_time += t;
		si->kflags |= DN_ACTIVE;
		heap_insert(&dn_cfg.evheap, now + t, si);
	}
	if (delay_line_idle && done)
		transmit_event(q, &si->dline, now);
	return q->head;
}

/*
 * The timer handler for dummynet. Time is computed in ticks, but
 * the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL }; /* queue to accumulate results */

	CURVNET_SET((struct vnet *)context);

	DN_BH_WLOCK();

	/* Update number of lost (coalesced) ticks. */
	tick_lost += pending - 1;

	getmicrouptime(&t);
	/* Last tick duration (usec). */
	tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
	    (t.tv_usec - dn_cfg.prev_t.tv_usec);
	/* Last tick vs standard tick difference (usec). */
	tick_delta = (tick_last * hz - 1000000) / hz;
	/* Accumulated tick difference (usec). */
	tick_delta_sum += tick_delta;

	dn_cfg.prev_t = t;

	/*
	 * Adjust curr_time if the accumulated tick difference is
	 * greater than the 'standard' tick. Since curr_time should
	 * be monotonically increasing, we do positive adjustments
	 * as required, and throttle curr_time in case of negative
	 * adjustment.
	 */
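	/*
	 * Worked example (illustrative): with hz = 1000 the standard
	 * tick is 1000 usec. If the last tick actually took 1250 usec,
	 * tick_delta = (1250 * 1000 - 1000000) / 1000 = 250 usec; after
	 * four such ticks tick_delta_sum reaches the standard tick and
	 * curr_time advances by one extra tick below.
	 */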
	dn_cfg.curr_time++;
	if (tick_delta_sum - tick >= 0) {
		int diff = tick_delta_sum / tick;

		dn_cfg.curr_time += diff;
		tick_diff += diff;
		tick_delta_sum %= tick;
		tick_adjustment++;
	} else if (tick_delta_sum + tick <= 0) {
		dn_cfg.curr_time--;
		tick_diff--;
		tick_delta_sum += tick;
		tick_adjustment++;
	}

	/* serve pending events, accumulate in q */
	for (;;) {
		struct dn_id *p;    /* generic parameter to handler */

		if (dn_cfg.evheap.elements == 0 ||
		    DN_KEY_LT(dn_cfg.curr_time, HEAP_TOP(&dn_cfg.evheap)->key))
			break;
		p = HEAP_TOP(&dn_cfg.evheap)->object;
		heap_extract(&dn_cfg.evheap, NULL);

		if (p->type == DN_SCH_I) {
			serve_sched(&q, (struct dn_sch_inst *)p, dn_cfg.curr_time);
		} else { /* extracted a delay line */
			transmit_event(&q, (struct delay_line *)p, dn_cfg.curr_time);
		}
	}
	if (dn_cfg.expire && ++dn_cfg.expire_cycle >= dn_cfg.expire) {
		dn_cfg.expire_cycle = 0;
		dn_drain_scheduler();
		dn_drain_queue();
	}

	dn_reschedule();
	DN_BH_WUNLOCK();
	if (q.head != NULL)
		dummynet_send(q.head);
	CURVNET_RESTORE();
}

/*
 * Forward a chain of packets to the proper destination.
 * This runs outside the dummynet lock.
 */
static void
dummynet_send(struct mbuf *m)
{
	struct mbuf *n;

	for (; m != NULL; m = n) {
		struct ifnet *ifp = NULL;	/* gcc 3.4.6 complains */
		struct m_tag *tag;
		int dst;

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		tag = m_tag_first(m);
		if (tag == NULL) { /* should not happen */
			dst = DIR_DROP;
		} else {
			struct dn_pkt_tag *pkt = dn_tag_get(m);
			/* extract the dummynet info, rename the tag
			 * to carry reinject info.
			 */
			if (pkt->dn_dir == (DIR_OUT | PROTO_LAYER2) &&
				pkt->ifp == NULL) {
				dst = DIR_DROP;
			} else {
				dst = pkt->dn_dir;
				ifp = pkt->ifp;
				tag->m_tag_cookie = MTAG_IPFW_RULE;
				tag->m_tag_id = 0;
			}
		}

		switch (dst) {
		case DIR_OUT:
			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
			break;

		case DIR_IN:
			netisr_dispatch(NETISR_IP, m);
			break;

#ifdef INET6
		case DIR_IN | PROTO_IPV6:
			netisr_dispatch(NETISR_IPV6, m);
			break;

		case DIR_OUT | PROTO_IPV6:
			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
			break;
#endif

		case DIR_FWD | PROTO_IFB: /* DN_TO_IFB_FWD: */
			if (bridge_dn_p != NULL)
				(*bridge_dn_p)(m, ifp);
			else
				printf("dummynet: if_bridge not loaded\n");

			break;

		case DIR_IN | PROTO_LAYER2: /* DN_TO_ETH_DEMUX: */
			/*
			 * The Ethernet code assumes the Ethernet header is
			 * contiguous in the first mbuf header.
			 * Ensure this is true.
			 */
			if (m->m_len < ETHER_HDR_LEN &&
			    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
				printf("dummynet/ether: pullup failed, "
				    "dropping packet\n");
				break;
			}
			ether_demux(m->m_pkthdr.rcvif, m);
			break;

		case DIR_OUT | PROTO_LAYER2: /* DN_TO_ETH_OUT: */
			ether_output_frame(ifp, m);
			break;

		case DIR_DROP:
			/* drop the packet after some time */
			FREE_PKT(m);
			break;

		default:
			printf("dummynet: bad switch %d!\n", dst);
			FREE_PKT(m);
			break;
		}
	}
}

static inline int
tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt_tag *dt;
	struct m_tag *mtag;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
		    sizeof(*dt), M_NOWAIT | M_ZERO);
	if (mtag == NULL)
		return 1;		/* Cannot allocate packet header. */
	m_tag_prepend(m, mtag);		/* Attach to mbuf chain. */
	dt = (struct dn_pkt_tag *)(mtag + 1);
	dt->rule = fwa->rule;
	dt->rule.info &= IPFW_ONEPASS;	/* only keep this info */
	dt->dn_dir = dir;
	dt->ifp = fwa->oif;
	/* dt->output_time is updated as we move through */
	dt->output_time = dn_cfg.curr_time;
	dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0;
	return 0;
}

/*
 * dummynet hook for packets.
 * We use the argument to locate the flowset fs and the sched_set sch
 * associated to it. Then we apply flow_mask and sched_mask to
 * determine the queue and scheduler instances.
 *
 * dir		where shall we send the packet after dummynet.
 * *m0		the mbuf with the packet
 * fwa->oif	the 'ifp' parameter from the caller:
 *		NULL in ip_input, destination interface in ip_output.
 */
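/*
 * For context (illustrative ipfw usage, not part of this file): packets
 * reach this hook via rules such as
 *	ipfw add pipe 1 ip from any to any
 *	ipfw pipe 1 config bw 1Mbit/s delay 50ms
 * where the pipe/queue number ends up in fwa->rule.info below.
 */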
int
dummynet_io(struct mbuf **m0, int dir, struct ip_fw_args *fwa)
{
	struct mbuf *m = *m0;
	struct dn_fsk *fs = NULL;
	struct dn_sch_inst *si;
	struct dn_queue *q = NULL;	/* default */

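	/*
	 * Added note: pipe (link) numbers are kept in the same hash as
	 * flowset numbers but offset by 2 * DN_MAX_ID, so the id below
	 * selects either name space.
	 */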
	int fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
	    ((fwa->rule.info & IPFW_IS_PIPE) ? 2 * DN_MAX_ID : 0);
	DN_BH_WLOCK();
	io_pkt++;
	/* we could actually tag outside the lock, but who cares... */
	if (tag_mbuf(m, dir, fwa))
		goto dropit;
	if (dn_cfg.busy) {
		/* if the upper half is busy doing something expensive,
		 * let's queue the packet and move forward
		 */
		mq_append(&dn_cfg.pending, m);
		m = *m0 = NULL; /* consumed */
		goto done; /* already active, nothing to do */
	}
	/* XXX locate_flowset could be optimised with a direct ref. */
	fs = dn_ht_find(dn_cfg.fshash, fs_id, 0, NULL);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */
	if (fs->sched == NULL)	/* should not happen */
		goto dropit;
	/* find scheduler instance, possibly applying sched_mask */
	si = ipdn_si_find(fs->sched, &(fwa->f_id));
	if (si == NULL)
		goto dropit;
	/*
	 * If the scheduler supports multiple queues, find the right one
	 * (otherwise it will be ignored by enqueue).
	 */
	if (fs->sched->fp->flags & DN_MULTIQUEUE) {
		q = ipdn_q_find(fs, si, &(fwa->f_id));
		if (q == NULL)
			goto dropit;
	}
	if (fs->sched->fp->enqueue(si, q, m)) {
		/* packet was dropped by enqueue() */
		m = *m0 = NULL;

		/* dn_enqueue already increases io_pkt_drop */
		io_pkt_drop--;

		goto dropit;
	}

	if (si->kflags & DN_ACTIVE) {
		m = *m0 = NULL; /* consumed */
		goto done; /* already active, nothing to do */
	}

	/* compute the initial allowance */
	if (si->idle_time < dn_cfg.curr_time) {
	    /* Do this only on the first packet on an idle pipe */
	    struct dn_link *p = &fs->sched->link;

	    si->sched_time = dn_cfg.curr_time;
	    si->credit = dn_cfg.io_fast ? p->bandwidth : 0;
	    if (p->burst) {
		uint64_t burst = (dn_cfg.curr_time - si->idle_time) * p->bandwidth;
		if (burst > p->burst)
			burst = p->burst;
		si->credit += burst;
	    }
	}
	/* pass through scheduler and delay line */
	m = serve_sched(NULL, si, dn_cfg.curr_time);

	/* optimization -- pass it back to ipfw for immediate send */
	/*
	 * XXX Don't call dummynet_send() if the scheduler returned the
	 * packet just enqueued; this avoids a lock order reversal.
	 */
	if (/*dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0) {
		/* fast io: rename the tag to carry reinject info. */
		struct m_tag *tag = m_tag_first(m);

		tag->m_tag_cookie = MTAG_IPFW_RULE;
		tag->m_tag_id = 0;
		io_pkt_fast++;
		if (m->m_nextpkt != NULL) {
			printf("dummynet: fast io: pkt chain detected!\n");
			m->m_nextpkt = NULL;
		}
		m = NULL;
	} else {
		*m0 = NULL;
	}
done:
	DN_BH_WUNLOCK();
	if (m)
		dummynet_send(m);
	return 0;

dropit:
	io_pkt_drop++;
	DN_BH_WUNLOCK();
	if (m)
		FREE_PKT(m);
	*m0 = NULL;
	return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS;
}
969