xref: /freebsd/sys/netpfil/ipfw/ip_dn_io.c (revision f18976136625a7d016e97bfd9eabddf640b3e06d)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Dummynet portions related to packet handling.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/sysctl.h>

#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <net/if_var.h>	/* NET_EPOCH_... */
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>		/* ip_len, ip_off */
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netinet/if_ether.h>	/* various ether_* routines */
#include <netinet/ip6.h>	/* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#ifdef NEW_AQM
#include <netpfil/ipfw/dn_aqm.h>
#endif
#include <netpfil/ipfw/dn_sched.h>

/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
 * instead of dn_cfg.curr_time.
 */

struct dn_parms dn_cfg;
//VNET_DEFINE(struct dn_parms, _base_dn_cfg);

static long tick_last;		/* Last tick duration (usec). */
static long tick_delta;		/* Last vs standard tick diff (usec). */
static long tick_delta_sum;	/* Accumulated tick difference (usec). */
static long tick_adjustment;	/* Tick adjustments done. */
static long tick_lost;		/* Number of lost (coalesced) ticks. */
/* Adjusted vs non-adjusted curr_time difference (ticks). */
static long tick_diff;

static unsigned long	io_pkt;
static unsigned long	io_pkt_fast;

#ifdef NEW_AQM
unsigned long	io_pkt_drop;
#else
static unsigned long	io_pkt_drop;
#endif
/*
 * We use a heap to store entities for which we have pending timer events.
 * The heap is checked at every tick and all entities with expired events
 * are extracted.
 */

MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);

#ifdef SYSCTL_NODE

/*
 * Because of the way the SYSBEGIN/SYSEND macros work on other
 * platforms, there should not be functions between them.
 * So keep the handlers outside the block.
 */
static int
sysctl_hash_size(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = dn_cfg.hash_size;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < 16 || value > 65536)
		return (EINVAL);
	dn_cfg.hash_size = value;
	return (0);
}

static int
sysctl_limits(SYSCTL_HANDLER_ARGS)
{
	int error;
	long value;

	if (arg2 != 0)
		value = dn_cfg.slot_limit;
	else
		value = dn_cfg.byte_limit;
	error = sysctl_handle_long(oidp, &value, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);
	if (arg2 != 0) {
		if (value < 1)
			return (EINVAL);
		dn_cfg.slot_limit = value;
	} else {
		if (value < 1500)
			return (EINVAL);
		dn_cfg.byte_limit = value;
	}
	return (0);
}
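
/*
 * The handlers above back the net.inet.ip.dummynet.* OIDs declared below.
 * For illustration (values are arbitrary examples, subject to the range
 * checks above: 16..65536 buckets, >= 1 slot, >= 1500 bytes), from
 * userland one would tune them as:
 *
 *	sysctl net.inet.ip.dummynet.hash_size=256
 *	sysctl net.inet.ip.dummynet.pipe_slot_limit=200
 */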

SYSBEGIN(f4)

SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
#ifdef NEW_AQM
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
#else
static SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
#endif

/* wrapper to pass dn_cfg fields to SYSCTL_* */
//#define DC(x)	(&(VNET_NAME(_base_dn_cfg).x))
#define DC(x)	(&(dn_cfg.x))

/* parameters */
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLTYPE_INT | CTLFLAG_RW, 0, 0, sysctl_hash_size,
    "I", "Default hash table size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
    CTLTYPE_LONG | CTLFLAG_RW, 0, 1, sysctl_limits,
    "L", "Upper limit in slots for pipe queue.");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
    CTLTYPE_LONG | CTLFLAG_RW, 0, 0, sysctl_limits,
    "L", "Upper limit in bytes for pipe queue.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
    CTLFLAG_RW, DC(io_fast), 0, "Enable fast dummynet io.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug,
    CTLFLAG_RW, DC(debug), 0, "Dummynet debug level");

/* RED parameters */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD, DC(red_lookup_depth), 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD, DC(red_avg_pkt_size), 0, "RED medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD, DC(red_max_pkt_size), 0, "RED max packet size");

/* time adjustment */
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
    CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
    CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
    CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
    CTLFLAG_RD, &tick_diff, 0,
    "Adjusted vs non-adjusted curr_time difference (ticks).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
    CTLFLAG_RD, &tick_lost, 0,
    "Number of ticks coalesced by dummynet taskqueue.");

/* Drain parameters */
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW, DC(expire), 0, "Expire empty queues/pipes");
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle,
    CTLFLAG_RD, DC(expire_cycle), 0, "Expire cycle for queues/pipes");

/* statistics */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count,
    CTLFLAG_RD, DC(schk_count), 0, "Number of schedulers");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count,
    CTLFLAG_RD, DC(si_count), 0, "Number of scheduler instances");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count,
    CTLFLAG_RD, DC(fsk_count), 0, "Number of flowsets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count,
    CTLFLAG_RD, DC(queue_count), 0, "Number of queues");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
    CTLFLAG_RD, &io_pkt, 0,
    "Number of packets passed to dummynet.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
    CTLFLAG_RD, &io_pkt_fast, 0,
    "Number of packets that bypassed the dummynet scheduler.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
    CTLFLAG_RD, &io_pkt_drop, 0,
    "Number of packets dropped by dummynet.");
#undef DC
SYSEND

#endif

static void	dummynet_send(struct mbuf *);

/*
 * Return the mbuf tag holding the dummynet state (it should
 * be the first one on the list).
 */
struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);
#ifdef NEW_AQM
	/* XXX: skip the ts m_tag; for debugging only. */
	if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) {
		m_tag_delete(m, mtag);
		mtag = m_tag_first(m);
		D("skip TS tag");
	}
#endif
	KASSERT(mtag != NULL &&
	    mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
	    mtag->m_tag_id == PACKET_TAG_DUMMYNET,
	    ("packet on dummynet queue w/o dummynet tag!"));
	return (struct dn_pkt_tag *)(mtag + 1);
}
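
/*
 * Note: m_tag_get() allocates the tag header and its payload contiguously,
 * so the dn_pkt_tag payload sits immediately after the struct m_tag; the
 * (mtag + 1) pointer arithmetic above (and in tag_mbuf() below) relies on
 * this layout.
 */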

#ifndef NEW_AQM
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
#ifdef USERSPACE
	// buffers from netmap need to be copied
	// XXX note that the routine is not expected to fail
	ND("append %p to %p", m, q);
	if (m->m_flags & M_STACK) {
		struct mbuf *m_new;
		void *p;
		int l, ofs;

		ofs = m->m_data - m->__m_extbuf;
		// XXX allocate
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p",
			m, m->__m_extbuf, m->__m_extlen, ofs, m_new);
		p = m_new->__m_extbuf;	/* new pointer */
		l = m_new->__m_extlen;	/* new len */
		if (l <= m->__m_extlen) {
			panic("extlen too large");
		}

		*m_new = *m;	// copy
		m_new->m_flags &= ~M_STACK;
		m_new->__m_extbuf = p; // point to new buffer
		_pkt_copy(m->__m_extbuf, p, m->__m_extlen);
		m_new->m_data = p + ofs;
		m = m_new;
	}
#endif /* USERSPACE */
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->count++;
	q->tail = m;
	m->m_nextpkt = NULL;
}
#endif

/*
 * Dispose of a list of packets. Use a function so that, if we need to do
 * more work, there is a central point to do it.
 */
void
dn_free_pkts(struct mbuf *mnext)
{
	struct mbuf *m;

	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		FREE_PKT(m);
	}
}

static int
red_drops(struct dn_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass filter
	 * with an exponential weighted (w_q) moving average:
	 * 	avg  <-  (1-w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *	avg <- avg * (1 - w_q)^(idle/s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with a probability P that is a function of avg.
	 */
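
	/*
	 * Illustrative numbers (not taken from the code): with min_th = 10
	 * and max_th = 20 slots and max_p = 0.1, an average of 15 slots gives
	 *	p_b = max_p * (avg - min_th) / (max_th - min_th) = 0.05
	 * which is what the linear c_1/c_2 form below computes in fixed-point
	 * arithmetic.
	 */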

	struct dn_fsk *fs = q->fs;
	int64_t p_b = 0;

	/* Queue in bytes or packets? */
	uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ?
	    q->ni.len_bytes : q->ni.length;

	/* Average queue size estimation. */
	if (q_size != 0) {
		/* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * Queue is empty, find for how long the queue has been
		 * empty and use a lookup table for computing
		 * (1 - w_q)^(idle_time/s) where s is the time to send a
		 * (small) packet.
		 * XXX check wraps...
		 */
		if (q->avg) {
			u_int t = div64((dn_cfg.curr_time - q->q_time), fs->lookup_step);

			q->avg = (t < fs->lookup_depth) ?
			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
		}
	}

	/* Should we drop? */
	if (q->avg < fs->min_th) {
		q->count = -1;
		return (0);	/* accept packet */
	}
	if (q->avg >= fs->max_th) {	/* average queue >= max threshold */
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		if (fs->fs.flags & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than
			 * max_th the packet is dropped with a probability
			 *	 p_b = c_3 * avg - c_4
			 * where c_3 = (1 - max_p) / max_th
			 *       c_4 = 1 - 2 * max_p
			 */
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
			    fs->c_4;
		} else {
			q->count = -1;
			return (1);
		}
	} else if (q->avg > fs->min_th) {
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		/*
		 * We compute p_b using the linear dropping function
		 *	 p_b = c_1 * avg - c_2
		 * where c_1 = max_p / (max_th - min_th)
		 * 	 c_2 = max_p * min_th / (max_th - min_th)
		 */
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}

	if (fs->fs.flags & DN_QSIZE_BYTES)
		p_b = div64(p_b * len, fs->max_pkt_size);
	if (++q->count == 0)
		q->random = random() & 0xffff;
	else {
		/*
		 * q->count counts packets arrived since last drop, so a greater
		 * value of q->count means a greater packet drop probability.
		 */
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			/* After a drop we calculate a new random value. */
			q->random = random() & 0xffff;
			return (1);	/* drop */
		}
	}
	/* End of RED algorithm. */

	return (0);	/* accept */
}

/*
 * ECN/ECT Processing (partially adopted from altq)
 */
#ifndef NEW_AQM
static
#endif
int
ecn_mark(struct mbuf *m)
{
	struct ip *ip;
	ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off);

	switch (ip->ip_v) {
	case IPVERSION:
	{
		uint16_t old;

		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
			return (0);	/* not-ECT */
		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
			return (1);	/* already marked */

		/*
		 * ECN-capable but not marked: mark CE and update the
		 * checksum incrementally (RFC 1624), since flipping the
		 * ECN bits changes the TOS byte covered by ip_sum.
		 */
		old = *(uint16_t *)ip;
		ip->ip_tos |= IPTOS_ECN_CE;
		ip->ip_sum = cksum_adjust(ip->ip_sum, old, *(uint16_t *)ip);
		return (1);
	}
#ifdef INET6
	case (IPV6_VERSION >> 4):
	{
		struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return (0);	/* version mismatch! */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_NOTECT << 20))
			return (0);	/* not-ECT */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_CE << 20))
			return (1);	/* already marked */
		/*
		 * ECN-capable but not marked: mark CE. IPv6 has no
		 * header checksum, so no fixup is needed.
		 */
		flowlabel |= (IPTOS_ECN_CE << 20);
		ip6->ip6_flow = htonl(flowlabel);
		return (1);
	}
#endif
	}
	return (0);
}

/*
 * Enqueue a packet in q, subject to space and queue management policy
 * (whose parameters are in q->fs).
 * Update stats for the queue and the scheduler.
 * Return 0 on success, 1 on drop. The packet is consumed anyway.
 */
int
dn_enqueue(struct dn_queue *q, struct mbuf *m, int drop)
{
	struct dn_fs *f;
	struct dn_flow *ni;	/* stats for scheduler instance */
	uint64_t len;

	if (q->fs == NULL || q->_si == NULL) {
		printf("%s fs %p si %p, dropping\n",
			__FUNCTION__, q->fs, q->_si);
		FREE_PKT(m);
		return 1;
	}
	f = &(q->fs->fs);
	ni = &q->_si->ni;
	len = m->m_pkthdr.len;
	/* Update statistics, then check reasons to drop pkt. */
	q->ni.tot_bytes += len;
	q->ni.tot_pkts++;
	ni->tot_bytes += len;
	ni->tot_pkts++;
	if (drop)
		goto drop;
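
	/*
	 * Random early loss: f->plr holds the configured loss rate scaled
	 * to the range of random() (ipfw(8) stores rate * 0x7fffffff here),
	 * so the test below drops each packet with roughly that probability.
	 */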
	if (f->plr && random() < f->plr)
		goto drop;
#ifdef NEW_AQM
	/* Call AQM enqueue function */
	if (q->fs->aqmfp)
		return q->fs->aqmfp->enqueue(q, m);
#endif
	if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) {
		if (!(f->flags & DN_IS_ECN) || !ecn_mark(m))
			goto drop;
	}
	if (f->flags & DN_QSIZE_BYTES) {
		if (q->ni.len_bytes > f->qsize)
			goto drop;
	} else if (q->ni.length >= f->qsize) {
		goto drop;
	}
	mq_append(&q->mq, m);
	q->ni.length++;
	q->ni.len_bytes += len;
	ni->length++;
	ni->len_bytes += len;
	return (0);

drop:
	io_pkt_drop++;
	q->ni.drops++;
	ni->drops++;
	FREE_PKT(m);
	return (1);
}

/*
 * Fetch packets from the delay line which are due now. If there are
 * leftover packets, reinsert the delay line in the heap.
 * Runs under scheduler lock.
 */
static void
transmit_event(struct mq *q, struct delay_line *dline, uint64_t now)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;

	dline->oid.subtype = 0; /* not in heap */
	while ((m = dline->mq.head) != NULL) {
		pkt = dn_tag_get(m);
		if (!DN_KEY_LEQ(pkt->output_time, now))
			break;
		dline->mq.head = m->m_nextpkt;
		dline->mq.count--;
		mq_append(q, m);
	}
	if (m != NULL) {
		dline->oid.subtype = 1; /* in heap */
		heap_insert(&dn_cfg.evheap, pkt->output_time, dline);
	}
}

/*
 * Convert the additional MAC overheads/delays into an equivalent
 * number of bits for the given data rate. The samples are
 * in milliseconds so we need to divide by 1000.
 */
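/*
 * For instance (illustrative numbers only): a 2 ms profile sample on a
 * 1 Mbit/s link yields 0.002 s * 1000000 bit/s = 2000 extra bits charged
 * to the packet being transmitted.
 */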
static uint64_t
extra_bits(struct mbuf *m, struct dn_schk *s)
{
	int index;
	uint64_t bits;
	struct dn_profile *pf = s->profile;

	if (!pf || pf->samples_no == 0)
		return 0;
	index = random() % pf->samples_no;
	bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000);
	if (index >= pf->loss_level) {
		struct dn_pkt_tag *dt = dn_tag_get(m);
		if (dt)
			dt->dn_dir = DIR_DROP;
	}
	return bits;
}

/*
 * Send traffic from a scheduler instance due by 'now'.
 * Return a pointer to the head of the queue.
 */
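/*
 * A note on units: si->credit accumulates bandwidth (bits/s) multiplied by
 * elapsed ticks, while each dequeued packet costs hz * (payload bits +
 * extra_bits()). Both sides carry the same hz scaling factor, so one tick
 * of transmission time at rate bw costs exactly bw units of credit.
 */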
static struct mbuf *
serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
{
	struct mq def_q;
	struct dn_schk *s = si->sched;
	struct mbuf *m = NULL;
	int delay_line_idle = (si->dline.mq.head == NULL);
	int done, bw;

	if (q == NULL) {
		q = &def_q;
		q->head = NULL;
	}

	bw = s->link.bandwidth;
	si->kflags &= ~DN_ACTIVE;

	if (bw > 0)
		si->credit += (now - si->sched_time) * bw;
	else
		si->credit = 0;
	si->sched_time = now;
	done = 0;
	while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
		uint64_t len_scaled;

		done++;
		len_scaled = (bw == 0) ? 0 : hz *
			(m->m_pkthdr.len * 8 + extra_bits(m, s));
		si->credit -= len_scaled;
		/* Move the packet into the delay line. */
		dn_tag_get(m)->output_time = dn_cfg.curr_time + s->link.delay;
		mq_append(&si->dline.mq, m);
	}

	/*
	 * If credit >= 0 the instance is idle, mark time.
	 * Otherwise put back in the heap, and adjust the output
	 * time of the last inserted packet, m, which was too early.
	 */
	if (si->credit >= 0) {
		si->idle_time = now;
	} else {
		uint64_t t;
		KASSERT(bw > 0, ("bw=0 and credit<0 ?"));
		t = div64(bw - 1 - si->credit, bw);
		if (m)
			dn_tag_get(m)->output_time += t;
		si->kflags |= DN_ACTIVE;
		heap_insert(&dn_cfg.evheap, now + t, si);
	}
	if (delay_line_idle && done)
		transmit_event(q, &si->dline, now);
	return q->head;
}

/*
 * The timer handler for dummynet. Time is computed in ticks, but
 * the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL }; /* queue to accumulate results */

	CURVNET_SET((struct vnet *)context);

	DN_BH_WLOCK();

	/* Update the number of lost (coalesced) ticks. */
	tick_lost += pending - 1;

	getmicrouptime(&t);
	/* Last tick duration (usec). */
	tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
	    (t.tv_usec - dn_cfg.prev_t.tv_usec);
	/* Last tick vs standard tick difference (usec). */
	tick_delta = (tick_last * hz - 1000000) / hz;
	/* Accumulated tick difference (usec). */
	tick_delta_sum += tick_delta;

	dn_cfg.prev_t = t;

	/*
	 * Adjust curr_time if the accumulated tick difference is
	 * greater than the 'standard' tick. Since curr_time should
	 * be monotonically increasing, we do positive adjustments
	 * as required, and throttle curr_time in case of negative
	 * adjustment.
	 */
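
	/*
	 * For example (assuming hz = 1000, i.e. a 1000 usec tick): if this
	 * handler actually runs every ~1010 usec, tick_delta is 10 usec per
	 * invocation, so after about 100 invocations tick_delta_sum reaches
	 * a full tick and curr_time is advanced by one extra unit below.
	 */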
	dn_cfg.curr_time++;
	if (tick_delta_sum - tick >= 0) {
		int diff = tick_delta_sum / tick;

		dn_cfg.curr_time += diff;
		tick_diff += diff;
		tick_delta_sum %= tick;
		tick_adjustment++;
	} else if (tick_delta_sum + tick <= 0) {
		dn_cfg.curr_time--;
		tick_diff--;
		tick_delta_sum += tick;
		tick_adjustment++;
	}

	/* Serve pending events, accumulate in q. */
	for (;;) {
		struct dn_id *p;    /* generic parameter to handler */

		if (dn_cfg.evheap.elements == 0 ||
		    DN_KEY_LT(dn_cfg.curr_time, HEAP_TOP(&dn_cfg.evheap)->key))
			break;
		p = HEAP_TOP(&dn_cfg.evheap)->object;
		heap_extract(&dn_cfg.evheap, NULL);

		if (p->type == DN_SCH_I) {
			serve_sched(&q, (struct dn_sch_inst *)p, dn_cfg.curr_time);
		} else { /* extracted a delay line */
			transmit_event(&q, (struct delay_line *)p, dn_cfg.curr_time);
		}
	}
	if (dn_cfg.expire && ++dn_cfg.expire_cycle >= dn_cfg.expire) {
		dn_cfg.expire_cycle = 0;
		dn_drain_scheduler();
		dn_drain_queue();
	}

	dn_reschedule();
	DN_BH_WUNLOCK();
	if (q.head != NULL)
		dummynet_send(q.head);
	CURVNET_RESTORE();
}

/*
 * Forward a chain of packets to the proper destination.
 * This runs outside the dummynet lock.
 */
static void
dummynet_send(struct mbuf *m)
{
	struct mbuf *n;

	NET_EPOCH_ASSERT();

	for (; m != NULL; m = n) {
		struct ifnet *ifp = NULL;	/* gcc 3.4.6 complains */
		struct m_tag *tag;
		int dst;

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		tag = m_tag_first(m);
		if (tag == NULL) { /* should not happen */
			dst = DIR_DROP;
		} else {
			struct dn_pkt_tag *pkt = dn_tag_get(m);
			/* extract the dummynet info, rename the tag
			 * to carry reinject info.
			 */
			if (pkt->dn_dir == (DIR_OUT | PROTO_LAYER2) &&
				pkt->ifp == NULL) {
				dst = DIR_DROP;
			} else {
				dst = pkt->dn_dir;
				ifp = pkt->ifp;
				tag->m_tag_cookie = MTAG_IPFW_RULE;
				tag->m_tag_id = 0;
			}
		}

		switch (dst) {
		case DIR_OUT:
			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
			break;

		case DIR_IN:
			netisr_dispatch(NETISR_IP, m);
			break;

#ifdef INET6
		case DIR_IN | PROTO_IPV6:
			netisr_dispatch(NETISR_IPV6, m);
			break;

		case DIR_OUT | PROTO_IPV6:
			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
			break;
#endif

		case DIR_FWD | PROTO_IFB: /* DN_TO_IFB_FWD: */
			if (bridge_dn_p != NULL)
				(*bridge_dn_p)(m, ifp);
			else
				printf("dummynet: if_bridge not loaded\n");
			break;

		case DIR_IN | PROTO_LAYER2: /* DN_TO_ETH_DEMUX: */
			/*
			 * The Ethernet code assumes the Ethernet header is
			 * contiguous in the first mbuf header.
			 * Ensure this is true.
			 */
			if (m->m_len < ETHER_HDR_LEN &&
			    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
				printf("dummynet/ether: pullup failed, "
				    "dropping packet\n");
				break;
			}
			ether_demux(m->m_pkthdr.rcvif, m);
			break;

		case DIR_OUT | PROTO_LAYER2: /* DN_TO_ETH_OUT: */
			ether_output_frame(ifp, m);
			break;

		case DIR_DROP:
			/* drop the packet after some time */
			FREE_PKT(m);
			break;

		default:
			printf("dummynet: bad switch %d!\n", dst);
			FREE_PKT(m);
			break;
		}
	}
}

static inline int
tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt_tag *dt;
	struct m_tag *mtag;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
		    sizeof(*dt), M_NOWAIT | M_ZERO);
	if (mtag == NULL)
		return 1;		/* Cannot allocate packet header. */
	/* Prepend so the dummynet tag is first, as dn_tag_get() expects. */
	m_tag_prepend(m, mtag);
	dt = (struct dn_pkt_tag *)(mtag + 1);
	dt->rule = fwa->rule;
	dt->rule.info &= IPFW_ONEPASS;	/* only keep this info */
	dt->dn_dir = dir;
	dt->ifp = fwa->flags & IPFW_ARGS_OUT ? fwa->ifp : NULL;
	/* dt->output_time is updated as the packet moves through. */
	dt->output_time = dn_cfg.curr_time;
	dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0;
	return 0;
}

/*
 * dummynet hook for packets.
 * We use the argument to locate the flowset fs and the sched_set sch
 * associated to it. Then we apply flow_mask and sched_mask to
 * determine the queue and scheduler instances.
 */
int
dummynet_io(struct mbuf **m0, struct ip_fw_args *fwa)
{
	struct mbuf *m = *m0;
	struct dn_fsk *fs = NULL;
	struct dn_sch_inst *si;
	struct dn_queue *q = NULL;	/* default */
	int fs_id, dir;

	fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
		((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0);
	/* XXXGL: convert args to dir */
	if (fwa->flags & IPFW_ARGS_IN)
		dir = DIR_IN;
	else
		dir = DIR_OUT;
	if (fwa->flags & IPFW_ARGS_ETHER)
		dir |= PROTO_LAYER2;
	else if (fwa->flags & IPFW_ARGS_IP6)
		dir |= PROTO_IPV6;
	DN_BH_WLOCK();
	io_pkt++;
	/* we could actually tag outside the lock, but who cares... */
	if (tag_mbuf(m, dir, fwa))
		goto dropit;
	if (dn_cfg.busy) {
		/*
		 * If the upper half is busy doing something expensive,
		 * let's queue the packet and move forward.
		 */
		mq_append(&dn_cfg.pending, m);
		m = *m0 = NULL; /* consumed */
		goto done; /* already active, nothing to do */
	}
	/* XXX locate_flowset could be optimised with a direct ref. */
	fs = dn_ht_find(dn_cfg.fshash, fs_id, 0, NULL);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */
	if (fs->sched == NULL)	/* should not happen */
		goto dropit;
	/* find scheduler instance, possibly applying sched_mask */
	si = ipdn_si_find(fs->sched, &(fwa->f_id));
	if (si == NULL)
		goto dropit;
	/*
	 * If the scheduler supports multiple queues, find the right one
	 * (otherwise it will be ignored by enqueue).
	 */
	if (fs->sched->fp->flags & DN_MULTIQUEUE) {
		q = ipdn_q_find(fs, si, &(fwa->f_id));
		if (q == NULL)
			goto dropit;
	}
	if (fs->sched->fp->enqueue(si, q, m)) {
		/* packet was dropped by enqueue() */
		m = *m0 = NULL;

		/* dn_enqueue already increased io_pkt_drop */
		io_pkt_drop--;

		goto dropit;
	}

	if (si->kflags & DN_ACTIVE) {
		m = *m0 = NULL; /* consumed */
		goto done; /* already active, nothing to do */
	}

	/* compute the initial allowance */
	if (si->idle_time < dn_cfg.curr_time) {
		/* Do this only on the first packet on an idle pipe. */
		struct dn_link *p = &fs->sched->link;

		si->sched_time = dn_cfg.curr_time;
		si->credit = dn_cfg.io_fast ? p->bandwidth : 0;
		if (p->burst) {
			uint64_t burst = (dn_cfg.curr_time - si->idle_time) * p->bandwidth;
			if (burst > p->burst)
				burst = p->burst;
			si->credit += burst;
		}
	}
	/* pass through scheduler and delay line */
	m = serve_sched(NULL, si, dn_cfg.curr_time);

	/*
	 * Optimization: pass the packet back to ipfw for immediate send.
	 * XXX Don't call dummynet_send() if the scheduler returned the
	 * packet just enqueued; this avoids a lock order reversal.
	 */
	if (/*dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0) {
		/* fast io: rename the tag to carry reinject info. */
		struct m_tag *tag = m_tag_first(m);

		tag->m_tag_cookie = MTAG_IPFW_RULE;
		tag->m_tag_id = 0;
		io_pkt_fast++;
		if (m->m_nextpkt != NULL) {
			printf("dummynet: fast io: pkt chain detected!\n");
			m->m_nextpkt = NULL;
		}
		m = NULL;
	} else {
		*m0 = NULL;
	}
done:
	DN_BH_WUNLOCK();
	if (m)
		dummynet_send(m);
	return 0;

dropit:
	io_pkt_drop++;
	DN_BH_WUNLOCK();
	if (m)
		FREE_PKT(m);
	*m0 = NULL;
	return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS;
}
979