xref: /freebsd/sys/netpfil/ipfw/ip_dn_private.h (revision 5e53a4f90f82c4345f277dd87cc9292f26e04a29)
/*-
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * internal dummynet APIs.
 *
 * $FreeBSD$
 */

#ifndef _IP_DN_PRIVATE_H
#define _IP_DN_PRIVATE_H

/* Debugging support:
 * ND() and D1() compile a debug message away, D() prints a line
 * prefixed with the function name, and DX(level, ...) prints only
 * when the configured debug level exceeds 'level'.
 * If you redefine D() you are expected to redefine all of them.
 */
#ifndef D
#define ND(fmt, ...) do {} while (0)
#define D1(fmt, ...) do {} while (0)
#define D(fmt, ...) printf("%-10s " fmt "\n",      \
	__FUNCTION__, ## __VA_ARGS__)
#define DX(lev, fmt, ...) do {              \
	if (dn_cfg.debug > lev) D(fmt, ## __VA_ARGS__); } while (0)
#endif
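
/*
 * Illustrative usage of the macros above (a sketch, not code from
 * this file; the variable names and the level are made up):
 *
 *	D("new flowset %d", i);		// always printed
 *	DX(4, "bucket %d empty", i);	// only if dn_cfg.debug > 4
 *	ND("per-packet trace %p", m);	// compiled away
 */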

MALLOC_DECLARE(M_DUMMYNET);

#ifndef __linux__
#define div64(a, b)  ((int64_t)(a) / (int64_t)(b))
#endif

#define DN_LOCK_INIT() do {				\
	mtx_init(&dn_cfg.uh_mtx, "dn_uh", NULL, MTX_DEF);	\
	mtx_init(&dn_cfg.bh_mtx, "dn_bh", NULL, MTX_DEF);	\
	} while (0)
#define DN_LOCK_DESTROY() do {				\
	mtx_destroy(&dn_cfg.uh_mtx);			\
	mtx_destroy(&dn_cfg.bh_mtx);			\
	} while (0)
#if 0 /* not used yet */
#define DN_UH_RLOCK()		mtx_lock(&dn_cfg.uh_mtx)
#define DN_UH_RUNLOCK()		mtx_unlock(&dn_cfg.uh_mtx)
#define DN_UH_WLOCK()		mtx_lock(&dn_cfg.uh_mtx)
#define DN_UH_WUNLOCK()		mtx_unlock(&dn_cfg.uh_mtx)
#define DN_UH_LOCK_ASSERT()	mtx_assert(&dn_cfg.uh_mtx, MA_OWNED)
#endif

#define DN_BH_RLOCK()		mtx_lock(&dn_cfg.uh_mtx)
#define DN_BH_RUNLOCK()		mtx_unlock(&dn_cfg.uh_mtx)
#define DN_BH_WLOCK()		mtx_lock(&dn_cfg.uh_mtx)
#define DN_BH_WUNLOCK()		mtx_unlock(&dn_cfg.uh_mtx)
#define DN_BH_LOCK_ASSERT()	mtx_assert(&dn_cfg.uh_mtx, MA_OWNED)
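
/*
 * Illustrative locking pattern (a sketch; only the macros above and
 * ipdn_si_find(), declared later in this file, are real):
 *
 *	DN_BH_WLOCK();
 *	si = ipdn_si_find(s, &id);	// touch bottom-half state
 *	DN_BH_LOCK_ASSERT();		// we must hold the lock here
 *	DN_BH_WUNLOCK();
 */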

SLIST_HEAD(dn_schk_head, dn_schk);
SLIST_HEAD(dn_sch_inst_head, dn_sch_inst);
SLIST_HEAD(dn_fsk_head, dn_fsk);
SLIST_HEAD(dn_queue_head, dn_queue);
SLIST_HEAD(dn_alg_head, dn_alg);

#ifdef NEW_AQM
SLIST_HEAD(dn_aqm_head, dn_aqm); /* for new AQMs */
#endif

struct mq {	/* a basic queue of packets */
	struct mbuf *head, *tail;
	int count;
};
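
/*
 * Packets in a struct mq are chained through m_nextpkt; mq_append()
 * (defined at the end of this file under NEW_AQM) maintains head,
 * tail and count. An illustrative drain loop (a sketch, not part of
 * the API):
 *
 *	struct mbuf *m;
 *	while ((m = q->head) != NULL) {
 *		q->head = m->m_nextpkt;
 *		m->m_nextpkt = NULL;
 *		q->count--;
 *		// ... deliver or free m ...
 *	}
 *	q->tail = NULL;
 */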

static inline void
set_oid(struct dn_id *o, int type, int len)
{
	o->type = type;
	o->len = len;
	o->subtype = 0;
}
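
/*
 * Illustrative use of set_oid() (a sketch; DN_CMD_GET is assumed to
 * be one of the command codes defined in ip_dummynet.h):
 *
 *	struct dn_id oid;
 *	set_oid(&oid, DN_CMD_GET, sizeof(oid));
 */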

/*
 * configuration and global data for a dummynet instance
 *
 * When a configuration is modified from userland, 'id' is incremented
 * so we can use the value to check for stale pointers.
 */
struct dn_parms {
	uint32_t	id;		/* configuration version */

	/* defaults (sysctl-accessible) */
	int	red_lookup_depth;
	int	red_avg_pkt_size;
	int	red_max_pkt_size;
	int	hash_size;
	int	max_hash_size;
	long	byte_limit;		/* max queue sizes */
	long	slot_limit;

	int	io_fast;
	int	debug;

	/* timekeeping */
	struct timeval prev_t;		/* last time dummynet_tick ran */
	struct dn_heap	evheap;		/* scheduled events */

	/* counters of objects -- used for reporting space */
	int	schk_count;
	int	si_count;
	int	fsk_count;
	int	queue_count;

	/* ticks and other stuff */
	uint64_t	curr_time;
	/* flowsets and schedulers are in hash tables, with 'hash_size'
	 * buckets. fshash is looked up on every packet arrival,
	 * so it pays to be generous if we expect many entries.
	 */
	struct dn_ht	*fshash;
	struct dn_ht	*schedhash;
	/* list of flowsets without a scheduler -- use sch_chain */
	struct dn_fsk_head	fsu;	/* list of unlinked flowsets */
	struct dn_alg_head	schedlist;	/* list of algorithms */
#ifdef NEW_AQM
	struct dn_aqm_head	aqmlist;	/* list of AQMs */
#endif

	/* Store the fs/sch to scan when draining. The value is the
	 * bucket number of the hash table. Draining can be disabled
	 * with net.inet.ip.dummynet.expire=0; otherwise it runs every
	 * 'expire' ticks.
	 */
	int drain_fs;
	int drain_sch;
	uint32_t expire;
	uint32_t expire_cycle;	/* tick count */

	int init_done;

	/* If the upper half is busy doing something long, it can set
	 * the busy flag and we will enqueue packets in 'pending' for
	 * later processing.
	 */
	int	busy;
	struct	mq	pending;

#ifdef _KERNEL
	/*
	 * This file is normally used in the kernel, unless we do
	 * some userland tests, in which case we do not need a mtx.
	 * uh_mtx arbitrates between system calls and also
	 * protects fshash, schedhash and fsu (the unlinked flowsets).
	 * These structures are read-only for the lower half.
	 * bh_mtx protects all other structures which may be
	 * modified upon packet arrivals.
	 */
#if defined( __linux__ ) || defined( _WIN32 )
	spinlock_t uh_mtx;
	spinlock_t bh_mtx;
#else
	struct mtx uh_mtx;
	struct mtx bh_mtx;
#endif

#endif /* _KERNEL */
};

/*
 * Delay line, contains all packets on output from a link.
 * Every scheduler instance has one.
 */
struct delay_line {
	struct dn_id oid;
	struct dn_sch_inst *si;
	struct mq mq;
};

/*
 * The kernel side of a flowset. It is linked in a hash table
 * of flowsets, and in a list of children of its parent scheduler.
 * qht is either the queue itself or (if HAVE_MASK) a hash table of
 * queues. Note that the mask to use is (flow_mask|sched_mask), which
 * changes as we attach/detach schedulers. So we store it here.
 *
 * XXX If we want to add scheduler-specific parameters, we need to
 * put them in external storage because the scheduler may not be
 * available when the fsk is created.
 */
struct dn_fsk { /* kernel side of a flowset */
	struct dn_fs fs;
	SLIST_ENTRY(dn_fsk) fsk_next;	/* hash chain for fshash */

	struct ipfw_flow_id fsk_mask;

	/* qht is a hash table of queues, or just a single queue;
	 * a bit in fs.flags tells us which one
	 */
	struct dn_ht	*qht;
	struct dn_schk *sched;		/* Sched we are linked to */
	SLIST_ENTRY(dn_fsk) sch_chain;	/* list of fsk attached to sched */

	/* bucket index used by drain routine to drain queues for this
	 * flowset
	 */
	int drain_bucket;
	/* Parameters related to RED / GRED */
	/* original values are in dn_fs */
	int w_q;		/* queue weight (scaled) */
	int max_th;		/* maximum threshold for queue (scaled) */
	int min_th;		/* minimum threshold for queue (scaled) */
	int max_p;		/* maximum value for p_b (scaled) */

	u_int c_1;		/* max_p/(max_th - min_th) (scaled) */
	u_int c_2;		/* max_p*min_th/(max_th - min_th) (scaled) */
	u_int c_3;		/* for GRED, (1 - max_p)/max_th (scaled) */
	u_int c_4;		/* for GRED, 1 - 2*max_p (scaled) */
	u_int *w_q_lookup;	/* lookup table for computing (1 - w_q)^t */
	u_int lookup_depth;	/* depth of lookup table */
	int lookup_step;	/* granularity inside the lookup table */
	int lookup_weight;	/* equal to (1 - w_q)^t / (1 - w_q)^(t+1) */
	int avg_pkt_size;	/* average packet size */
	int max_pkt_size;	/* max packet size */
#ifdef NEW_AQM
	struct dn_aqm *aqmfp;	/* pointer to AQM functions */
	void *aqmcfg;		/* configuration parameters for AQM */
#endif
};
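
/*
 * Worked example for the RED constants above (values illustrative and
 * shown unscaled): with min_th = 5, max_th = 15 and max_p = 0.1, the
 * drop probability for min_th < avg < max_th is
 *
 *	p_b = c_1 * avg - c_2
 *
 * with c_1 = max_p/(max_th - min_th) = 0.01 and
 * c_2 = max_p*min_th/(max_th - min_th) = 0.05, so p_b ramps linearly
 * from 0 at avg = 5 up to 0.1 at avg = 15.
 */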

/*
 * A queue is created as a child of a flowset unless it belongs to
 * a !MULTIQUEUE scheduler. It is normally in a hash table in the
 * flowset. fs always points to the parent flowset.
 * si normally points to the sch_inst, unless the flowset has been
 * detached from the scheduler -- in this case si == NULL and we
 * should not enqueue.
 */
struct dn_queue {
	struct dn_flow ni;	/* oid, flow_id, stats */
	struct mq mq;	/* packet queue */
	struct dn_sch_inst *_si;	/* owner scheduler instance */
	SLIST_ENTRY(dn_queue) q_next; /* hash chain list for qht */
	struct dn_fsk *fs;		/* parent flowset */

	/* RED parameters */
	int avg;		/* average queue length est. (scaled) */
	int count;		/* arrivals since last RED drop */
	int random;		/* random value (scaled) */
	uint64_t q_time;	/* start of queue idle time */
#ifdef NEW_AQM
	void *aqm_status;	/* per-queue status variables */
#endif
};

/*
 * The kernel side of a scheduler. Contains the userland config,
 * a link, a pointer to extra config arguments from the command line,
 * kernel flags, and a pointer to the scheduler methods.
 * It is stored in a hash table, and holds a list of all
 * flowsets and scheduler instances.
 * XXX sch must be at the beginning, see schk_hash().
 */
struct dn_schk {
	struct dn_sch sch;
	struct dn_alg *fp;	/* Pointer to scheduler functions */
	struct dn_link link;	/* The link, embedded */
	struct dn_profile *profile; /* delay profile, if any */
	struct dn_id *cfg;	/* extra config arguments */

	SLIST_ENTRY(dn_schk) schk_next;  /* hash chain for schedhash */

	struct dn_fsk_head fsk_list;  /* all fsk linked to me */
	struct dn_fsk *fs;	/* Flowset for !MULTIQUEUE */

	/* bucket index used by the drain routine to drain the scheduler
	 * instances of this scheduler.
	 */
	int drain_bucket;

	/* Hash table of all instances (through sch.sched_mask)
	 * or single instance if no mask. Always valid.
	 */
	struct dn_ht	*siht;
};
/*
 * Scheduler instance.
 * Contains variables and all queues relative to this instance.
 * This struct is created at runtime.
 */
struct dn_sch_inst {
	struct dn_flow	ni;	/* oid, flowid and stats */
	SLIST_ENTRY(dn_sch_inst) si_next; /* hash chain for siht */
	struct delay_line dline;
	struct dn_schk *sched;	/* the template */
	int		kflags;	/* DN_ACTIVE */

	int64_t	credit;		/* bits I can transmit (more or less). */
	uint64_t sched_time;	/* time link was scheduled in ready_heap */
	uint64_t idle_time;	/* start of scheduler instance idle time */

	/* q_count is the number of queues that this instance is using.
	 * The counter is incremented or decremented when
	 * a reference from the queue is created or deleted.
	 * It is used to make sure that a scheduler instance can be safely
	 * deleted by the drain routine. See notes below.
	 */
	int q_count;
};

/*
 * NOTE about object drain.
 * The system will automatically (XXX check when) drain queues and
 * scheduler instances when they are idle.
 * A queue is idle when it has no packets; an instance is idle when
 * it is not in the evheap and the corresponding delay line is empty.
 * A queue can be safely deleted when it is idle because the scheduler
 * function xxx_free_queue() will remove any references to it.
 * An instance can only be deleted when no queues reference it. To make
 * sure of that, a counter (q_count) stores the number of queues that
 * point to the instance.
 *
 * XXX
 * Order of scan:
 * - take all flowsets in a bucket of the flowset hash table
 * - take all queues in a bucket of each such flowset
 * - increment the queue bucket
 * - scan the next flowset bucket
 * Nothing is done if a bucket contains no entries.
 *
 * The same schema is used for scheduler instances.
 */
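
/*
 * Illustrative sketch of the scan order described above (pseudocode;
 * the FOREACH_* iteration helpers are hypothetical):
 *
 *	FOREACH_FS_IN_BUCKET(fs, dn_cfg.fshash, dn_cfg.drain_fs) {
 *		FOREACH_Q_IN_BUCKET(q, fs->qht, fs->drain_bucket)
 *			if (q is idle)
 *				delete q;
 *		fs->drain_bucket++;	// next queue bucket next time
 *	}
 *	dn_cfg.drain_fs++;		// next flowset bucket next time
 */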
/* kernel-side flags. Linux has DN_DELETE in fcntl.h */
enum {
	/* 1 and 2 are reserved for the SCAN flags */
	DN_DESTROY	= 0x0004, /* destroy */
	DN_DELETE_FS	= 0x0008, /* destroy flowset */
	DN_DETACH	= 0x0010,
	DN_ACTIVE	= 0x0020, /* object is in evheap */
	DN_F_DLINE	= 0x0040, /* object is a delay line */
	DN_DEL_SAFE	= 0x0080, /* delete a queue only if no longer needed
				   * by scheduler */
	DN_QHT_IS_Q	= 0x0100, /* in flowset, qht is a single queue */
};

/*
 * Packets processed by dummynet have an mbuf tag associated with
 * them that carries their dummynet state.
 * Outside dummynet, only the 'rule' field is relevant, and it must
 * be at the beginning of the structure.
 */
struct dn_pkt_tag {
	struct ipfw_rule_ref rule;	/* matching rule	*/

	/* second part, dummynet specific */
	int dn_dir;		/* action when packet comes out. */
				/* see ip_fw_private.h		*/
	uint64_t output_time;	/* when the pkt is due for delivery */
	struct ifnet *ifp;	/* interface, for ip_output	*/
	struct _ip6dn_args ip6opt;	/* XXX ipv6 options	*/
	uint16_t iphdr_off;	/* IP header offset for mtodo()	*/
};
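
/*
 * Illustrative sketch: retrieving the state when a packet leaves
 * dummynet (dn_tag_get() is declared below; the DIR_* direction
 * codes are assumed to come from ip_fw_private.h):
 *
 *	struct dn_pkt_tag *pkt = dn_tag_get(m);
 *	if (pkt->dn_dir == DIR_OUT)
 *		// ... hand the packet to ip_output() via pkt->ifp ...
 */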

extern struct dn_parms dn_cfg;
//VNET_DECLARE(struct dn_parms, _base_dn_cfg);
//#define dn_cfg	VNET(_base_dn_cfg)

int dummynet_io(struct mbuf **, int, struct ip_fw_args *);
void dummynet_task(void *context, int pending);
void dn_reschedule(void);
struct dn_pkt_tag *dn_tag_get(struct mbuf *m);

struct dn_queue *ipdn_q_find(struct dn_fsk *, struct dn_sch_inst *,
	struct ipfw_flow_id *);
struct dn_sch_inst *ipdn_si_find(struct dn_schk *, struct ipfw_flow_id *);

/*
 * copy_range is a template for requests for ranges of pipes/queues/scheds.
 * The number of ranges is variable and can be derived from o.len.
 * As a default, we use a small number of entries so that the struct
 * fits easily on the stack and is sufficient for most common requests.
 */
#define DEFAULT_RANGES	5
struct copy_range {
	struct dn_id o;
	uint32_t	r[ 2 * DEFAULT_RANGES ];
};
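
/*
 * Illustrative sketch of filling a request for one range (the
 * {start, end} pair layout of r[] and the DN_CMD_GET code are
 * assumptions, not confirmed by this header):
 *
 *	struct copy_range cr;
 *	bzero(&cr, sizeof(cr));
 *	set_oid(&cr.o, DN_CMD_GET,
 *	    sizeof(struct dn_id) + 2 * sizeof(uint32_t));
 *	cr.r[0] = 1;	// first object number in the range
 *	cr.r[1] = 10;	// last object number in the range
 */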

struct copy_args {
	char **start;
	char *end;
	int flags;
	int type;
	struct copy_range *extra;	/* extra filtering */
};

struct sockopt;
int ip_dummynet_compat(struct sockopt *sopt);
int dummynet_get(struct sockopt *sopt, void **compat);
int dn_c_copy_q(void *_ni, void *arg);
int dn_c_copy_pipe(struct dn_schk *s, struct copy_args *a, int nq);
int dn_c_copy_fs(struct dn_fsk *f, struct copy_args *a, int nq);
int dn_compat_copy_queue(struct copy_args *a, void *_o);
int dn_compat_copy_pipe(struct copy_args *a, void *_o);
int copy_data_helper_compat(void *_o, void *_arg);
int dn_compat_calc_size(void);
int do_config(void *p, int l);

/* functions to drain idle objects */
void dn_drain_scheduler(void);
void dn_drain_queue(void);

#ifdef NEW_AQM
int ecn_mark(struct mbuf* m);

/* moved from ip_dn_io.c to here to be available to AQM modules */
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
#ifdef USERSPACE
	// buffers from netmap need to be copied
	// XXX note that the routine is not expected to fail
	ND("append %p to %p", m, q);
	if (m->m_flags & M_STACK) {
		struct mbuf *m_new;
		void *p;
		int l, ofs;

		ofs = m->m_data - m->__m_extbuf;
		// XXX allocate
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p",
			m, m->__m_extbuf, m->__m_extlen, ofs, m_new);
		p = m_new->__m_extbuf;	/* new pointer */
		l = m_new->__m_extlen;	/* new len */
		if (l < m->__m_extlen) {
			/* the new buffer must be able to hold the old data */
			panic("extlen too large");
		}

		*m_new = *m;	// copy
		m_new->m_flags &= ~M_STACK;
		m_new->__m_extbuf = p; // point to new buffer
		_pkt_copy(m->__m_extbuf, p, m->__m_extlen);
		m_new->m_data = p + ofs;
		m = m_new;
	}
#endif /* USERSPACE */
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->count++;
	q->tail = m;
	m->m_nextpkt = NULL;
}
#endif /* NEW_AQM */

#endif /* _IP_DN_PRIVATE_H */