/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * internal dummynet APIs.
 */

#ifndef _IP_DN_PRIVATE_H
#define _IP_DN_PRIVATE_H

/* debugging support
 * use ND() to remove debugging, D() to print a line,
 * DX(level, ...) to print above a certain level.
 * If you redefine D() you are expected to redefine all.
 */
#ifndef D
#define ND(fmt, ...) do {} while (0)
#define D1(fmt, ...) do {} while (0)
#define D(fmt, ...) printf("%-10s " fmt "\n",	\
	__FUNCTION__, ## __VA_ARGS__)
#define DX(lev, fmt, ...) do {			\
	if (V_dn_cfg.debug > lev) D(fmt, ## __VA_ARGS__); } while (0)
#endif
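/*
 * Hedged usage sketch (not part of the original file; the variable names
 * below are made up for illustration):
 *	D("new pipe %d", i);		always prints, prefixed with the
 *					calling function name
 *	DX(2, "slow path for %p", m);	prints only when V_dn_cfg.debug > 2
 *	ND("per-packet trace %p", m);	compiled out entirely
 */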

MALLOC_DECLARE(M_DUMMYNET);

#ifndef __linux__
#define div64(a, b)	((int64_t)(a) / (int64_t)(b))
#endif

#define DN_LOCK_INIT() do {				\
	mtx_init(&V_dn_cfg.uh_mtx, "dn_uh", NULL, MTX_DEF);	\
	mtx_init(&V_dn_cfg.bh_mtx, "dn_bh", NULL, MTX_DEF);	\
	} while (0)
#define DN_LOCK_DESTROY() do {				\
	mtx_destroy(&V_dn_cfg.uh_mtx);			\
	mtx_destroy(&V_dn_cfg.bh_mtx);			\
	} while (0)
#if 0 /* not used yet */
#define DN_UH_RLOCK()		mtx_lock(&V_dn_cfg.uh_mtx)
#define DN_UH_RUNLOCK()		mtx_unlock(&V_dn_cfg.uh_mtx)
#define DN_UH_WLOCK()		mtx_lock(&V_dn_cfg.uh_mtx)
#define DN_UH_WUNLOCK()		mtx_unlock(&V_dn_cfg.uh_mtx)
#define DN_UH_LOCK_ASSERT()	mtx_assert(&V_dn_cfg.uh_mtx, MA_OWNED)
#endif

#define DN_BH_RLOCK()		mtx_lock(&V_dn_cfg.uh_mtx)
#define DN_BH_RUNLOCK()		mtx_unlock(&V_dn_cfg.uh_mtx)
#define DN_BH_WLOCK()		mtx_lock(&V_dn_cfg.uh_mtx)
#define DN_BH_WUNLOCK()		mtx_unlock(&V_dn_cfg.uh_mtx)
#define DN_BH_LOCK_ASSERT()	mtx_assert(&V_dn_cfg.uh_mtx, MA_OWNED)

SLIST_HEAD(dn_fsk_head, dn_fsk);

struct mq {	/* a basic queue of packets */
	struct mbuf *head, *tail;
	int count;
};

static inline void
set_oid(struct dn_id *o, int type, int len)
{
	o->type = type;
	o->len = len;
	o->subtype = 0;
}
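/*
 * Illustrative use (an assumption, not taken from this file): object
 * headers are typically stamped before an object is copied to userland, e.g.
 *	set_oid(&q->ni.oid, DN_QUEUE, sizeof(*q));
 * where DN_QUEUE is one of the object types from the public ip_dummynet.h.
 */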

/*
 * configuration and data for a dummynet instance
 *
 * When a configuration is modified from userland, 'id' is incremented
 * so we can use the value to check for stale pointers.
 */
struct dn_parms {
	uint32_t	id;		/* configuration version */

	/* defaults (sysctl-accessible) */
	int	red_lookup_depth;
	int	red_avg_pkt_size;
	int	red_max_pkt_size;
	int	hash_size;
	int	max_hash_size;
	long	byte_limit;		/* max queue sizes */
	long	slot_limit;

	int	io_fast;
	int	debug;

	/* timekeeping */
	struct timeval	prev_t;		/* last time dummynet_tick ran */
	struct dn_heap	evheap;		/* scheduled events */

	long	tick_last;		/* Last tick duration (usec). */
	long	tick_delta;		/* Last vs standard tick diff (usec). */
	long	tick_delta_sum;		/* Accumulated tick difference (usec). */
	long	tick_adjustment;	/* Tick adjustments done. */
	long	tick_lost;		/* Number of lost (coalesced) ticks. */
	/* Adjusted vs non-adjusted curr_time difference (ticks). */
	long	tick_diff;

	/* counters of objects -- used for reporting space */
	int	schk_count;
	int	si_count;
	int	fsk_count;
	int	queue_count;

	/* packet counters */
	unsigned long	io_pkt;
	unsigned long	io_pkt_fast;
	unsigned long	io_pkt_drop;

	/* ticks and other stuff */
	uint64_t	curr_time;
	/* flowsets and schedulers are in hash tables, with 'hash_size'
	 * buckets. fshash is looked up at every packet arrival
	 * so better be generous if we expect many entries.
	 */
	struct dn_ht	*fshash;
	struct dn_ht	*schedhash;
	/* list of flowsets without a scheduler -- use sch_chain */
	struct dn_fsk_head	fsu;	/* list of unlinked flowsets */

	/* Store the fs/sch to scan when draining. The value is the
	 * bucket number of the hash table. Expire can be disabled
	 * with net.inet.ip.dummynet.expire=0, or it happens every
	 * expire ticks.
	 */
	int drain_fs;
	int drain_sch;
	uint32_t expire;
	uint32_t expire_cycle;	/* tick count */

	int init_done;

#ifdef _KERNEL
	/*
	 * This file is normally used in the kernel, unless we do
	 * some userland tests, in which case we do not need a mtx.
	 * uh_mtx arbitrates between system calls and also
	 * protects fshash, schedhash and fsunlinked.
	 * These structures are readonly for the lower half.
	 * bh_mtx protects all other structures which may be
	 * modified upon packet arrivals.
	 */
#if defined( __linux__ ) || defined( _WIN32 )
	spinlock_t uh_mtx;
	spinlock_t bh_mtx;
#else
	struct mtx uh_mtx;
	struct mtx bh_mtx;
#endif

#endif /* _KERNEL */
};

/*
 * Delay line, contains all packets on output from a link.
 * Every scheduler instance has one.
 */
struct delay_line {
	struct dn_id oid;
	struct dn_sch_inst *si;
	struct mq mq;
};

/*
 * The kernel side of a flowset. It is linked in a hash table
 * of flowsets, and in a list of children of their parent scheduler.
 * qht is either the queue or (if HAVE_MASK) a hash table of queues.
 * Note that the mask to use is the (flow_mask|sched_mask), which
 * changes as we attach/detach schedulers. So we store it here.
 *
 * XXX If we want to add scheduler-specific parameters, we need to
 * put them in external storage because the scheduler may not be
 * available when the fsk is created.
 */
struct dn_fsk {	/* kernel side of a flowset */
	struct dn_fs fs;
	SLIST_ENTRY(dn_fsk) fsk_next;	/* hash chain for fshash */

	struct ipfw_flow_id fsk_mask;

	/* qht is a hash table of queues, or just a single queue
	 * a bit in fs.flags tells us which one
	 */
	struct dn_ht	*qht;
	struct dn_schk *sched;		/* Sched we are linked to */
	SLIST_ENTRY(dn_fsk) sch_chain;	/* list of fsk attached to sched */

	/* bucket index used by drain routine to drain queues for this
	 * flowset
	 */
	int drain_bucket;
	/* Parameters related to RED / GRED */
	/* original values are in dn_fs */
	int w_q;		/* queue weight (scaled) */
	int max_th;		/* maximum threshold for queue (scaled) */
	int min_th;		/* minimum threshold for queue (scaled) */
	int max_p;		/* maximum value for p_b (scaled) */

	u_int c_1;		/* max_p/(max_th-min_th) (scaled) */
	u_int c_2;		/* max_p*min_th/(max_th-min_th) (scaled) */
	u_int c_3;		/* for GRED, (1-max_p)/max_th (scaled) */
	u_int c_4;		/* for GRED, 1 - 2*max_p (scaled) */
	u_int *w_q_lookup;	/* lookup table for computing (1-w_q)^t */
	u_int lookup_depth;	/* depth of lookup table */
	int lookup_step;	/* granularity inside the lookup table */
	int lookup_weight;	/* equal to (1-w_q)^t / (1-w_q)^(t+1) */
	int avg_pkt_size;	/* average packet size */
	int max_pkt_size;	/* max packet size */
#ifdef NEW_AQM
	struct dn_aqm *aqmfp;	/* Pointer to AQM functions */
	void *aqmcfg;		/* configuration parameters for AQM */
#endif
};

/*
 * A queue is created as a child of a flowset unless it belongs to
 * a !MULTIQUEUE scheduler. It is normally in a hash table in the
 * flowset. fs always points to the parent flowset.
 * si normally points to the sch_inst, unless the flowset has been
 * detached from the scheduler -- in this case si == NULL and we
 * should not enqueue.
 */
struct dn_queue {
	struct dn_flow ni;	/* oid, flow_id, stats */
	struct mq mq;		/* packets queue */
	struct dn_sch_inst *_si;	/* owner scheduler instance */
	SLIST_ENTRY(dn_queue) q_next;	/* hash chain list for qht */
	struct dn_fsk *fs;	/* parent flowset. */

	/* RED parameters */
	int avg;		/* average queue length est. (scaled) */
	int count;		/* arrivals since last RED drop */
	int random;		/* random value (scaled) */
	uint64_t q_time;	/* start of queue idle time */
#ifdef NEW_AQM
	void *aqm_status;	/* per-queue status variables */
#endif

};

/*
 * The kernel side of a scheduler. Contains the userland config,
 * a link, pointer to extra config arguments from command line,
 * kernel flags, and a pointer to the scheduler methods.
 * It is stored in a hash table, and holds a list of all
 * flowsets and scheduler instances.
 * XXX sch must be at the beginning, see schk_hash().
 */
struct dn_schk {
	struct dn_sch sch;
	struct dn_alg *fp;	/* Pointer to scheduler functions */
	struct dn_link link;	/* The link, embedded */
	struct dn_profile *profile;	/* delay profile, if any */
	struct dn_id *cfg;	/* extra config arguments */

	SLIST_ENTRY(dn_schk) schk_next;	/* hash chain for schedhash */

	struct dn_fsk_head fsk_list;	/* all fsk linked to me */
	struct dn_fsk *fs;	/* Flowset for !MULTIQUEUE */

	/* bucket index used by the drain routine to drain the scheduler
	 * instance for this flowset.
	 */
	int drain_bucket;

	/* Hash table of all instances (through sch.sched_mask)
	 * or single instance if no mask. Always valid.
	 */
	struct dn_ht	*siht;
};

/*
 * Scheduler instance.
 * Contains variables and all queues relative to this instance.
 * This struct is created at runtime.
 */
struct dn_sch_inst {
	struct dn_flow	ni;	/* oid, flowid and stats */
	SLIST_ENTRY(dn_sch_inst) si_next; /* hash chain for siht */
	struct delay_line dline;
	struct dn_schk *sched;	/* the template */
	int		kflags;	/* DN_ACTIVE */

	int64_t	credit;		/* bits I can transmit (more or less). */
	uint64_t sched_time;	/* time link was scheduled in ready_heap */
	uint64_t idle_time;	/* start of scheduler instance idle time */

	/* q_count is the number of queues that this instance is using.
	 * The counter is incremented or decremented when
	 * a reference from the queue is created or deleted.
	 * It is used to make sure that a scheduler instance can be safely
	 * deleted by the drain routine. See notes below.
	 */
	int q_count;

};

/*
 * NOTE about object drain.
 * The system will automatically (XXX check when) drain queues and
 * scheduler instances when they are idle.
 * A queue is idle when it has no packets; an instance is idle when
 * it is not in the evheap heap, and the corresponding delay line is empty.
 * A queue can be safely deleted when it is idle because the scheduler
 * function xxx_free_queue() will remove any references to it.
 * An instance can only be deleted when no queues reference it. To be sure
 * of that, a counter (q_count) stores the number of queues that are pointing
 * to the instance.
 *
 * XXX
 * Order of scan:
 * - take all flowsets in a bucket of the flowset hash table
 * - take all queues in a bucket for the flowset
 * - increment the queue bucket
 * - scan next flowset bucket
 * Nothing is done if a bucket contains no entries.
 *
 * The same scheme is used for scheduler instances.
 */
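/*
 * A minimal sketch of the scan described above, assuming one bucket is
 * visited per drain pass. It is not compiled; the helper names are
 * hypothetical, and the real entry points are dn_drain_queue() and
 * dn_drain_scheduler(), declared near the end of this file.
 */
#if 0
static void
drain_pass_sketch(void)
{
	/* visit one flowset bucket, draining idle queues of each fs found */
	visit_fs_bucket(V_dn_cfg.drain_fs);	/* hypothetical helper */
	V_dn_cfg.drain_fs++;			/* next pass, next bucket */

	/* same scheme for scheduler instances */
	visit_sch_bucket(V_dn_cfg.drain_sch);	/* hypothetical helper */
	V_dn_cfg.drain_sch++;
}
#endif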

/* kernel-side flags. Linux has DN_DELETE in fcntl.h
 */
enum {
	/* 1 and 2 are reserved for the SCAN flags */
	DN_DESTROY	= 0x0004, /* destroy */
	DN_DELETE_FS	= 0x0008, /* destroy flowset */
	DN_DETACH	= 0x0010,
	DN_ACTIVE	= 0x0020, /* object is in evheap */
	DN_F_DLINE	= 0x0040, /* object is a delay line */
	DN_DEL_SAFE	= 0x0080, /* delete a queue only if no longer needed
				   * by scheduler */
	DN_QHT_IS_Q	= 0x0100, /* in flowset, qht is a single queue */
};

/*
 * Packets processed by dummynet have an mbuf tag associated with
 * them that carries their dummynet state.
 * Outside dummynet, only the 'rule' field is relevant, and it must
 * be at the beginning of the structure.
 */
struct dn_pkt_tag {
	struct ipfw_rule_ref rule;	/* matching rule */

	/* second part, dummynet specific */
	int dn_dir;		/* action when packet comes out. */
				/* see ip_fw_private.h */
	uint64_t output_time;	/* when the pkt is due for delivery */
	uint16_t if_index;
	uint16_t if_idxgen;
	uint16_t iphdr_off;	/* IP header offset for mtodo() */
};

/*
 * Possible values for dn_dir. XXXGL: this needs to be reviewed
 * and converted to the same values that ip_fw_args.flags uses.
 */
enum {
	DIR_OUT =	0,
	DIR_IN =	1,
	DIR_FWD =	2,
	DIR_DROP =	3,
	PROTO_LAYER2 =	0x4, /* set for layer 2 */
	PROTO_IPV4 =	0x08,
	PROTO_IPV6 =	0x10,
	PROTO_IFB =	0x0c, /* layer2 + ifbridge */
};

/*
 * States for the Packet Loss Rate Gilbert-Elliott
 * channel model
 */
enum {
	PLR_STATE_G = 0,
	PLR_STATE_B,
};

//extern struct dn_parms V_dn_cfg;
VNET_DECLARE(struct dn_parms, dn_cfg);
#define V_dn_cfg VNET(dn_cfg)

int dummynet_io(struct mbuf **, struct ip_fw_args *);
void dummynet_sched_lock(void);
void dummynet_sched_unlock(void);
void dummynet_task(void *context, int pending);
void dn_reschedule(void);
struct dn_pkt_tag * dn_tag_get(struct mbuf *m);

struct dn_queue *ipdn_q_find(struct dn_fsk *, struct dn_sch_inst *,
	struct ipfw_flow_id *);
struct dn_sch_inst *ipdn_si_find(struct dn_schk *, struct ipfw_flow_id *);

/*
 * copy_range is a template for requests for ranges of pipes/queues/scheds.
 * The number of ranges is variable and can be derived from o.len.
 * As a default, we use a small number of entries so that the struct
 * fits easily on the stack and is sufficient for most common requests.
 */
#define DEFAULT_RANGES	5
struct copy_range {
	struct dn_id o;
	uint32_t	r[ 2 * DEFAULT_RANGES ];
};
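/*
 * A sizing sketch, assuming the convention suggested by the comment above
 * (not a definitive encoding): r[] holds up to DEFAULT_RANGES (lo, hi)
 * pairs, and a consumer can recover the number of 32-bit entries actually
 * present in a request as
 *	n = (cr->o.len - sizeof(struct dn_id)) / sizeof(uint32_t);
 * provided o.len was set to cover only the used part of r[].
 */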

struct copy_args {
	char **start;
	char *end;
	int flags;
	int type;
	struct copy_range *extra;	/* extra filtering */
};

struct sockopt;
int ip_dummynet_compat(struct sockopt *sopt);
int dummynet_get(struct sockopt *sopt, void **compat);
int dn_c_copy_q (void *_ni, void *arg);
int dn_c_copy_pipe(struct dn_schk *s, struct copy_args *a, int nq);
int dn_c_copy_fs(struct dn_fsk *f, struct copy_args *a, int nq);
int dn_compat_copy_queue(struct copy_args *a, void *_o);
int dn_compat_copy_pipe(struct copy_args *a, void *_o);
int copy_data_helper_compat(void *_o, void *_arg);
int dn_compat_calc_size(void);
int do_config(void *p, size_t l);

/* functions to drain idle objects */
void dn_drain_scheduler(void);
void dn_drain_queue(void);

#ifdef NEW_AQM
int ecn_mark(struct mbuf* m);

/* moved from ip_dn_io.c to here to be available to AQM modules */
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
#ifdef USERSPACE
	// buffers from netmap need to be copied
	// XXX note that the routine is not expected to fail
	ND("append %p to %p", m, q);
	if (m->m_flags & M_STACK) {
		struct mbuf *m_new;
		void *p;
		int l, ofs;

		ofs = m->m_data - m->__m_extbuf;
		// XXX allocate
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p",
			m, m->__m_extbuf, m->__m_extlen, ofs, m_new);
		p = m_new->__m_extbuf;	/* new pointer */
		l = m_new->__m_extlen;	/* new len */
		if (l <= m->__m_extlen) {
			panic("extlen too large");
		}

		*m_new = *m;	// copy
		m_new->m_flags &= ~M_STACK;
		m_new->__m_extbuf = p; // point to new buffer
		_pkt_copy(m->__m_extbuf, p, m->__m_extlen);
		m_new->m_data = p + ofs;
		m = m_new;
	}
#endif /* USERSPACE */
	/* append at the tail of the queue */
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->count++;
	q->tail = m;
	m->m_nextpkt = NULL;	/* terminate the list */
}
#endif /* NEW_AQM */

#endif /* _IP_DN_PRIVATE_H */