/*
 * Copyright (c) 2010 Fabio Checconi, Luigi Rizzo, Paolo Valente
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 */

#ifdef _KERNEL
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <net/if.h>	/* IFNAMSIZ */
#include <netinet/in.h>
#include <netinet/ip_var.h>		/* ipfw_rule_ref */
#include <netinet/ip_fw.h>	/* flow_id */
#include <netinet/ip_dummynet.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#include <netpfil/ipfw/dn_sched.h>
#else
#include <dn_test.h>
#endif

#ifdef QFQ_DEBUG
#define _P64	unsigned long long	/* cast for printing uint64_t */
struct qfq_sched;
static void dump_sched(struct qfq_sched *q, const char *msg);
#define	NO(x)	x
#else
#define NO(x)
#endif
#define DN_SCHED_QFQ	4 // XXX Where?
typedef	unsigned long	bitmap;

/*
 * bitmap ops are critical. Some Linux versions have __fls
 * and the bitmap ops; some machines only have ffs().
 * NOTE: fls() returns 1 for the least significant bit,
 *       __fls() returns 0 for the same case.
 * We use the base-0 version __fls() to match the description in
 * the ToN QFQ paper.
 */
#if defined(_WIN32) || (defined(__MIPSEL__) && defined(LINUX_24))
int fls(unsigned int n)
{
	int i = 0;
	for (i = 0; n > 0; n >>= 1, i++)
		;
	return i;
}
#endif

#if !defined(_KERNEL) || defined( __FreeBSD__ ) || defined(_WIN32) || (defined(__MIPSEL__) && defined(LINUX_24))
static inline unsigned long __fls(unsigned long word)
{
	return fls(word) - 1;
}
#endif

#if !defined(_KERNEL) || !defined(__linux__)
#ifdef QFQ_DEBUG
static int test_bit(int ix, bitmap *p)
{
	if (ix < 0 || ix > 31)
		D("bad index %d", ix);
	return *p & (1<<ix);
}
static void __set_bit(int ix, bitmap *p)
{
	if (ix < 0 || ix > 31)
		D("bad index %d", ix);
	*p |= (1<<ix);
}
static void __clear_bit(int ix, bitmap *p)
{
	if (ix < 0 || ix > 31)
		D("bad index %d", ix);
	*p &= ~(1<<ix);
}
#else /* !QFQ_DEBUG */
/* XXX do we have fast version, or leave it to the compiler ? */
#define test_bit(ix, pData)	((*pData) & (1<<(ix)))
#define __set_bit(ix, pData)	(*pData) |= (1<<(ix))
#define __clear_bit(ix, pData)	(*pData) &= ~(1<<(ix))
#endif /* !QFQ_DEBUG */
#endif /* !__linux__ */

#ifdef __MIPSEL__
#define __clear_bit(ix, pData)	(*pData) &= ~(1<<(ix))
#endif

/*-------------------------------------------*/
/*

Virtual time computations.

S, F and V are all computed in fixed point arithmetic with
FRAC_BITS decimal bits.

   QFQ_MAX_INDEX is the maximum index allowed for a group. We need
	one bit per index.
   QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
   The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
				 ^.__grp->index = 0
				 *.__grp->slot_shift

   where MIN_SLOT_SHIFT is derived by difference from the others.

The max group index corresponds to Lmax/w_min, where
Lmax=1<<MTU_SHIFT, w_min = 1.
From this, and knowing how many groups (MAX_INDEX) we want,
we can derive the shift corresponding to each group.

Because we often need to compute
	F = S + len/w_i  and V = V + len/wsum
instead of storing w_i we store the value
	inv_w = (1<<FRAC_BITS)/w_i
so we can compute F = S + len * inv_w. Similarly we keep
	iwsum = (1<<FRAC_BITS)/wsum
so that V = V + len * iwsum. Storing the inverse of the weight
sum makes it easy to move between a static and an adaptive
weight sum.

The per-scheduler-instance data contain all the data structures
for the scheduler: bitmaps and bucket lists.

 */
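/*
 * A rough worked example of the fixed point arithmetic above (the
 * numbers are purely illustrative, not part of the original notes):
 * with FRAC_BITS = 30, a class with weight w_i = 4 stores
 *	inv_w = ONE_FP / 4 = 1 << 28
 * so a 1500-byte packet advances its finish time F by
 *	1500 * inv_w = 1500 << 28, i.e. 1500/4 in fixed point.
 * Likewise, if the weight sum is 8, iwsum = 1 << 27 and the same
 * packet advances the system virtual time V by 1500 << 27.
 */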
/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group. This is approx lmax/lmin + 5.
 * XXX check because it poses constraints on MAX_INDEX
 */
#define QFQ_MAX_SLOTS	32
/*
 * Shifts used for class<->group mapping. Class weights are
 * in the range [1, QFQ_MAX_WEIGHT]; we map each class i to the
 * group with the smallest index that can support the L_i / r_i
 * configured for the class.
 *
 * grp->index is the index of the group, and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 *
 * When computing the group index we do (len<<FRAC_BITS)/weight,
 * then compute an FLS (which is like a log2()), and if the result
 * is below the MAX_INDEX region we use 0 (which is the same as
 * using a larger len).
 */
#define QFQ_MAX_INDEX		19
#define QFQ_MAX_WSHIFT		16	/* log2(max_weight) */

#define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT)
#define QFQ_MAX_WSUM		(2*QFQ_MAX_WEIGHT)

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)

#define QFQ_MTU_SHIFT		11	/* log2(max_len) */
#define QFQ_MIN_SLOT_SHIFT	(FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
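/*
 * Spelling out the arithmetic above as a sanity check (no new
 * information, just the default values worked through):
 *	ONE_FP             = 1 << 30
 *	QFQ_MIN_SLOT_SHIFT = 30 + 11 - 19 = 22
 * and, from qfq_new_sched() below, group i uses
 *	slot_shift = QFQ_MTU_SHIFT + FRAC_BITS - (QFQ_MAX_INDEX - i)
 *	           = 22 + i
 * so group 0 has the finest slot granularity and group 19 the
 * coarsest (one slot spans 2^41 units, i.e. 2048 bytes in fixed point).
 */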

/*
 * Possible group states, also indexes for the bitmaps array in
 * struct qfq_sched. We rely on ER, IR, EB, IB being numbered 0..3
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };

struct qfq_group;
/*
 * additional queue info. Some of this info should come from
 * the flowset; we copy it here for faster processing.
 * This is an overlay of the struct dn_queue
 */
struct qfq_class {
	struct dn_queue _q;
	uint64_t S, F;		/* flow timestamps (exact) */
	struct qfq_class *next; /* Link for the slot list. */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the flowset. */
	uint32_t	inv_w;	/* ONE_FP/weight */
	uint32_t	lmax;	/* Max packet size for this flow. */
};

/* Group descriptor, see the paper for details.
 * Basically this contains the bucket lists
 */
struct qfq_group {
	uint64_t S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	bitmap full_slots;		/* non-empty slots */

	/* Array of lists of active classes. */
	struct qfq_class *slots[QFQ_MAX_SLOTS];
};

/* scheduler instance descriptor. */
struct qfq_sched {
	uint64_t	V;		/* Precise virtual time. */
	uint32_t	wsum;		/* weight sum */
	uint32_t	iwsum;		/* inverse weight sum */
	NO(uint32_t	i_wsum;)	/* ONE_FP/w_sum */
	NO(uint32_t	queued;)	/* debugging */
	NO(uint32_t	loops;)		/* debugging */
	bitmap bitmaps[QFQ_MAX_STATE];	/* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
};

/*---- support functions ----------------------------*/

/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

/* Round a precise timestamp to its slotted value. */
static inline uint64_t qfq_round_down(uint64_t ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}
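/*
 * For instance (an illustrative value, not taken from the code above):
 * qfq_round_down(0x12345, 8) clears the low 8 bits and returns 0x12300,
 * i.e. the start of the slot that contains the timestamp.
 */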

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = ffs(bitmap) - 1; // zero-based
	return &q->groups[index];
}

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(uint32_t inv_w, unsigned int maxlen)
{
	uint64_t slot_size = (uint64_t)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = (unsigned long)(slot_size >> QFQ_MIN_SLOT_SHIFT);
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	// basically a log_2()
	index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));

	if (index < 0)
		index = 0;

out:
	ND("W = %d, L = %d, I = %d\n", ONE_FP/inv_w, maxlen, index);
	return index;
}
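/*
 * A worked example (numbers chosen here purely for illustration):
 * for a flow with weight 1 and maxlen 1500, inv_w = ONE_FP so
 *	slot_size = 1500 << 30,   size_map = slot_size >> 22 = 1500 << 8
 * __fls(size_map) = 18 and index = 19 = QFQ_MAX_INDEX.
 * With weight 64 and the same maxlen, inv_w = 1 << 24, giving
 * size_map = 6000, __fls = 12 and index = 13: heavier flows map to
 * groups with a finer slot granularity.
 */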
/*---- end support functions ----*/

/*-------- API calls --------------------------------*/
/*
 * Validate and copy parameters from flowset.
 */
static int
qfq_new_queue(struct dn_queue *_q)
{
	struct qfq_sched *q = (struct qfq_sched *)(_q->_si + 1);
	struct qfq_class *cl = (struct qfq_class *)_q;
	int i;
	uint32_t w;	/* approximated weight */

	/* import parameters from the flowset. They should be correct
	 * already.
	 */
	w = _q->fs->fs.par[0];
	cl->lmax = _q->fs->fs.par[1];
	if (!w || w > QFQ_MAX_WEIGHT) {
		w = 1;
		D("rounding weight to 1");
	}
	cl->inv_w = ONE_FP/w;
	w = ONE_FP/cl->inv_w;
	if (q->wsum + w > QFQ_MAX_WSUM)
		return EINVAL;

	i = qfq_calc_index(cl->inv_w, cl->lmax);
	cl->grp = &q->groups[i];
	q->wsum += w;
	q->iwsum = ONE_FP / q->wsum; /* XXX note theory */
	// XXX cl->S = q->V; ?
	return 0;
}

/* remove an empty queue */
static int
qfq_free_queue(struct dn_queue *_q)
{
	struct qfq_sched *q = (struct qfq_sched *)(_q->_si + 1);
	struct qfq_class *cl = (struct qfq_class *)_q;
	if (cl->inv_w) {
		q->wsum -= ONE_FP/cl->inv_w;
		if (q->wsum != 0)
			q->iwsum = ONE_FP / q->wsum;
		cl->inv_w = 0; /* reset weight to avoid running this twice */
	}
	return 0;
}

/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long
mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}
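/*
 * For example (an illustrative bit pattern, not from the original):
 * mask_from(0x16, 2) clears bits 0 and 1, i.e. 10110b -> 10100b,
 * so a subsequent ffs() effectively starts the search at bit 2.
 */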

/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static inline unsigned int
qfq_calc_state(struct qfq_sched *q, struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}

/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void
qfq_move_groups(struct qfq_sched *q, unsigned long mask, int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}

static inline void
qfq_unblock_groups(struct qfq_sched *q, int index, uint64_t old_finish)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_finish))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= QFQ_MIN_SLOT_SHIFT;
	if (old_V) {
		...
	}
 *
 */
static inline void
qfq_make_eligible(struct qfq_sched *q, uint64_t old_V)
{
	unsigned long mask, vslot, old_vslot;

	vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
	old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;

	if (vslot != old_vslot) {
		/* must be 2ULL, see ToN QFQ article fig.5, we use base-0 fls */
		mask = (2ULL << (__fls(vslot ^ old_vslot))) - 1;
		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}
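/*
 * Example of the mask computation above (made-up slot values):
 * if old_vslot = 5 (101b) and vslot = 9 (1001b), the xor is 1100b,
 * __fls() returns 3 and mask = (2 << 3) - 1 = 0xf, so groups 0..3
 * are moved from the ineligible to the eligible bitmaps.
 */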

/*
 * XXX we should make sure that slot becomes less than 32.
 * This is guaranteed by the input values.
 * roundedS is always cl->S rounded on grp->slot_shift bits.
 */
static inline void
qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl, uint64_t roundedS)
{
	uint64_t slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;

	cl->next = grp->slots[i];
	grp->slots[i] = cl;
	__set_bit(slot, &grp->full_slots);
}

/*
 * remove the entry from the slot
 */
static inline void
qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_class **h = &grp->slots[grp->front];

	*h = (*h)->next;
	if (!*h)
		__clear_bit(0, &grp->full_slots);
}

/*
 * Returns the first full queue in a group. As a side effect,
 * adjusts the bucket list so the first non-empty bucket is at
 * position 0 in full_slots.
 */
static inline struct qfq_class *
qfq_slot_scan(struct qfq_group *grp)
{
	int i;

	ND("grp %d full %x", grp->index, grp->full_slots);
	if (!grp->full_slots)
		return NULL;

	i = ffs(grp->full_slots) - 1; // zero-based
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return grp->slots[grp->front];
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time?
 * Here too we should make sure that i is less than 32
 */
static inline void
qfq_slot_rotate(struct qfq_sched *q, struct qfq_group *grp, uint64_t roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	(void)q;
	grp->full_slots <<= i;
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}
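/*
 * For instance (hypothetical numbers): if the group's start time moves
 * back by 3 slots, i = 3, full_slots is shifted left by 3 and front
 * moves back by 3 (mod QFQ_MAX_SLOTS). Since QFQ_MAX_SLOTS is a power
 * of two, the unsigned wraparound of (front - i) still yields the
 * correct modulo result, e.g. front 0 becomes 29.
 */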


static inline void
qfq_update_eligible(struct qfq_sched *q, uint64_t old_V)
{
	bitmap ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			struct qfq_group *grp;
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q, old_V);
	}
}

/*
 * Updates the class, returns true if the group needs to be updated
 * as well.
 */
static inline int
qfq_update_class(struct qfq_sched *q, struct qfq_group *grp,
	    struct qfq_class *cl)
{

	(void)q;
	cl->S = cl->F;
	if (cl->_q.mq.head == NULL) {
		qfq_front_slot_remove(grp);
	} else {
		unsigned int len;
		uint64_t roundedS;

		len = cl->_q.mq.head->m_pkthdr.len;
		cl->F = cl->S + (uint64_t)len * cl->inv_w;
		roundedS = qfq_round_down(cl->S, grp->slot_shift);
		if (roundedS == grp->S)
			return 0;

		qfq_front_slot_remove(grp);
		qfq_slot_insert(grp, cl, roundedS);
	}
	return 1;
}

static struct mbuf *
qfq_dequeue(struct dn_sch_inst *si)
{
	struct qfq_sched *q = (struct qfq_sched *)(si + 1);
	struct qfq_group *grp;
	struct qfq_class *cl;
	struct mbuf *m;
	uint64_t old_V;

	NO(q->loops++;)
	if (!q->bitmaps[ER]) {
		NO(if (q->queued)
			dump_sched(q, "start dequeue");)
		return NULL;
	}

	grp = qfq_ffs(q, q->bitmaps[ER]);

	cl = grp->slots[grp->front];
	/* extract from the first bucket in the bucket list */
	m = dn_dequeue(&cl->_q);

	if (!m) {
		D("BUG/* non-workconserving leaf */");
		return NULL;
	}
	NO(q->queued--;)
	old_V = q->V;
	q->V += (uint64_t)m->m_pkthdr.len * q->iwsum;
	ND("m is %p F 0x%llx V now 0x%llx", m, cl->F, q->V);

	if (qfq_update_class(q, grp, cl)) {
		uint64_t old_F = grp->F;
		cl = qfq_slot_scan(grp);
		if (!cl) { /* group gone, remove from ER */
			__clear_bit(grp->index, &q->bitmaps[ER]);
			// grp->S = grp->F + 1; // XXX debugging only
		} else {
			uint64_t roundedS = qfq_round_down(cl->S, grp->slot_shift);
			unsigned int s;

			if (grp->S == roundedS)
				goto skip_unblock;
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			/* remove from ER and put in the new set */
			__clear_bit(grp->index, &q->bitmaps[ER]);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
		/* we need to unblock even if the group has gone away */
		qfq_unblock_groups(q, grp->index, old_F);
	}

skip_unblock:
	qfq_update_eligible(q, old_V);
	NO(if (!q->bitmaps[ER] && q->queued)
		dump_sched(q, "end dequeue");)

	return m;
}

/*
 * Assign a reasonable start time for a new flow k in group i.
 * Admissible values for \hat(F) are multiples of \sigma_i
 * no greater than V+\sigma_i. Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in ER. So, if we have groups in ER, set S to
 * the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static inline void
qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
{
	unsigned long mask;
	uint64_t limit, roundedF;
	int slot_shift = cl->grp->slot_shift;

	roundedF = qfq_round_down(cl->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

	if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], cl->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				/* from pv 71261956973ba9e0637848a5adb4a5819b4bae83 */
				if (qfq_gt(limit, next->F))
					cl->S = next->F;
				else /* preserve timestamp correctness */
					cl->S = limit;
				return;
			}
		}
		cl->S = q->V;
	} else { /* timestamp is not stale */
		cl->S = cl->F;
	}
}

static int
qfq_enqueue(struct dn_sch_inst *si, struct dn_queue *_q, struct mbuf *m)
{
	struct qfq_sched *q = (struct qfq_sched *)(si + 1);
	struct qfq_group *grp;
	struct qfq_class *cl = (struct qfq_class *)_q;
	uint64_t roundedS;
	int s;

	NO(q->loops++;)
	DX(4, "len %d flow %p inv_w 0x%x grp %d", m->m_pkthdr.len,
		_q, cl->inv_w, cl->grp->index);
	/* XXX verify that the packet obeys the parameters */
	if (m != _q->mq.head) {
		if (dn_enqueue(_q, m, 0)) /* packet was dropped */
			return 1;
		NO(q->queued++;)
		if (m != _q->mq.head)
			return 0;
	}
	/* If we reach this point, queue q was idle */
	grp = cl->grp;
	qfq_update_start(q, cl); /* adjust start time */
	/* compute new finish time and rounded start. */
	cl->F = cl->S + (uint64_t)(m->m_pkthdr.len) * cl->inv_w;
	roundedS = qfq_round_down(cl->S, grp->slot_shift);

	/*
	 * insert cl in the correct bucket.
	 * If cl->S >= grp->S we don't need to adjust the
	 * bucket list and simply go to the insertion phase.
	 * Otherwise grp->S is decreasing, we must make room
	 * in the bucket list, and also recompute the group state.
	 * Finally, if there were no flows in this group and nobody
	 * was in ER make sure to adjust V.
	 */
	if (grp->full_slots) {
		if (!qfq_gt(grp->S, cl->S))
			goto skip_update;
		/* create a slot for this cl->S */
		qfq_slot_rotate(q, grp, roundedS);
		/* group was surely ineligible, remove */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[IB]);
	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
		q->V = roundedS;

	grp->S = roundedS;
	grp->F = roundedS + (2ULL << grp->slot_shift); // i.e. 2\sigma_i
	s = qfq_calc_state(q, grp);
	__set_bit(grp->index, &q->bitmaps[s]);
	ND("new state %d 0x%x", s, q->bitmaps[s]);
	ND("S %llx F %llx V %llx", cl->S, cl->F, q->V);
skip_update:
	qfq_slot_insert(grp, cl, roundedS);

	return 0;
}


#if 0
static inline void
qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
	struct qfq_class *cl, struct qfq_class **pprev)
{
	unsigned int i, offset;
	uint64_t roundedS;

	roundedS = qfq_round_down(cl->S, grp->slot_shift);
	offset = (roundedS - grp->S) >> grp->slot_shift;
	i = (grp->front + offset) % QFQ_MAX_SLOTS;

#ifdef notyet
	if (!pprev) {
		pprev = &grp->slots[i];
		while (*pprev && *pprev != cl)
			pprev = &(*pprev)->next;
	}
#endif

	*pprev = cl->next;
	if (!grp->slots[i])
		__clear_bit(offset, &grp->full_slots);
}

/*
 * called to forcibly destroy a queue.
 * If the queue is not in the front bucket, or if it has
 * other queues in the front bucket, we can simply remove
 * the queue with no other side effects.
 * Otherwise we must propagate the event up.
 * XXX description to be completed.
 */
static void
qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl,
				 struct qfq_class **pprev)
{
	struct qfq_group *grp = cl->grp;
	unsigned long mask;
	uint64_t roundedS;
	int s;

	cl->F = cl->S;	// not needed if the class goes away.
	qfq_slot_remove(q, grp, cl, pprev);

	if (!grp->full_slots) {
		/* nothing left in the group, remove from all sets.
		 * Do ER last because if we were blocking other groups
		 * we must unblock them.
		 */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (!grp->slots[grp->front]) {
		cl = qfq_slot_scan(grp);
		roundedS = qfq_round_down(cl->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}
	qfq_update_eligible(q, q->V);
}
#endif

static int
qfq_new_fsk(struct dn_fsk *f)
{
	ipdn_bound_var(&f->fs.par[0], 1, 1, QFQ_MAX_WEIGHT, "qfq weight");
	ipdn_bound_var(&f->fs.par[1], 1500, 1, 2000, "qfq maxlen");
	ND("weight %d len %d\n", f->fs.par[0], f->fs.par[1]);
	return 0;
}

/*
 * initialize a new scheduler instance
 */
static int
qfq_new_sched(struct dn_sch_inst *si)
{
	struct qfq_sched *q = (struct qfq_sched *)(si + 1);
	struct qfq_group *grp;
	int i;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		grp->index = i;
		grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS -
					(QFQ_MAX_INDEX - i);
	}
	return 0;
}

/*
 * QFQ scheduler descriptor
 */
static struct dn_alg qfq_desc = {
	_SI( .type = ) DN_SCHED_QFQ,
	_SI( .name = ) "QFQ",
	_SI( .flags = ) DN_MULTIQUEUE,

	_SI( .schk_datalen = ) 0,
	_SI( .si_datalen = ) sizeof(struct qfq_sched),
	_SI( .q_datalen = ) sizeof(struct qfq_class) - sizeof(struct dn_queue),

	_SI( .enqueue = ) qfq_enqueue,
	_SI( .dequeue = ) qfq_dequeue,

	_SI( .config = )  NULL,
	_SI( .destroy = )  NULL,
	_SI( .new_sched = ) qfq_new_sched,
	_SI( .free_sched = )  NULL,
	_SI( .new_fsk = ) qfq_new_fsk,
	_SI( .free_fsk = )  NULL,
	_SI( .new_queue = ) qfq_new_queue,
	_SI( .free_queue = ) qfq_free_queue,
};

DECLARE_DNSCHED_MODULE(dn_qfq, &qfq_desc);

#ifdef QFQ_DEBUG
static void
dump_groups(struct qfq_sched *q, uint32_t mask)
{
	int i, j;

	for (i = 0; i < QFQ_MAX_INDEX + 1; i++) {
		struct qfq_group *g = &q->groups[i];

		if (0 == (mask & (1<<i)))
			continue;
		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
			if (g->slots[j])
				D("    bucket %d %p", j, g->slots[j]);
		}
		D("full_slots 0x%llx", (_P64)g->full_slots);
		D("        %2d S 0x%20llx F 0x%llx %c", i,
			(_P64)g->S, (_P64)g->F,
			mask & (1<<i) ? '1' : '0');
	}
}

static void
dump_sched(struct qfq_sched *q, const char *msg)
{
	D("--- in %s: ---", msg);
	D("loops %d queued %d V 0x%llx", q->loops, q->queued, (_P64)q->V);
	D("    ER 0x%08x", (unsigned)q->bitmaps[ER]);
	D("    EB 0x%08x", (unsigned)q->bitmaps[EB]);
	D("    IR 0x%08x", (unsigned)q->bitmaps[IR]);
	D("    IB 0x%08x", (unsigned)q->bitmaps[IB]);
	dump_groups(q, 0xffffffff);
}
#endif /* QFQ_DEBUG */