/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/flow_keys.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32-bit hash (provided per packet by rxhash
 * or an external classifier) into 8 subhashes of 4 bits each.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF /* maximum marking probability (Q0.16) */

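/*
 * Illustrative sketch, not part of the qdisc proper: how one 32-bit hash
 * is split into SFB_LEVELS bucket indexes of SFB_BUCKET_SHIFT bits each.
 * The enqueue/dequeue paths below do the same thing iteratively with
 * shift-and-mask; the helper name here is hypothetical.
 */
static inline u32 sfb_bucket_idx_example(u32 sfbhash, int level)
{
	/* level 0 uses bits 0-3, level 1 bits 4-7, ... level 7 bits 28-31 */
	return (sfbhash >> (level * SFB_BUCKET_SHIFT)) & SFB_BUCKET_MASK;
}
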
/* The SFB algorithm uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of the SFB reference: moving hash functions)
 */
struct sfb_bins {
	u32		  perturbation; /* jhash perturbation */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

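/*
 * Size note (derived from the definitions above): each sfb_bucket is
 * 4 bytes, so the two sfb_bins slots below hold
 * 2 * SFB_LEVELS * SFB_NUMBUCKETS * 4 = 1024 bytes of bin state per qdisc.
 */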
struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed into one or two bins.
 * We store the two hash values in skb_cb.
 * (A zero value means double buffering was not used.)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using the 'internal' SFB flow classifier, the hash comes from skb rxhash.
 * If using an external classifier, the hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0).
 * Addition and subtraction are saturating in [0, SFB_MAX_PROB].
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}

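/*
 * Worked example of the Q0.16 scale, as a hypothetical helper that is not
 * used by the qdisc: a probability p in [0, SFB_MAX_PROB] maps to p / 65536,
 * so the default increment of 66 (see sfb_default_ops below) is about
 * 0.1 %, and prob_plus(0x8000, 0x8000) saturates at SFB_MAX_PROB instead
 * of wrapping.
 */
static inline u32 sfb_prob_to_permille_example(u32 prob)
{
	/* e.g. 0x8000 (0.5) -> 500, SFB_MAX_PROB -> 999 */
	return (prob * 1000) / (SFB_MAX_PROB + 1);
}
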
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = prandom_u32();
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with a "penalty_burst" packet burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}

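/*
 * Worked example for the limiter above, using the defaults from
 * sfb_default_ops (penalty_rate = 10, penalty_burst = 20): a flow whose
 * every bin saturated at SFB_MAX_PROB gets at most 10 packets per second
 * through, and at most 20 back to back after a long idle period.
 */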
static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tc_classify(skb, fl, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, slot, salt, sfbhash;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	struct flow_keys keys;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		/* Swap slots when the rehash interval expires; start warming
		 * up the standby slot warmup_time before the swap.
		 */
		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		keys.src = salt;
		keys.dst = 0;
		keys.ports = 0;
	} else {
		skb_flow_dissect(skb, &keys);
	}

	slot = q->slot;

	sfbhash = jhash_3words((__force u32)keys.dst,
			       (__force u32)keys.src,
			       (__force u32)keys.ports,
			       q->bins[slot].perturbation);
	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	/* A zero hash means this slot's bins were not touched for this
	 * packet (see increment_qlen/decrement_qlen).
	 */
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = jhash_3words((__force u32)keys.dst,
					       (__force u32)keys.src,
					       (__force u32)keys.ports,
					       q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

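/*
 * Illustrative sketch of the early-drop math in sfb_enqueue(), as a
 * hypothetical helper that is not wired into the qdisc: below
 * SFB_MAX_PROB / 2 packets are only ECN marked; above it, the excess is
 * turned into drops at twice the rate, so the drop share of r values
 * grows from 0 toward 100 % as p_min approaches SFB_MAX_PROB (where the
 * inelastic-flow path above takes over).
 */
static inline u32 sfb_early_drop_threshold_example(u32 p_min)
{
	if (p_min <= SFB_MAX_PROB / 2)
		return 0; /* only ECN marking below half probability */
	return (p_min - SFB_MAX_PROB / 2) * 2; /* drop if r is below this */
}
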
static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000, /* ~0.017 % */
	.penalty_rate = 10,
	.penalty_burst = 20,
};

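/*
 * Typical user-space configuration matching these defaults (illustrative;
 * parameter names are assumed to follow the iproute2 tc-sfb(8) man page,
 * where "target" maps to bin_size and "db" to the warmup time, with both
 * rehash and db given in milliseconds):
 *
 *	tc qdisc add dev eth0 root sfb limit 200 max 25 target 20 \
 *		rehash 600000 db 60000
 */
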
static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	sch_tree_lock(sch);

	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_put(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch,
					     unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		=	sfb_graft,
	.leaf		=	sfb_leaf,
	.get		=	sfb_get,
	.put		=	sfb_put,
	.change		=	sfb_change_class,
	.delete		=	sfb_delete,
	.walk		=	sfb_walk,
	.tcf_chain	=	sfb_find_tcf,
	.bind_tcf	=	sfb_bind,
	.unbind_tcf	=	sfb_put,
	.dump		=	sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		=	"sfb",
	.priv_size	=	sizeof(struct sfb_sched_data),
	.cl_ops		=	&sfb_class_ops,
	.enqueue	=	sfb_enqueue,
	.dequeue	=	sfb_dequeue,
	.peek		=	sfb_peek,
	.init		=	sfb_init,
	.reset		=	sfb_reset,
	.destroy	=	sfb_destroy,
	.change		=	sfb_change,
	.dump		=	sfb_dump,
	.dump_stats	=	sfb_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");