/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: -  Bug fix with grio mode
 *		       - a better single AvgQ mode with Grio (WRED)
 *		       - A finer grained VQ dequeue based on a suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;

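/*
 * Per-virtual-queue (per-DP) state: one instance is allocated for each
 * configured drop parameter set and holds its RED parameters, statistics
 * and byte backlog.
 */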
struct gred_sched_data
{
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_stats stats;
};

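/*
 * Operating modes: RIO mode is set when per-VQ priorities have been
 * configured (grio); WRED mode is additionally enabled when two or more
 * VQs share the same priority, in which case all VQs share one average
 * queue estimate computed over the whole qdisc backlog.
 */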
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

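/*
 * Per-qdisc state: the table of virtual queues indexed by DP, the number
 * of configured DPs, the default DP for unclassified packets and the
 * shared WRED average queue state.
 */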
struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_parms wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

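/*
 * Returns 1 if at least two distinct VQs are configured with the same
 * priority, which is the condition under which the callers switch the
 * qdisc into WRED mode.
 */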
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

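/*
 * In WRED mode the average is computed over the backlog of the whole
 * qdisc, otherwise over the backlog of the individual VQ.
 */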
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

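/* The lower bits of skb->tc_index select the virtual queue (DP). */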
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

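/*
 * In WRED mode the shared average queue state lives in table->wred_set:
 * it is copied into the active VQ before the RED calculation
 * (gred_load_wred_set) and the updated average is written back afterwards
 * (gred_store_wred_set), so all VQs see the same average.
 */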
static inline void gred_load_wred_set(struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->parms.qavg = table->wred_set.qavg;
	q->parms.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->parms.qavg;
}

static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}

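/*
 * Enqueue path: map the packet to its VQ via tc_index (falling back to
 * the default DP), update the average queue length and let RED decide
 * whether to queue, ECN-mark or drop the packet.
 */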
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		if ((q = t->tab[dp]) == NULL) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qavgs of prios below ours to get the new qavg */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
				q->stats.prob_drop++;
				goto congestion_drop;
			}

			q->stats.prob_mark++;
			break;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}
			q->stats.forced_mark++;
			break;
	}

	if (q->backlog + skb->len <= q->limit) {
		q->backlog += skb->len;
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

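/*
 * Requeue path: put the packet back at the head of the qdisc and
 * re-account its length to the VQ backlog it was dequeued from.
 */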
static int gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched *t = qdisc_priv(sch);
	struct gred_sched_data *q;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		if (net_ratelimit())
			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
			       "for requeue, screwing up backlog.\n",
			       tc_index_to_dp(skb));
	} else {
		if (red_is_idling(&q->parms))
			red_end_of_idle_period(&q->parms);
		q->backlog += skb->len;
	}

	return qdisc_requeue(skb, sch);
}

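/*
 * Dequeue path: take the packet at the head of the qdisc, subtract its
 * length from the owning VQ's backlog and start the RED idle period
 * once that VQ (or, in WRED mode, the whole qdisc) drains empty.
 */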
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x after dequeue, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= skb->len;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}

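/*
 * Drop path (qdisc ->drop operation): discard the packet at the tail of
 * the queue, account it as an "other" drop on its VQ and update the
 * idle period bookkeeping as in dequeue.
 */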
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = skb->len;
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x while dropping, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->parms);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

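/*
 * Apply the table-level parameters (struct tc_gred_sopt): number of DPs,
 * default DP and flags, switch RIO/WRED mode accordingly and free any
 * VQs that now lie beyond the configured DP range.
 */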
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}

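/*
 * Create or update a single VQ: allocate it on first use, then install
 * the limit, priority and RED parameters supplied by userspace. Called
 * with the qdisc tree lock held.
 */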
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
		memset(table->tab[dp], 0, sizeof(*q));
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}

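/*
 * ->change handler: either update the table-level defaults (when no
 * per-VQ parameters are given) or reconfigure one VQ, picking a default
 * priority in RIO mode if userspace did not supply one, and finally
 * re-evaluate whether WRED mode applies.
 */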
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}

static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}

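/*
 * Dump the table-level options plus one tc_gred_qopt entry per possible
 * DP; slots without a VQ are marked by reporting a DP value of
 * MAX_DPs + i so userspace can tell them apart.
 */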
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with a proper message.
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table)) {
			q->parms.qidlestart =
				table->tab[table->def]->parms.qidlestart;
			q->parms.qavg = table->tab[table->def]->parms.qavg;
		}

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");