/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */

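/* A typical user-space configuration, for illustration only (the values
 * are arbitrary; tc derives Wlog, Plog, Scell_log and the TCA_RED_STAB
 * lookup table from burst/avpkt/bandwidth before talking to us):
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 probability 0.02 bandwidth 10Mbit ecn
 */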
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_ECN, TC_RED_HARDDROP, ... */
	struct timer_list	adapt_timer;	/* periodic max_P adaptation */
	struct red_parms	parms;		/* user-supplied RED parameters */
	struct red_vars		vars;		/* current qavg and marking state */
	struct red_stats	stats;		/* mark/drop counters for dumps */
	struct Qdisc		*qdisc;		/* child qdisc holding the packets */
};

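/* Flag helpers: with TC_RED_ECN set, ECN-capable packets are marked
 * instead of dropped; with TC_RED_HARDDROP set, packets above qth_max
 * are dropped even when they could have been marked.
 */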
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

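/* Classic RED (Floyd/Jacobson): keep an EWMA of the backlog,
 *
 *	qavg = (1 - 2^-Wlog) * qavg + 2^-Wlog * backlog,
 *
 * computed in fixed point by red_calc_qavg() (net/red.h keeps qavg
 * scaled by 2^Wlog). red_action() then returns DONT_MARK below
 * qth_min, HARD_MARK above qth_max, and PROB_MARK in between with a
 * probability that grows linearly towards max_P.
 */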
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

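/* Dequeue from the child; when the child runs empty, note the start of
 * an idle period so the next red_calc_qavg() can decay qavg for the
 * time the link sat unused (the Makarenko fix noted in the changelog).
 */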
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

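/* ->drop callback, typically invoked by a parent qdisc that needs to
 * free up room: ask the child to shed one packet and account it as
 * "other", since RED itself did not decide this drop.
 */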
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->vars))
		red_start_of_idle_period(&q->vars);

	return 0;
}

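/* red_reset() empties the child and restarts the averaging state;
 * red_destroy() additionally stops the adaptation timer before the
 * child is torn down.
 */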
static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	qdisc_destroy(q->qdisc);
}

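/* Netlink attribute policy: TCA_RED_PARMS carries struct tc_red_qopt,
 * TCA_RED_STAB the 256-byte idle-time decay lookup table, and the
 * optional TCA_RED_MAX_P a fixed-point max_P for adaptive RED.
 */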
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
};

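/* Parse and apply a new configuration. The packet storage itself is a
 * bfifo child qdisc sized to ctl->limit; it is swapped in under the
 * tree lock so enqueue/dequeue never see a half-updated qdisc.
 */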
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	return 0;
}

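/* Adaptive RED (Floyd et al., 2001): every 500 ms red_adaptative_algo()
 * nudges max_P up or down so the average queue settles in a target band
 * between qth_min and qth_max, under the root qdisc lock. (The
 * "adaptative" spelling is the historical name of the kernel interface.)
 */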
static inline void red_adaptative_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct red_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

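/* Initial setup: park a noop child so the qdisc is always safe to use,
 * then let red_change() do the real work from the supplied attributes.
 */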
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
	return red_change(sch, opt);
}

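/* Dump the configuration back to user space. qth_min/qth_max are kept
 * internally scaled by 2^Wlog, so shift them back down before copying
 * them into struct tc_red_qopt.
 */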
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

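/* RED is classful only in the minimal sense: it exposes exactly one
 * pseudo-class (minor 1) wrapping the child qdisc, so user space can
 * graft a different child (e.g. "tc qdisc replace ... parent X:1").
 */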
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

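/* Walk the single pseudo-class, honouring the walker's skip/count
 * bookkeeping so generic dump code sees the same contract as a
 * multi-class qdisc.
 */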
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");