xref: /linux/net/sched/sch_tbf.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
1 /*
2  * net/sched/sch_tbf.c	Token Bucket Filter queue.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
11  *						 original idea by Martin Devera
12  *
13  */
14 
15 #include <linux/module.h>
16 #include <asm/uaccess.h>
17 #include <asm/system.h>
18 #include <linux/bitops.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/jiffies.h>
22 #include <linux/string.h>
23 #include <linux/mm.h>
24 #include <linux/socket.h>
25 #include <linux/sockios.h>
26 #include <linux/in.h>
27 #include <linux/errno.h>
28 #include <linux/interrupt.h>
29 #include <linux/if_ether.h>
30 #include <linux/inet.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/notifier.h>
34 #include <net/ip.h>
35 #include <net/route.h>
36 #include <linux/skbuff.h>
37 #include <net/sock.h>
38 #include <net/pkt_sched.h>
39 
40 
41 /*	Simple Token Bucket Filter.
42 	=======================================
43 
44 	SOURCE.
45 	-------
46 
47 	None.
48 
49 	Description.
50 	------------
51 
52 	A data flow obeys TBF with rate R and depth B, if for any
53 	time interval t_i...t_f the number of transmitted bits
54 	does not exceed B + R*(t_f-t_i).
55 
56 	Packetized version of this definition:
57 	The sequence of packets of sizes s_i served at moments t_i
58 	obeys TBF, if for any i<=k:
59 
60 	s_i+....+s_k <= B + R*(t_k - t_i)
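
	For instance (illustrative numbers): with R = 1 Mbit/sec and
	B = 10 kbit, a 10 kbit burst may leave back to back, but over any
	100 msec window no more than 10 kbit + 100 kbit = 110 kbit may be
	transmitted.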
61 
62 	Algorithm.
63 	----------
64 
65 	Let N(t) be the token budget, in units of time: initially N = B/R, and it grows continuously as:
66 
67 	N(t+delta) = min{B/R, N(t) + delta}
68 
69 	If the first packet in queue has length S, it may be
70 	transmitted only at the time t_* when S/R <= N(t_*),
71 	and in this case N(t) jumps:
72 
73 	N(t_* + 0) = N(t_* - 0) - S/R.
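
	For instance (numbers made up): with R = 125 kbyte/sec and
	B = 10 kbyte, N starts at B/R = 80 msec; a 5 kbyte packet costs
	S/R = 40 msec of credit, which is earned back after 40 msec of
	idle time.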
74 
75 
76 
77 	Actually, QoS requires two TBFs to be applied to a data stream.
78 	One of them controls steady state burst size, another
79 	one with rate P (peak rate) and depth M (equal to link MTU)
80 	limits bursts at a smaller time scale.
81 
82 	It is easy to see that P>R, and B>M. If P is infinity, this double
83 	TBF is equivalent to a single one.
84 
85 	When TBF works in reshaping mode, latency is estimated as:
86 
87 	lat = max ((L-B)/R, (L-M)/P)
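
	Here L is the maximal backlog (the "limit" parameter).  With
	illustrative numbers L = 60 kbyte, B = 10 kbyte, R = 125 kbyte/sec,
	M = 1.5 kbyte, P = 250 kbyte/sec, a full backlog drains after
	max((60-10)/125, (60-1.5)/250) sec = max(400, 234) msec = 400 msec.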
88 
89 
90 	NOTES.
91 	------
92 
93 	If TBF throttles, it starts a watchdog timer, which will wake it up
94 	when it is ready to transmit.
95 	Note that the minimal timer resolution is 1/HZ.
96 	If no new packets arrive during this period,
97 	or if the device is not woken up by an EOI for some previous packet,
98 	TBF can stop its activity for 1/HZ.
99 
100 
101 	This means that with depth B, the maximal rate is
102 
103 	R_crit = B*HZ
104 
105 	E.g., for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.
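
	(10 Mbit/sec is 1.25 Mbyte/sec; with HZ=100 the watchdog may sleep
	for a whole 10 msec tick, during which 12.5 kbyte of credit would
	have to be banked.  Credit above B is lost to the min{} clamp, so a
	smaller B caps the achievable rate below R.)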
106 
107 	Note that the peak rate TBF is much stricter: with MTU 1500
108 	P_crit = 150Kbytes/sec. So, if you need greater peak
109 	rates, use alpha with HZ=1000 :-)
110 
111 	With classful TBF, limit is just kept for backwards compatibility.
112 	It is passed to the default bfifo qdisc - if the inner qdisc is
113 	changed, the limit is not effective anymore.
114 */
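
/*
 * For orientation only (parameter names as in tc-tbf(8); the values are
 * made up): a command such as
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb limit 30kb \
 *		peakrate 2mbit mtu 1500
 *
 * lands in tbf_sched_data below as rate -> R_tab, burst -> buffer,
 * limit -> limit (byte limit of the default bfifo child), peakrate ->
 * P_tab and mtu -> mtu; max_size is then derived in tbf_change().
 */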
115 
116 struct tbf_sched_data
117 {
118 /* Parameters */
119 	u32		limit;		/* Maximal length of backlog: bytes */
120 	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
121 	u32		mtu;
122 	u32		max_size;
123 	struct qdisc_rate_table	*R_tab;
124 	struct qdisc_rate_table	*P_tab;
125 
126 /* Variables */
127 	long	tokens;			/* Current number of B tokens */
128 	long	ptokens;		/* Current number of P tokens */
129 	psched_time_t	t_c;		/* Time check-point */
130 	struct timer_list wd_timer;	/* Watchdog timer */
131 	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
132 };
133 
134 #define L2T(q,L)   ((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log])
135 #define L2T_P(q,L) ((q)->P_tab->data[(L)>>(q)->P_tab->rate.cell_log])
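
/*
 * The rate tables are filled in by userspace (tc): roughly, slot n holds
 * the time, in psched units, needed to transmit a packet of about
 * (n + 1) << cell_log bytes at the configured rate.  A hypothetical
 * open-coded equivalent of L2T(), kept purely as an illustration and not
 * used anywhere:
 */
static inline u32 tbf_example_len2time(const struct qdisc_rate_table *rtab,
				       unsigned int len)
{
	/* index by the packet length expressed in cells of 2^cell_log bytes */
	return rtab->data[len >> rtab->rate.cell_log];
}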
136 
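/*
 * A packet longer than max_size could never gather enough tokens, so it is
 * dropped up front; with CONFIG_NET_CLS_POLICE the optional reshape_fail()
 * hook gets a chance to take the packet instead of having it freed here.
 */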
137 static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
138 {
139 	struct tbf_sched_data *q = qdisc_priv(sch);
140 	int ret;
141 
142 	if (skb->len > q->max_size) {
143 		sch->qstats.drops++;
144 #ifdef CONFIG_NET_CLS_POLICE
145 		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
146 #endif
147 			kfree_skb(skb);
148 
149 		return NET_XMIT_DROP;
150 	}
151 
152 	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
153 		sch->qstats.drops++;
154 		return ret;
155 	}
156 
157 	sch->q.qlen++;
158 	sch->bstats.bytes += skb->len;
159 	sch->bstats.packets++;
160 	return 0;
161 }
162 
163 static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
164 {
165 	struct tbf_sched_data *q = qdisc_priv(sch);
166 	int ret;
167 
168 	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
169 		sch->q.qlen++;
170 		sch->qstats.requeues++;
171 	}
172 
173 	return ret;
174 }
175 
176 static unsigned int tbf_drop(struct Qdisc* sch)
177 {
178 	struct tbf_sched_data *q = qdisc_priv(sch);
179 	unsigned int len = 0;
180 
181 	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
182 		sch->q.qlen--;
183 		sch->qstats.drops++;
184 	}
185 	return len;
186 }
187 
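/*
 * Armed by tbf_dequeue() while throttled: once enough tokens should have
 * accumulated, clear TCQ_F_THROTTLED and ask the device to pull from the
 * qdisc again.
 */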
188 static void tbf_watchdog(unsigned long arg)
189 {
190 	struct Qdisc *sch = (struct Qdisc*)arg;
191 
192 	sch->flags &= ~TCQ_F_THROTTLED;
193 	netif_schedule(sch->dev);
194 }
195 
196 static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
197 {
198 	struct tbf_sched_data *q = qdisc_priv(sch);
199 	struct sk_buff *skb;
200 
201 	skb = q->qdisc->dequeue(q->qdisc);
202 
203 	if (skb) {
204 		psched_time_t now;
205 		long toks, delay;
206 		long ptoks = 0;
207 		unsigned int len = skb->len;
208 
209 		PSCHED_GET_TIME(now);
210 
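		/* Time credit earned since the last checkpoint t_c, capped
		 * at the bucket depth by PSCHED_TDIFF_SAFE(). */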
211 		toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer);
212 
213 		if (q->P_tab) {
214 			ptoks = toks + q->ptokens;
215 			if (ptoks > (long)q->mtu)
216 				ptoks = q->mtu;
217 			ptoks -= L2T_P(q, len);
218 		}
219 		toks += q->tokens;
220 		if (toks > (long)q->buffer)
221 			toks = q->buffer;
222 		toks -= L2T(q, len);
223 
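		/* (toks | ptoks) is non-negative only if both values are,
		 * i.e. both buckets can pay for this packet. */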
224 		if ((toks|ptoks) >= 0) {
225 			q->t_c = now;
226 			q->tokens = toks;
227 			q->ptokens = ptoks;
228 			sch->q.qlen--;
229 			sch->flags &= ~TCQ_F_THROTTLED;
230 			return skb;
231 		}
232 
233 		delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));
234 
235 		if (delay == 0)
236 			delay = 1;
237 
238 		mod_timer(&q->wd_timer, jiffies+delay);
239 
240 		/* Maybe we have a shorter packet in the queue,
241 		   which can be sent now. It sounds cool,
242 		   but, however, this is wrong in principle.
243 		   We MUST NOT reorder packets under these circumstances.
244 
245 		   Really, if we split the flow into independent
246 		   subflows, it would be a very good solution.
247 		   This is the main idea of all FQ algorithms
248 		   (cf. CSZ, HPFQ, HFSC)
249 		 */
250 
251 		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
252 			/* When requeue fails skb is dropped */
253 			qdisc_tree_decrease_qlen(q->qdisc, 1);
254 			sch->qstats.drops++;
255 		}
256 
257 		sch->flags |= TCQ_F_THROTTLED;
258 		sch->qstats.overlimits++;
259 	}
260 	return NULL;
261 }
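
/*
 * Illustrative sketch only, not used anywhere: the same dual-bucket
 * arithmetic tbf_dequeue() performs above, with plain longs in place of
 * psched times and rate tables.  "elapsed" stands for the time since the
 * last checkpoint, "cost"/"pcost" for L2T(q, len)/L2T_P(q, len); all names
 * here are made up for the example.
 */
static inline int tbf_example_conforms(long tokens, long ptokens,
				       long buffer, long mtu,
				       long elapsed, long cost, long pcost)
{
	long toks = elapsed;
	long ptoks;

	if (toks > buffer)	/* at most one full bucket of credit */
		toks = buffer;	/* accumulates while the link is idle */

	ptoks = toks + ptokens;
	if (ptoks > mtu)	/* the peak bucket holds at most one MTU */
		ptoks = mtu;
	ptoks -= pcost;		/* pay for the packet at peak rate P */

	toks += tokens;
	if (toks > buffer)
		toks = buffer;
	toks -= cost;		/* pay for the packet at rate R */

	/* the packet conforms only if neither bucket went negative */
	return (toks | ptoks) >= 0;
}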
262 
263 static void tbf_reset(struct Qdisc* sch)
264 {
265 	struct tbf_sched_data *q = qdisc_priv(sch);
266 
267 	qdisc_reset(q->qdisc);
268 	sch->q.qlen = 0;
269 	PSCHED_GET_TIME(q->t_c);
270 	q->tokens = q->buffer;
271 	q->ptokens = q->mtu;
272 	sch->flags &= ~TCQ_F_THROTTLED;
273 	del_timer(&q->wd_timer);
274 }
275 
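/*
 * Build the default bfifo child: a tc_fifo_qopt carrying the requested byte
 * limit is packed into a temporary rtattr and fed to the fifo's change()
 * handler, exactly as if userspace had configured it.
 */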
276 static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
277 {
278 	struct Qdisc *q;
279 	struct rtattr *rta;
280 	int ret;
281 
282 	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
283 			      TC_H_MAKE(sch->handle, 1));
284 	if (q) {
285 		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
286 		if (rta) {
287 			rta->rta_type = RTM_NEWQDISC;
288 			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
289 			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;
290 
291 			ret = q->ops->change(q, rta);
292 			kfree(rta);
293 
294 			if (ret == 0)
295 				return q;
296 		}
297 		qdisc_destroy(q);
298 	}
299 
300 	return NULL;
301 }
302 
303 static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
304 {
305 	int err = -EINVAL;
306 	struct tbf_sched_data *q = qdisc_priv(sch);
307 	struct rtattr *tb[TCA_TBF_PTAB];
308 	struct tc_tbf_qopt *qopt;
309 	struct qdisc_rate_table *rtab = NULL;
310 	struct qdisc_rate_table *ptab = NULL;
311 	struct Qdisc *child = NULL;
312 	int max_size,n;
313 
314 	if (rtattr_parse_nested(tb, TCA_TBF_PTAB, opt) ||
315 	    tb[TCA_TBF_PARMS-1] == NULL ||
316 	    RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
317 		goto done;
318 
319 	qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
320 	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
321 	if (rtab == NULL)
322 		goto done;
323 
324 	if (qopt->peakrate.rate) {
325 		if (qopt->peakrate.rate > qopt->rate.rate)
326 			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
327 		if (ptab == NULL)
328 			goto done;
329 	}
330 
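	/* Find the largest packet whose cost still fits into the bucket(s):
	 * scan the 256 rate table slots for the first one whose transmission
	 * time exceeds the configured buffer (and mtu, for the peak rate
	 * table). */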
331 	for (n = 0; n < 256; n++)
332 		if (rtab->data[n] > qopt->buffer) break;
333 	max_size = (n << qopt->rate.cell_log)-1;
334 	if (ptab) {
335 		int size;
336 
337 		for (n = 0; n < 256; n++)
338 			if (ptab->data[n] > qopt->mtu) break;
339 		size = (n << qopt->peakrate.cell_log)-1;
340 		if (size < max_size) max_size = size;
341 	}
342 	if (max_size < 0)
343 		goto done;
344 
345 	if (qopt->limit > 0) {
346 		if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL)
347 			goto done;
348 	}
349 
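	/* Commit the new parameters under the qdisc tree lock; xchg() leaves
	 * the previous rate tables (if any) in rtab/ptab so that the done:
	 * path below releases them. */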
350 	sch_tree_lock(sch);
351 	if (child) {
352 		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
353 		qdisc_destroy(xchg(&q->qdisc, child));
354 	}
355 	q->limit = qopt->limit;
356 	q->mtu = qopt->mtu;
357 	q->max_size = max_size;
358 	q->buffer = qopt->buffer;
359 	q->tokens = q->buffer;
360 	q->ptokens = q->mtu;
361 	rtab = xchg(&q->R_tab, rtab);
362 	ptab = xchg(&q->P_tab, ptab);
363 	sch_tree_unlock(sch);
364 	err = 0;
365 done:
366 	if (rtab)
367 		qdisc_put_rtab(rtab);
368 	if (ptab)
369 		qdisc_put_rtab(ptab);
370 	return err;
371 }
372 
373 static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
374 {
375 	struct tbf_sched_data *q = qdisc_priv(sch);
376 
377 	if (opt == NULL)
378 		return -EINVAL;
379 
380 	PSCHED_GET_TIME(q->t_c);
381 	init_timer(&q->wd_timer);
382 	q->wd_timer.function = tbf_watchdog;
383 	q->wd_timer.data = (unsigned long)sch;
384 
385 	q->qdisc = &noop_qdisc;
386 
387 	return tbf_change(sch, opt);
388 }
389 
390 static void tbf_destroy(struct Qdisc *sch)
391 {
392 	struct tbf_sched_data *q = qdisc_priv(sch);
393 
394 	del_timer(&q->wd_timer);
395 
396 	if (q->P_tab)
397 		qdisc_put_rtab(q->P_tab);
398 	if (q->R_tab)
399 		qdisc_put_rtab(q->R_tab);
400 
401 	qdisc_destroy(q->qdisc);
402 }
403 
404 static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
405 {
406 	struct tbf_sched_data *q = qdisc_priv(sch);
407 	unsigned char	 *b = skb->tail;
408 	struct rtattr *rta;
409 	struct tc_tbf_qopt opt;
410 
411 	rta = (struct rtattr*)b;
412 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
413 
414 	opt.limit = q->limit;
415 	opt.rate = q->R_tab->rate;
416 	if (q->P_tab)
417 		opt.peakrate = q->P_tab->rate;
418 	else
419 		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
420 	opt.mtu = q->mtu;
421 	opt.buffer = q->buffer;
422 	RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
423 	rta->rta_len = skb->tail - b;
424 
425 	return skb->len;
426 
427 rtattr_failure:
428 	skb_trim(skb, b - skb->data);
429 	return -1;
430 }
431 
432 static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
433 			  struct sk_buff *skb, struct tcmsg *tcm)
434 {
435 	struct tbf_sched_data *q = qdisc_priv(sch);
436 
437 	if (cl != 1) 	/* only one class */
438 		return -ENOENT;
439 
440 	tcm->tcm_handle |= TC_H_MIN(1);
441 	tcm->tcm_info = q->qdisc->handle;
442 
443 	return 0;
444 }
445 
446 static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
447 		     struct Qdisc **old)
448 {
449 	struct tbf_sched_data *q = qdisc_priv(sch);
450 
451 	if (new == NULL)
452 		new = &noop_qdisc;
453 
454 	sch_tree_lock(sch);
455 	*old = xchg(&q->qdisc, new);
456 	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
457 	qdisc_reset(*old);
458 	sch_tree_unlock(sch);
459 
460 	return 0;
461 }
462 
463 static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
464 {
465 	struct tbf_sched_data *q = qdisc_priv(sch);
466 	return q->qdisc;
467 }
468 
469 static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
470 {
471 	return 1;
472 }
473 
474 static void tbf_put(struct Qdisc *sch, unsigned long arg)
475 {
476 }
477 
478 static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
479 			    struct rtattr **tca, unsigned long *arg)
480 {
481 	return -ENOSYS;
482 }
483 
484 static int tbf_delete(struct Qdisc *sch, unsigned long arg)
485 {
486 	return -ENOSYS;
487 }
488 
489 static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
490 {
491 	if (!walker->stop) {
492 		if (walker->count >= walker->skip)
493 			if (walker->fn(sch, 1, walker) < 0) {
494 				walker->stop = 1;
495 				return;
496 			}
497 		walker->count++;
498 	}
499 }
500 
501 static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
502 {
503 	return NULL;
504 }
505 
506 static struct Qdisc_class_ops tbf_class_ops =
507 {
508 	.graft		=	tbf_graft,
509 	.leaf		=	tbf_leaf,
510 	.get		=	tbf_get,
511 	.put		=	tbf_put,
512 	.change		=	tbf_change_class,
513 	.delete		=	tbf_delete,
514 	.walk		=	tbf_walk,
515 	.tcf_chain	=	tbf_find_tcf,
516 	.dump		=	tbf_dump_class,
517 };
518 
519 static struct Qdisc_ops tbf_qdisc_ops = {
520 	.next		=	NULL,
521 	.cl_ops		=	&tbf_class_ops,
522 	.id		=	"tbf",
523 	.priv_size	=	sizeof(struct tbf_sched_data),
524 	.enqueue	=	tbf_enqueue,
525 	.dequeue	=	tbf_dequeue,
526 	.requeue	=	tbf_requeue,
527 	.drop		=	tbf_drop,
528 	.init		=	tbf_init,
529 	.reset		=	tbf_reset,
530 	.destroy	=	tbf_destroy,
531 	.change		=	tbf_change,
532 	.dump		=	tbf_dump,
533 	.owner		=	THIS_MODULE,
534 };
535 
536 static int __init tbf_module_init(void)
537 {
538 	return register_qdisc(&tbf_qdisc_ops);
539 }
540 
541 static void __exit tbf_module_exit(void)
542 {
543 	unregister_qdisc(&tbf_qdisc_ops);
544 }
545 module_init(tbf_module_init)
546 module_exit(tbf_module_exit)
547 MODULE_LICENSE("GPL");
548