/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.

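	A small worked example (illustrative numbers, not from the source):
	with R = 125 Kbytes/sec and B = 1250 bytes, B/R = 10 ms.  After a
	sufficiently long idle period N(t) = 10 ms, so one 1250-byte packet
	(costing S/R = 10 ms of tokens) may be sent at once, after which N
	drops to 0 and the next such packet has to wait another 10 ms.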


	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)

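	For instance (illustrative numbers, not from the source): with
	limit L = 10 Kbytes, B = 2 Kbytes, R = 100 Kbytes/sec, M = 1500 bytes
	and P = 200 Kbytes/sec, lat = max(8000/100000, 8500/200000) sec
	= max(80 ms, 42.5 ms) = 80 ms, i.e. the main bucket dominates the
	worst-case queueing delay here.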

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not woken by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
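
/*
 * The block below is a minimal, self-contained sketch of the token
 * accounting that tbf_dequeue() performs, kept under "#if 0" so it never
 * takes part in the build.  Names and units are illustrative stand-ins:
 * tokens are kept in units of time, "now" stands in for PSCHED_GET_TIME(),
 * and the cost/pcost arguments correspond to the L2T()/L2T_P() rate table
 * lookups.  From user space the real qdisc is typically configured with
 * iproute2, e.g. "tc qdisc add dev eth0 root tbf rate 256kbit burst 1600
 * limit 3000".
 */
#if 0
struct tbf_sketch {
	long	buffer;		/* main bucket depth, in time units (B/R) */
	long	mtu;		/* peak bucket depth, in time units (M/P) */
	long	tokens;		/* current credit in the main bucket */
	long	ptokens;	/* current credit in the peak bucket */
	long	t_c;		/* time of the last checkpoint */
};

/*
 * Return 1 and charge both buckets if a packet costing "cost" time units
 * from the main bucket and "pcost" from the peak bucket may be sent at
 * time "now"; return 0 and leave the state untouched otherwise.
 */
static int tbf_sketch_may_send(struct tbf_sketch *s, long now,
			       long cost, long pcost)
{
	long toks = now - s->t_c;	/* credit earned since the checkpoint */
	long ptoks = toks + s->ptokens;

	if (ptoks > s->mtu)
		ptoks = s->mtu;		/* peak bucket cannot overflow */
	ptoks -= pcost;

	toks += s->tokens;
	if (toks > s->buffer)
		toks = s->buffer;	/* main bucket cannot overflow */
	toks -= cost;

	if ((toks | ptoks) < 0)		/* either bucket went negative */
		return 0;

	s->t_c = now;
	s->tokens = toks;
	s->ptokens = ptoks;
	return 1;
}
#endif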

struct tbf_sched_data
{
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32		mtu;
	u32		max_size;
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long	tokens;			/* Current number of B tokens */
	long	ptokens;		/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct timer_list wd_timer;	/* Watchdog timer */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
};

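/*
 * Convert a packet length L into its cost in time units by indexing the
 * rate table supplied with the TCA_TBF_RTAB (resp. TCA_TBF_PTAB) attribute:
 * lengths are bucketed by cell_log and each slot holds the time needed to
 * transmit a packet of that size at the configured rate.
 */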
#define L2T(q,L)   ((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log])
#define L2T_P(q,L) ((q)->P_tab->data[(L)>>(q)->P_tab->rate.cell_log])

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (skb->len > q->max_size) {
		sch->qstats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
			kfree_skb(skb);

		return NET_XMIT_DROP;
	}

	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
		sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;
	return 0;
}

static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int tbf_drop(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

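/*
 * Watchdog timer handler, armed by tbf_dequeue() when the head packet must
 * wait for tokens: clear the throttle flag and reschedule the device so
 * that dequeueing is retried.
 */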
static void tbf_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;

	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}

static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks, delay;
		long ptoks = 0;
		unsigned int len = skb->len;

		PSCHED_GET_TIME(now);

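		/* Credit accumulated since the last checkpoint, in the same
		 * time units as the buckets, bounded by q->buffer so that a
		 * long idle period cannot build up an unbounded burst.
		 */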
		toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		if ((toks|ptoks) >= 0) {
			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));

		if (delay == 0)
			delay = 1;

		mod_timer(&q->wd_timer, jiffies+delay);

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
			/* When requeue fails skb is dropped */
			sch->q.qlen--;
			sch->qstats.drops++;
		}

		sch->flags |= TCQ_F_THROTTLED;
		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	PSCHED_GET_TIME(q->t_c);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer(&q->wd_timer);
}

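/*
 * Build the default child qdisc: a bfifo whose byte limit is the TBF
 * "limit" parameter, configured through the same change() call that a
 * netlink request would trigger.
 */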
static struct Qdisc *tbf_create_dflt_qdisc(struct net_device *dev, u32 limit)
{
	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
	struct rtattr *rta;
	int ret;

	if (q) {
		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
		if (rta) {
			rta->rta_type = RTM_NEWQDISC;
			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

			ret = q->ops->change(q, rta);
			kfree(rta);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}

	return NULL;
}

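/*
 * Parse the TCA_TBF_* attributes and (re)configure the qdisc under the
 * qdisc tree lock.  max_size is derived from the rate tables: the largest
 * packet whose token cost still fits into the configured buffer (and into
 * mtu when a peak rate is set); anything larger is dropped at enqueue time.
 */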
static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
	int err = -EINVAL;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_TBF_PTAB];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	if (rtattr_parse_nested(tb, TCA_TBF_PTAB, opt) ||
	    tb[TCA_TBF_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
		goto done;

	qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
		if (ptab == NULL)
			goto done;
	}

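	/* Find the largest length bucket whose token cost still fits into
	 * the bucket depth; packets above max_size could never gather
	 * enough tokens and are rejected in tbf_enqueue().
	 */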
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer) break;
	max_size = (n << qopt->rate.cell_log)-1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu) break;
		size = (n << qopt->peakrate.cell_log)-1;
		if (size < max_size) max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (qopt->limit > 0) {
		if ((child = tbf_create_dflt_qdisc(sch->dev, qopt->limit)) == NULL)
			goto done;
	}

	sch_tree_lock(sch);
	if (child)
		qdisc_destroy(xchg(&q->qdisc, child));
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}


static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	PSCHED_GET_TIME(q->t_c);
	init_timer(&q->wd_timer);
	q->wd_timer.function = tbf_watchdog;
	q->wd_timer.data = (unsigned long)sch;

	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	del_timer(&q->wd_timer);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned char	 *b = skb->tail;
	struct rtattr *rta;
	struct tc_tbf_qopt opt;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (cl != 1) 	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_reset(*old);
	sch->q.qlen = 0;
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops tbf_class_ops =
{
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.change		=	tbf_change_class,
	.delete		=	tbf_delete,
	.walk		=	tbf_walk,
	.tcf_chain	=	tbf_find_tcf,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.requeue	=	tbf_requeue,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");