/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.

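	F.e. (illustration only) with R = 125000 bytes/sec (1 Mbit/s) and
	B = 1250 bytes, N(t) saturates at B/R = 10 ms of credit.  A full
	bucket admits one 1250-byte packet at once and drops N to zero;
	the next 1250-byte packet must wait the full 10 ms of refill.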


	Actually, QoS requires two TBF to be applied to a data stream.
	One of them controls steady state burst size, another
	one with rate P (peak rate) and depth M (equal to link MTU)
	limits bursts at a smaller time scale.

	It is easy to see that P>R, and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)

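	F.e. (illustration only) with limit L = 10000 bytes, B = 1250 bytes,
	R = 125000 bytes/sec and no peak rate, a packet arriving to a full
	queue waits up to (10000-1250)/125000 = 70 ms.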

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
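
/* A minimal user-space sketch (illustration only, not built or used by the
 * kernel) of the single-bucket admission test described above.  Token credit
 * is kept in nanoseconds of transmit time, as in tbf_dequeue() below; all
 * names here are made up for the example.
 */
#if 0
#include <stdint.h>

struct toy_bucket {
	int64_t  tokens;	/* current credit, in ns of transmit time */
	int64_t  buffer;	/* bucket depth B/R, in ns */
	int64_t  t_c;		/* time of the last update, in ns */
	uint64_t rate;		/* configured rate, in bytes per second */
};

/* Return 1 and charge the bucket if a packet of len bytes may be sent
 * at time now (ns), 0 if the sender must wait for more credit.
 */
static int toy_bucket_admit(struct toy_bucket *b, uint32_t len, int64_t now)
{
	int64_t toks = b->tokens + (now - b->t_c);

	if (toks > b->buffer)		/* credit saturates at the depth */
		toks = b->buffer;
	toks -= (int64_t)((uint64_t)len * 1000000000ULL / b->rate);
	if (toks < 0)			/* not enough credit yet */
		return 0;
	b->t_c = now;
	b->tokens = toks;
	return 1;
}
#endif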

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};


/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in a given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is :
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
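
/* F.e. (illustration only): at 125000000 bytes/sec (1 Gbit/s), 10000 ns of
 * credit converts to 1250 bytes.  On ATM link layers only 48 of every 53
 * cell bytes carry payload, so the same credit yields 1250/53*48 = 1104
 * bytes before the overhead deduction.
 */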

/*
 * Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}
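
/* F.e. (illustration only): a TCPv4 super-packet with a 14-byte Ethernet
 * header, 20-byte IP header, 32-byte TCP header (with timestamps) and
 * gso_size 1448 reports 14+20+32+1448 = 1514 bytes per segment.
 */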

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				sch->qstats.drops++;
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
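	/* The caller accounted for a single skb; report the extra
	 * nb - 1 segments to the parent qdiscs.
	 */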
	if (nb > 1)
		qdisc_tree_decrease_qlen(sch, 1 - nb);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch);
		return qdisc_reshape_fail(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_to_ns(ktime_get());
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

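		/* OR-ing two s64 values is a branch-free way to test that
		 * both are non-negative: the result has its sign bit clear
		 * only if neither operand has it set.
		 */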
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds tempting, but it
		   is wrong in principle: we MUST NOT reorder packets
		   under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}
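
/* F.e. (illustration only): with rate 125000 bytes/sec and an empty bucket,
 * a 1250-byte packet needs 10000000 ns of credit, so toks comes out near
 * -10000000 and the watchdog is armed roughly 10 ms into the future.
 */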

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = ktime_to_ns(ktime_get());
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST] = { .type = NLA_U32 },
	[TCA_TBF_PBURST] = { .type = NLA_U32 },
};
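
/* These attributes normally arrive via netlink from tc(8); a typical
 * configuration (illustration only) would be:
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms
 */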

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}
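
/* A rough user-space sketch (illustration only, not built) of the
 * buffer <-> max_size conversion performed above, ignoring the linklayer
 * and overhead adjustments; names are made up for the example.
 */
#if 0
#include <stdint.h>

static uint64_t toy_ns_to_len(uint64_t rate_bytes_ps, uint64_t time_ns)
{
	return rate_bytes_ps * time_ns / 1000000000ULL;
}

/* toy_ns_to_len(125000, 10000000) == 1250: at 1 Mbit/s, a 10 ms buffer
 * admits at most ~1250 bytes in a single burst.
 */
#endif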

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_to_ns(ktime_get());
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");