/* Copyright (C) 2013 Cisco Systems, Inc, 2013.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * RFC 8033: https://tools.ietf.org/html/rfc8033
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
#define MAX_PROB 0xffffffffffffffff
#define PIE_SCALE 8

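/* A note on the fixed-point conventions used below: drop probability lives
 * in a u64 where MAX_PROB (2^64 - 1) stands for probability 1.0, and the
 * measured dequeue rate avg_dq_rate is kept left-shifted by PIE_SCALE bits
 * so that integer divisions retain precision.
 */
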
/* parameters used */
struct pie_params {
	psched_time_t target;	/* user specified target delay in pschedtime */
	u32 tupdate;		/* timer frequency (in jiffies) */
	u32 limit;		/* number of packets that can be enqueued */
	u32 alpha;		/* alpha and beta are between 0 and 32 */
	u32 beta;		/* and are used for shift relative to 1 */
	bool ecn;		/* true if ecn is enabled */
	bool bytemode;		/* to scale drop early prob based on pkt size */
};

/* variables used */
struct pie_vars {
	u64 prob;		/* probability, scaled to the full u64 range */
	psched_time_t burst_time;
	psched_time_t qdelay;
	psched_time_t qdelay_old;
	u64 dq_count;		/* measured in bytes */
	psched_time_t dq_tstamp;	/* start of current drain-rate measurement */
	u64 accu_prob;		/* accumulated drop probability */
	u32 avg_dq_rate;	/* bytes per pschedtime tick, scaled */
	u32 qlen_old;		/* in bytes */
	u8 accu_prob_overflows;	/* overflows of accu_prob */
};

/* statistics gathering */
struct pie_stats {
	u32 packets_in;		/* total number of packets enqueued */
	u32 dropped;		/* packets dropped due to pie_action */
	u32 overlimit;		/* dropped due to lack of space in queue */
	u32 maxq;		/* maximum queue size */
	u32 ecn_mark;		/* packets marked with ECN */
};

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_params params;
	struct pie_vars vars;
	struct pie_stats stats;
	struct timer_list adapt_timer;
	struct Qdisc *sch;
};

static void pie_params_init(struct pie_params *params)
{
	params->alpha = 2;
	params->beta = 20;
	params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC);	/* 15 ms */
	params->limit = 1000;	/* default of 1000 packets */
	params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC);	/* 15 ms */
	params->ecn = false;
	params->bytemode = false;
}

static void pie_vars_init(struct pie_vars *vars)
{
	vars->dq_count = DQCOUNT_INVALID;
	vars->accu_prob = 0;
	vars->avg_dq_rate = 0;
	/* default of 150 ms in pschedtime */
	vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC);
	vars->accu_prob_overflows = 0;
}

static bool drop_early(struct Qdisc *sch, u32 packet_size)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u64 rnd;
	u64 local_prob = q->vars.prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left, skip random early drop */
	if (q->vars.burst_time > 0)
		return false;

	/* If current delay is less than half of target, and
	 * if drop prob is low already, disable early_drop
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable drop_early,
	 * similar to min_th in RED
	 */
	if (sch->qstats.backlog < 2 * mtu)
		return false;

	/* If bytemode is turned on, use packet size to compute the new
	 * probability. Smaller packets get a lower drop probability this way.
	 */
	if (q->params.bytemode && packet_size <= mtu)
		local_prob = (u64)packet_size * div_u64(local_prob, mtu);
	else
		local_prob = q->vars.prob;

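	/* Derandomization (RFC 8033, section 5.1): accumulate the drop
	 * probability over consecutive packets. No packet is dropped while
	 * the accumulated value is below 0.85, and a drop is forced once it
	 * reaches 8.5 (tracked here as 8 wraps of the u64 plus MAX_PROB / 2).
	 * This avoids both back-to-back drops and overly long no-drop runs.
	 */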
	if (local_prob == 0) {
		q->vars.accu_prob = 0;
		q->vars.accu_prob_overflows = 0;
	}

	if (local_prob > MAX_PROB - q->vars.accu_prob)
		q->vars.accu_prob_overflows++;

	q->vars.accu_prob += local_prob;

	if (q->vars.accu_prob_overflows == 0 &&
	    q->vars.accu_prob < (MAX_PROB / 100) * 85)
		return false;
	if (q->vars.accu_prob_overflows == 8 &&
	    q->vars.accu_prob >= MAX_PROB / 2)
		return true;

	prandom_bytes(&rnd, 8);
	if (rnd < local_prob) {
		q->vars.accu_prob = 0;
		q->vars.accu_prob_overflows = 0;
		return true;
	}

	return false;
}

static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	if (!drop_early(sch, skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than 10%, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	q->vars.accu_prob = 0;
	q->vars.accu_prob_overflows = 0;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		q->params.tupdate =
			usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		q->params.limit = limit;
		sch->limit = limit;
	}

	if (tb[TCA_PIE_ALPHA])
		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

	if (tb[TCA_PIE_BETA])
		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

	if (tb[TCA_PIE_ECN])
		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

	if (tb[TCA_PIE_BYTEMODE])
		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

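/* Example of setting these parameters from user space, assuming the
 * iproute2 "tc" utility (not part of this file):
 *
 *   tc qdisc add dev eth0 root pie limit 1000 target 15ms tupdate 15ms ecn
 *
 * Each keyword maps to one of the TCA_PIE_* attributes handled above; tc
 * converts the time arguments to microseconds before sending them.
 */
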
static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	int qlen = sch->qstats.backlog;	/* current queue size in bytes */

	/* If the current queue holds QUEUE_THRESHOLD bytes or more (roughly
	 * ten full-sized packets) and dq_count is unset, we have enough
	 * backlog to measure the drain rate. Save the current time as
	 * dq_tstamp and start a measurement cycle.
	 */
	if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
		q->vars.dq_tstamp = psched_get_time();
		q->vars.dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If the queue
	 * length has receded below QUEUE_THRESHOLD bytes, reset dq_count to
	 * -1, as we no longer have enough backlog to measure the drain rate.
	 * The following block is entered only while a measurement cycle is
	 * running, i.e. while a substantial queue (QUEUE_THRESHOLD bytes or
	 * more) has built up. dq_count is in bytes and the time difference
	 * in psched_time, hence the rate is in bytes/psched_time.
	 */
	if (q->vars.dq_count != DQCOUNT_INVALID) {
		q->vars.dq_count += skb->len;

		if (q->vars.dq_count >= QUEUE_THRESHOLD) {
			psched_time_t now = psched_get_time();
			u32 dtime = now - q->vars.dq_tstamp;
			u32 count = q->vars.dq_count << PIE_SCALE;

			if (dtime == 0)
				return;

			count = count / dtime;

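			/* Exponentially weighted moving average with a 1/8
			 * weight: avg_dq_rate = 7/8 * old + 1/8 * new sample.
			 */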
			if (q->vars.avg_dq_rate == 0)
				q->vars.avg_dq_rate = count;
			else
				q->vars.avg_dq_rate =
				    (q->vars.avg_dq_rate -
				     (q->vars.avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, we hold
			 * on to the last drain rate calculated, else we reset
			 * dq_count to 0 to re-enter the if block when the next
			 * packet is dequeued
			 */
			if (qlen < QUEUE_THRESHOLD) {
				q->vars.dq_count = DQCOUNT_INVALID;
			} else {
				q->vars.dq_count = 0;
				q->vars.dq_tstamp = psched_get_time();
			}

			if (q->vars.burst_time > 0) {
				if (q->vars.burst_time > dtime)
					q->vars.burst_time -= dtime;
				else
					q->vars.burst_time = 0;
			}
		}
	}
}

static void calculate_probability(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u32 qlen = sch->qstats.backlog;	/* queue size in bytes */
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = q->vars.qdelay;	/* in pschedtime */
	s64 delta = 0;		/* determines the change in probability */
	u64 oldprob;
	u64 alpha, beta;
	u32 power;
	bool update_prob = true;

	q->vars.qdelay_old = q->vars.qdelay;

	if (q->vars.avg_dq_rate > 0)
		qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
	else
		qdelay = 0;

	/* If qdelay is zero and qlen is not, it means qlen is very small,
	 * less than dequeue_rate, so we do not update probability in this
	 * round.
	 */
	if (qdelay == 0 && qlen != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with a typical
	 * value of 0.125 for alpha. In this implementation, the values 0-32
	 * passed from user space represent this range. Also, alpha and beta
	 * have units of HZ and need to be scaled before they can be used to
	 * update the probability. alpha/beta are updated locally below by
	 * scaling down by 16 to come into the 0-2 range.
	 */
	alpha = ((u64)q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	beta = ((u64)q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
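	/* Here MAX_PROB / PSCHED_TICKS_PER_SEC converts a delay error measured
	 * in psched ticks into a probability step on the [0, MAX_PROB] scale,
	 * and >> 4 divides the 0-32 user value by 16.
	 */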

	/* We scale alpha and beta differently depending on how heavy the
	 * congestion is. Please see RFC 8033 for details.
	 */
	if (q->vars.prob < MAX_PROB / 10) {
		alpha >>= 1;
		beta >>= 1;

		power = 100;
		while (q->vars.prob < div_u64(MAX_PROB, power) &&
		       power <= 1000000) {
			alpha >>= 2;
			beta >>= 2;
			power *= 10;
		}
	}

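	/* PIE control law (RFC 8033):
	 *
	 *   p += alpha * (qdelay - target) + beta * (qdelay - qdelay_old)
	 *
	 * alpha weighs how far the delay sits from the target, beta weighs
	 * the direction in which the delay is moving.
	 */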
	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * (u64)(qdelay - q->params.target);
	delta += beta * (u64)(qdelay - qdelay_old);

	oldprob = q->vars.prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s64)(MAX_PROB / (100 / 2)) &&
	    q->vars.prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays (>= 250ms).
	 * 250ms is derived through experiments and provides error protection.
	 */

	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	q->vars.prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (q->vars.prob < oldprob) {
			q->vars.prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (q->vars.prob > oldprob)
			q->vars.prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * delay is 0 for 2 consecutive Tupdate periods.
	 */

	if (qdelay == 0 && qdelay_old == 0 && update_prob)
		/* Reduce drop probability to 98.4% of its current value */
		q->vars.prob -= q->vars.prob / 64u;

	q->vars.qdelay = qdelay;
	q->vars.qlen_old = qlen;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. If the delay has been low for 2 consecutive Tupdate periods
	 * 2. Calculated drop probability is zero
	 * 3. We have at least one estimate for the avg_dq_rate, i.e. it is a
	 *    non-zero value
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.qdelay_old < q->params.target / 2) &&
	    q->vars.prob == 0 &&
	    q->vars.avg_dq_rate > 0)
		pie_vars_init(&q->vars);
}

static void pie_timer(struct timer_list *t)
{
	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	calculate_probability(sch);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	q->sch = sch;
	timer_setup(&q->adapt_timer, pie_timer, 0);

	if (opt) {
		int err = pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE,
			jiffies_to_usecs(q->params.tupdate)) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct tc_pie_xstats st = {
		.prob		= q->vars.prob,
		.delay		= ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
				   NSEC_PER_USEC,
		/* unscale and return dq_rate in bytes per sec */
		.avg_dq_rate	= q->vars.avg_dq_rate *
				  (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.maxq		= q->stats.maxq,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = qdisc_dequeue_head(sch);

	if (!skb)
		return NULL;

	pie_process_dequeue(sch, skb);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id		= "pie",
	.priv_size	= sizeof(struct pie_sched_data),
	.enqueue	= pie_qdisc_enqueue,
	.dequeue	= pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= pie_init,
	.destroy	= pie_destroy,
	.reset		= pie_reset,
	.change		= pie_change,
	.dump		= pie_dump,
	.dump_stats	= pie_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");