// SPDX-License-Identifier: GPL-2.0-only
/* Flow Queue PIE discipline
 *
 * Copyright (C) 2019 Mohit P. Tahiliani <tahiliani@nitk.edu.in>
 * Copyright (C) 2019 Sachin D. Patil <sdp.sachin@gmail.com>
 * Copyright (C) 2019 V. Saicharan <vsaicharan1998@gmail.com>
 * Copyright (C) 2019 Mohit Bhasi <mohitbhasi1998@gmail.com>
 * Copyright (C) 2019 Leslie Monis <lesliemonis@gmail.com>
 * Copyright (C) 2019 Gautam Ramakrishnan <gautamramk@gmail.com>
 */

#include <linux/jhash.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pie.h>

/* Flow Queue PIE
 *
 * Principles:
 *   - Packets are classified into flows.
 *   - This is a stochastic model (since we use a hash, several flows
 *     might be hashed to the same slot).
 *   - Each flow has a PIE managed queue.
 *   - Flows are linked onto two (Round Robin) lists,
 *     so that new flows have priority over old ones.
 *   - For a given flow, packets are not reordered.
 *   - Drops during enqueue only.
 *   - ECN capability is off by default.
 *   - ECN threshold (if ECN is enabled) is at 10% by default.
 *   - Uses timestamps to calculate queue delay by default.
 */
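
/* A typical configuration from user space, assuming a standard iproute2
 * build with fq_pie support, might be:
 *
 *   tc qdisc add dev eth0 root fq_pie limit 10240 flows 1024 \
 *           target 15ms tupdate 15ms alpha 2 beta 20 ecn
 *
 * The parameter names mirror the TCA_FQ_PIE_* attributes parsed in
 * fq_pie_change() below.
 */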

/**
 * struct fq_pie_flow - contains data for each flow
 * @vars:	pie vars associated with the flow
 * @deficit:	number of remaining byte credits
 * @backlog:	size of data in the flow
 * @qlen:	number of packets in the flow
 * @flowchain:	flowchain for the flow
 * @head:	first packet in the flow
 * @tail:	last packet in the flow
 */
struct fq_pie_flow {
	struct pie_vars vars;
	s32 deficit;
	u32 backlog;
	u32 qlen;
	struct list_head flowchain;
	struct sk_buff *head;
	struct sk_buff *tail;
};

struct fq_pie_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_pie_flow *flows;
	struct Qdisc *sch;
	struct list_head old_flows;
	struct list_head new_flows;
	struct pie_params p_params;
	u32 ecn_prob;
	u32 flows_cnt;
	u32 quantum;
	u32 memory_limit;
	u32 new_flow_count;
	u32 memory_usage;
	u32 overmemory;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};

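/* Stochastic classification: reciprocal_scale() maps the 32-bit skb hash
 * into [0, flows_cnt) with a multiply-and-shift instead of a modulo, so
 * distinct flows may collide on the same queue slot.
 */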
static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
				struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

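/* Selects a flow for the skb. Returns a 1-based flow index
 * (1..flows_cnt), or 0 to signal that the packet should be dropped.
 * Precedence: a matching skb->priority minor number wins; otherwise the
 * tcf classifier chain, if one is attached; otherwise the stochastic
 * hash.
 */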
static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
				    int *qerr)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_pie_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_pie_flow *flow,
				  struct sk_buff *skb)
{
	if (!flow->head)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

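/* Enqueue: classify the packet, account memory, run PIE's early-drop
 * test on the selected queue (optionally ECN-marking instead of
 * dropping), then tail-add the skb and update the DRR flow lists.
 */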
static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct fq_pie_flow *sel_flow;
	int ret;
	u8 memory_limited = false;
	u8 enqueue = false;
	u32 pkt_len;
	u32 idx;

	/* Classifies packet into corresponding flow */
	idx = fq_pie_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	/* fq_pie_classify() returns a 1-based index; convert it before
	 * indexing the flows array to avoid an out-of-bounds access when
	 * idx == flows_cnt.
	 */
	idx--;

	sel_flow = &q->flows[idx];
	/* Checks whether adding a new packet would exceed memory limit */
	get_pie_cb(skb)->mem_usage = skb->truesize;
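	/* Note: this check lags by one packet; the flag is raised only once
	 * usage already exceeds the limit by more than this skb's truesize,
	 * and it merely feeds the overmemory counter below. Actual drops
	 * remain PIE's decision.
	 */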
	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;

	/* Checks if the qdisc is full */
	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	} else if (unlikely(memory_limited)) {
		q->overmemory++;
	}

	if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
			    sel_flow->backlog, skb->len)) {
		enqueue = true;
	} else if (q->p_params.ecn &&
		   sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
		   INET_ECN_set_ce(skb)) {
		/* If the packet is ECN capable and the drop probability is
		 * below ecn_prob percent, mark it instead of dropping it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->p_params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		pkt_len = qdisc_pkt_len(skb);
		q->stats.packets_in++;
		q->memory_usage += skb->truesize;
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
		flow_queue_add(sel_flow, skb);
		if (list_empty(&sel_flow->flowchain)) {
			list_add_tail(&sel_flow->flowchain, &q->new_flows);
			q->new_flow_count++;
			sel_flow->deficit = q->quantum;
			sel_flow->qlen = 0;
			sel_flow->backlog = 0;
		}
		sel_flow->qlen++;
		sel_flow->backlog += pkt_len;
		return NET_XMIT_SUCCESS;
	}
out:
	q->stats.dropped++;
	sel_flow->vars.accu_prob = 0;
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);
	return NET_XMIT_CN;
}

static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
	[TCA_FQ_PIE_LIMIT]		= {.type = NLA_U32},
	[TCA_FQ_PIE_FLOWS]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TARGET]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TUPDATE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ALPHA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BETA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_QUANTUM]		= {.type = NLA_U32},
	[TCA_FQ_PIE_MEMORY_LIMIT]	= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN_PROB]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BYTEMODE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]	= {.type = NLA_U32},
};

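/* Pops the head skb of a flow; callers must ensure flow->head is non-NULL. */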
static inline struct sk_buff *dequeue_head(struct fq_pie_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

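/* DRR scheduling, as in fq_codel: new_flows are serviced before
 * old_flows. A flow that has used up its deficit is refilled with one
 * quantum and rotated to the tail of old_flows; an empty flow gets one
 * forced pass through old_flows (to prevent starvation) before it is
 * unlinked.
 */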
static struct sk_buff *fq_pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct fq_pie_flow *flow;
	struct list_head *head;
	u32 pkt_len;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_pie_flow, flowchain);
	/* Flow has exhausted all its credits */
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	if (flow->head) {
		skb = dequeue_head(flow);
		pkt_len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= pkt_len;
		sch->q.qlen--;
		qdisc_bstats_update(sch, skb);
	}

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if (head == &q->new_flows && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}

	flow->qlen--;
	flow->deficit -= pkt_len;
	flow->backlog -= pkt_len;
	q->memory_usage -= get_pie_cb(skb)->mem_usage;
	pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);
	return skb;
}

static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
	unsigned int len_dropped = 0;
	unsigned int num_dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
	if (err < 0)
		return err;

	sch_tree_lock(sch);
	if (tb[TCA_FQ_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);

		q->p_params.limit = limit;
		sch->limit = limit;
	}
	if (tb[TCA_FQ_PIE_FLOWS]) {
		if (q->flows) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows cannot be changed");
			goto flow_error;
		}
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
		if (!q->flows_cnt || q->flows_cnt >= 65536) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows must range in [1..65535]");
			goto flow_error;
		}
	}
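	/* The 65535 cap also keeps flow indices within the u16 loop
	 * counters used by fq_pie_timer(), fq_pie_init() and
	 * fq_pie_reset().
	 */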

	/* target is specified in us; convert to pschedtime */
	if (tb[TCA_FQ_PIE_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);

		q->p_params.target =
			PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}
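	/* For example, with PSCHED_SHIFT == 6 (one tick per 64 ns), a
	 * 15000 us target becomes 15000 * 1000 >> 6 = 234375 ticks.
	 */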

	/* tupdate arrives in us; store it as jiffies */
	if (tb[TCA_FQ_PIE_TUPDATE])
		q->p_params.tupdate =
			usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE]));

	if (tb[TCA_FQ_PIE_ALPHA])
		q->p_params.alpha = nla_get_u32(tb[TCA_FQ_PIE_ALPHA]);

	if (tb[TCA_FQ_PIE_BETA])
		q->p_params.beta = nla_get_u32(tb[TCA_FQ_PIE_BETA]);

	if (tb[TCA_FQ_PIE_QUANTUM])
		q->quantum = nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]);

	if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
		q->memory_limit = nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]);

	if (tb[TCA_FQ_PIE_ECN_PROB])
		q->ecn_prob = nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]);

	if (tb[TCA_FQ_PIE_ECN])
		q->p_params.ecn = nla_get_u32(tb[TCA_FQ_PIE_ECN]);

	if (tb[TCA_FQ_PIE_BYTEMODE])
		q->p_params.bytemode = nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]);

	if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
		q->p_params.dq_rate_estimator =
			nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]);

	/* Drop excess packets if new limit is lower */
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);

		len_dropped += qdisc_pkt_len(skb);
		num_dropped += 1;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);

	sch_tree_unlock(sch);
	return 0;

flow_error:
	sch_tree_unlock(sch);
	return -EINVAL;
}

static void fq_pie_timer(struct timer_list *t)
{
	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
	u16 idx;

	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);

	for (idx = 0; idx < q->flows_cnt; idx++)
		pie_calculate_probability(&q->p_params, &q->flows[idx].vars,
					  q->flows[idx].backlog);

	/* reset the timer to fire after 'tupdate' jiffies. */
	if (q->p_params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);

	spin_unlock(root_lock);
}

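/* Defaults chosen at init time: a 10240-packet limit, 1024 flow queues,
 * a quantum of one interface MTU (psched_mtu()), a 10% ECN marking
 * threshold and a 32 MB memory limit; fq_pie_change() applies any
 * overrides from @opt.
 */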
static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	int err;
	u16 idx;

	pie_params_init(&q->p_params);
	sch->limit = 10 * 1024;
	q->p_params.limit = sch->limit;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->sch = sch;
	q->ecn_prob = 10;
	q->flows_cnt = 1024;
	q->memory_limit = SZ_32M;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	timer_setup(&q->adapt_timer, fq_pie_timer, 0);

	if (opt) {
		err = fq_pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),
			    GFP_KERNEL);
	if (!q->flows) {
		err = -ENOMEM;
		goto init_failure;
	}
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);

	return 0;

init_failure:
	q->flows_cnt = 0;

	return err;
}

static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (!opts)
		return -EMSGSIZE;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, q->flows_cnt) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->p_params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
			jiffies_to_usecs(q->p_params.tupdate)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, q->p_params.alpha) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BETA, q->p_params.beta) ||
	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT, q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, q->ecn_prob) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN, q->p_params.ecn) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, q->p_params.bytemode) ||
	    nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
			q->p_params.dq_rate_estimator))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tc_fq_pie_xstats st = {
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.overmemory	= q->overmemory,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
		.new_flow_count = q->new_flow_count,
		.memory_usage   = q->memory_usage,
	};
	struct list_head *pos;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void fq_pie_reset(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	u16 idx;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		/* Removes all packets from flow */
		rtnl_kfree_skbs(flow->head, flow->tail);
		flow->head = NULL;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
}

static void fq_pie_destroy(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	del_timer_sync(&q->adapt_timer);
	kvfree(q->flows);
}

static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = {
	.id		= "fq_pie",
	.priv_size	= sizeof(struct fq_pie_sched_data),
	.enqueue	= fq_pie_qdisc_enqueue,
	.dequeue	= fq_pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_pie_init,
	.destroy	= fq_pie_destroy,
	.reset		= fq_pie_reset,
	.change		= fq_pie_change,
	.dump		= fq_pie_dump,
	.dump_stats	= fq_pie_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init fq_pie_module_init(void)
{
	return register_qdisc(&fq_pie_qdisc_ops);
}

static void __exit fq_pie_module_exit(void)
{
	unregister_qdisc(&fq_pie_qdisc_ops);
}

module_init(fq_pie_module_init);
module_exit(fq_pie_module_exit);

MODULE_DESCRIPTION("Flow Queue Proportional Integral controller Enhanced (FQ-PIE)");
MODULE_AUTHOR("Mohit P. Tahiliani");
MODULE_LICENSE("GPL");