xref: /linux/net/sched/sch_fq.c (revision b7d3826c2ed6c3e626e7ae796c5df2c0d2551c6a)
1 /*
2  * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
3  *
4  *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
5  *
6  *	This program is free software; you can redistribute it and/or
7  *	modify it under the terms of the GNU General Public License
8  *	as published by the Free Software Foundation; either version
9  *	2 of the License, or (at your option) any later version.
10  *
11  *  Meant to be used mostly for locally generated traffic:
12  *  Fast classification depends on skb->sk being set before reaching us.
13  *  If not (router workload), we use rxhash as a fallback, with a 32 bit wide hash.
14  *  All packets belonging to a socket are considered a 'flow'.
15  *
16  *  Flows are dynamically allocated and stored in a hash table of RB trees.
17  *  They are also part of one of two Round Robin 'queues' (new or old flows).
18  *
19  *  Burst avoidance (aka pacing) capability:
20  *
21  *  Transports (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
22  *  bunch of packets, and this packet scheduler adds delay between
23  *  packets to respect the rate limitation.
24  *
25  *  enqueue():
26  *   - lookup one RB tree (out of 1024 or more) to find the flow.
27  *     If the flow does not exist, create it and add it to the tree.
28  *     Add the skb to the per flow list of skbs (fifo).
29  *   - Use a special fifo for high prio packets.
30  *
31  *  dequeue(): serves flows in Round Robin.
32  *  Note: When a flow becomes empty, we do not immediately remove it from
33  *  the rb trees, for performance reasons (it's expected to send additional
34  *  packets, or the SLAB cache will reuse the socket for another flow).
35  */
36 
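/* Illustrative usage example (hypothetical device name, arbitrary values):
 *
 *	tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100
 *
 * With pacing enabled, fq_dequeue() spaces packets of a flow roughly
 * qdisc_pkt_len(skb) * NSEC_PER_SEC / rate nanoseconds apart.
 */
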
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/kernel.h>
40 #include <linux/jiffies.h>
41 #include <linux/string.h>
42 #include <linux/in.h>
43 #include <linux/errno.h>
44 #include <linux/init.h>
45 #include <linux/skbuff.h>
46 #include <linux/slab.h>
47 #include <linux/rbtree.h>
48 #include <linux/hash.h>
49 #include <linux/prefetch.h>
50 #include <linux/vmalloc.h>
51 #include <net/netlink.h>
52 #include <net/pkt_sched.h>
53 #include <net/sock.h>
54 #include <net/tcp_states.h>
55 #include <net/tcp.h>
56 
57 /*
58  * Per flow structure, dynamically allocated
59  */
60 struct fq_flow {
61 	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
62 	union {
63 		struct sk_buff *tail;	/* last skb in the list */
64 		unsigned long  age;	/* jiffies when flow was emptied, for gc */
65 	};
66 	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
67 	struct sock	*sk;
68 	int		qlen;		/* number of packets in flow queue */
69 	int		credit;		/* byte credit (DRR-like) */
70 	u32		socket_hash;	/* sk_hash */
71 	struct fq_flow *next;		/* next pointer in RR lists, or &detached */
72 
73 	struct rb_node  rate_node;	/* anchor in q->delayed tree */
74 	u64		time_next_packet;
75 };
76 
77 struct fq_flow_head {
78 	struct fq_flow *first;
79 	struct fq_flow *last;
80 };
81 
82 struct fq_sched_data {
83 	struct fq_flow_head new_flows;
84 
85 	struct fq_flow_head old_flows;
86 
87 	struct rb_root	delayed;	/* for rate limited flows */
88 	u64		time_next_delayed_flow;
89 	unsigned long	unthrottle_latency_ns;
90 
91 	struct fq_flow	internal;	/* for non classified or high prio packets */
92 	u32		quantum;
93 	u32		initial_quantum;
94 	u32		flow_refill_delay;
95 	u32		flow_max_rate;	/* optional max rate per flow */
96 	u32		flow_plimit;	/* max packets per flow */
97 	u32		orphan_mask;	/* mask for orphaned skb */
98 	u32		low_rate_threshold;
99 	struct rb_root	*fq_root;
100 	u8		rate_enable;
101 	u8		fq_trees_log;
102 
103 	u32		flows;
104 	u32		inactive_flows;
105 	u32		throttled_flows;
106 
107 	u64		stat_gc_flows;
108 	u64		stat_internal_packets;
109 	u64		stat_throttled;
110 	u64		stat_flows_plimit;
111 	u64		stat_pkts_too_long;
112 	u64		stat_allocation_errors;
113 	struct qdisc_watchdog watchdog;
114 };
115 
116 /* special values for f->next: detached (not on old/new lists) or throttled */
117 static struct fq_flow detached, throttled;
118 
119 static void fq_flow_set_detached(struct fq_flow *f)
120 {
121 	f->next = &detached;
122 	f->age = jiffies;
123 }
124 
125 static bool fq_flow_is_detached(const struct fq_flow *f)
126 {
127 	return f->next == &detached;
128 }
129 
130 static bool fq_flow_is_throttled(const struct fq_flow *f)
131 {
132 	return f->next == &throttled;
133 }
134 
135 static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
136 {
137 	if (head->first)
138 		head->last->next = flow;
139 	else
140 		head->first = flow;
141 	head->last = flow;
142 	flow->next = NULL;
143 }
144 
145 static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
146 {
147 	rb_erase(&f->rate_node, &q->delayed);
148 	q->throttled_flows--;
149 	fq_flow_add_tail(&q->old_flows, f);
150 }
151 
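/* Park a flow in the q->delayed rbtree, keyed by its next packet departure
 * time, and pull the qdisc wakeup time earlier if this flow becomes the
 * most urgent throttled one.
 */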
152 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
153 {
154 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
155 
156 	while (*p) {
157 		struct fq_flow *aux;
158 
159 		parent = *p;
160 		aux = rb_entry(parent, struct fq_flow, rate_node);
161 		if (f->time_next_packet >= aux->time_next_packet)
162 			p = &parent->rb_right;
163 		else
164 			p = &parent->rb_left;
165 	}
166 	rb_link_node(&f->rate_node, parent, p);
167 	rb_insert_color(&f->rate_node, &q->delayed);
168 	q->throttled_flows++;
169 	q->stat_throttled++;
170 
171 	f->next = &throttled;
172 	if (q->time_next_delayed_flow > f->time_next_packet)
173 		q->time_next_delayed_flow = f->time_next_packet;
174 }
175 
176 
177 static struct kmem_cache *fq_flow_cachep __read_mostly;
178 
179 
180 /* limit number of collected flows per round */
181 #define FQ_GC_MAX 8
182 #define FQ_GC_AGE (3*HZ)
183 
184 static bool fq_gc_candidate(const struct fq_flow *f)
185 {
186 	return fq_flow_is_detached(f) &&
187 	       time_after(jiffies, f->age + FQ_GC_AGE);
188 }
189 
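/* Opportunistic garbage collection: while walking the rbtree toward @sk,
 * remember up to FQ_GC_MAX flows that have been idle for more than FQ_GC_AGE
 * and free them once the walk is over.
 */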
190 static void fq_gc(struct fq_sched_data *q,
191 		  struct rb_root *root,
192 		  struct sock *sk)
193 {
194 	struct fq_flow *f, *tofree[FQ_GC_MAX];
195 	struct rb_node **p, *parent;
196 	int fcnt = 0;
197 
198 	p = &root->rb_node;
199 	parent = NULL;
200 	while (*p) {
201 		parent = *p;
202 
203 		f = rb_entry(parent, struct fq_flow, fq_node);
204 		if (f->sk == sk)
205 			break;
206 
207 		if (fq_gc_candidate(f)) {
208 			tofree[fcnt++] = f;
209 			if (fcnt == FQ_GC_MAX)
210 				break;
211 		}
212 
213 		if (f->sk > sk)
214 			p = &parent->rb_right;
215 		else
216 			p = &parent->rb_left;
217 	}
218 
219 	q->flows -= fcnt;
220 	q->inactive_flows -= fcnt;
221 	q->stat_gc_flows += fcnt;
222 	while (fcnt) {
223 		struct fq_flow *f = tofree[--fcnt];
224 
225 		rb_erase(&f->fq_node, root);
226 		kmem_cache_free(fq_flow_cachep, f);
227 	}
228 }
229 
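/* Map an skb to its flow: TC_PRIO_CONTROL packets go to the internal queue,
 * packets without a full socket are hashed into an "orphan" bucket, and
 * everything else is looked up (or created) in the rbtree indexed by sk.
 */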
230 static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
231 {
232 	struct rb_node **p, *parent;
233 	struct sock *sk = skb->sk;
234 	struct rb_root *root;
235 	struct fq_flow *f;
236 
237 	/* warning: no starvation prevention... */
238 	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
239 		return &q->internal;
240 
241 	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
242 	 * or a listener (SYNCOOKIE mode)
243 	 * 1) request sockets are not full blown,
244 	 *    they do not contain sk_pacing_rate
245 	 * 2) They are not part of a 'flow' yet
246 	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
247 	 *    especially if the listener set SO_MAX_PACING_RATE
248 	 * 4) We pretend they are orphaned
249 	 */
250 	if (!sk || sk_listener(sk)) {
251 		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
252 
253 		/* By forcing low order bit to 1, we make sure to not
254 		 * collide with a local flow (socket pointers are word aligned)
255 		 */
256 		sk = (struct sock *)((hash << 1) | 1UL);
257 		skb_orphan(skb);
258 	}
259 
260 	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
261 
262 	if (q->flows >= (2U << q->fq_trees_log) &&
263 	    q->inactive_flows > q->flows/2)
264 		fq_gc(q, root, sk);
265 
266 	p = &root->rb_node;
267 	parent = NULL;
268 	while (*p) {
269 		parent = *p;
270 
271 		f = rb_entry(parent, struct fq_flow, fq_node);
272 		if (f->sk == sk) {
273 			/* socket might have been reallocated, so check
274 			 * if its sk_hash is the same.
275 			 * If not, we need to refill credit with
276 			 * initial quantum
277 			 */
278 			if (unlikely(skb->sk &&
279 				     f->socket_hash != sk->sk_hash)) {
280 				f->credit = q->initial_quantum;
281 				f->socket_hash = sk->sk_hash;
282 				if (fq_flow_is_throttled(f))
283 					fq_flow_unset_throttled(q, f);
284 				f->time_next_packet = 0ULL;
285 			}
286 			return f;
287 		}
288 		if (f->sk > sk)
289 			p = &parent->rb_right;
290 		else
291 			p = &parent->rb_left;
292 	}
293 
294 	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
295 	if (unlikely(!f)) {
296 		q->stat_allocation_errors++;
297 		return &q->internal;
298 	}
299 	fq_flow_set_detached(f);
300 	f->sk = sk;
301 	if (skb->sk)
302 		f->socket_hash = sk->sk_hash;
303 	f->credit = q->initial_quantum;
304 
305 	rb_link_node(&f->fq_node, parent, p);
306 	rb_insert_color(&f->fq_node, root);
307 
308 	q->flows++;
309 	q->inactive_flows++;
310 	return f;
311 }
312 
313 
314 /* remove one skb from head of flow queue */
315 static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
316 {
317 	struct sk_buff *skb = flow->head;
318 
319 	if (skb) {
320 		flow->head = skb->next;
321 		skb_mark_not_on_list(skb);
322 		flow->qlen--;
323 		qdisc_qstats_backlog_dec(sch, skb);
324 		sch->q.qlen--;
325 	}
326 	return skb;
327 }
328 
329 static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
330 {
331 	struct sk_buff *head = flow->head;
332 
333 	skb->next = NULL;
334 	if (!head)
335 		flow->head = skb;
336 	else
337 		flow->tail->next = skb;
338 
339 	flow->tail = skb;
340 }
341 
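/* Enqueue entry point: classify the skb into a flow, enforce the global and
 * per-flow packet limits, and (re)attach a previously detached flow to the
 * new_flows round robin list.
 */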
342 static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
343 		      struct sk_buff **to_free)
344 {
345 	struct fq_sched_data *q = qdisc_priv(sch);
346 	struct fq_flow *f;
347 
348 	if (unlikely(sch->q.qlen >= sch->limit))
349 		return qdisc_drop(skb, sch, to_free);
350 
351 	f = fq_classify(skb, q);
352 	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
353 		q->stat_flows_plimit++;
354 		return qdisc_drop(skb, sch, to_free);
355 	}
356 
357 	f->qlen++;
358 	qdisc_qstats_backlog_inc(sch, skb);
359 	if (fq_flow_is_detached(f)) {
360 		struct sock *sk = skb->sk;
361 
362 		fq_flow_add_tail(&q->new_flows, f);
363 		if (time_after(jiffies, f->age + q->flow_refill_delay))
364 			f->credit = max_t(u32, f->credit, q->quantum);
365 		if (sk && q->rate_enable) {
366 			if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
367 				     SK_PACING_FQ))
368 				smp_store_release(&sk->sk_pacing_status,
369 						  SK_PACING_FQ);
370 		}
371 		q->inactive_flows--;
372 	}
373 
374 	/* Note: this overwrites f->age */
375 	flow_queue_add(f, skb);
376 
377 	if (unlikely(f == &q->internal)) {
378 		q->stat_internal_packets++;
379 	}
380 	sch->q.qlen++;
381 
382 	return NET_XMIT_SUCCESS;
383 }
384 
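/* Move back to old_flows every throttled flow whose departure time has been
 * reached, and remember when the next throttled flow becomes eligible.
 */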
385 static void fq_check_throttled(struct fq_sched_data *q, u64 now)
386 {
387 	unsigned long sample;
388 	struct rb_node *p;
389 
390 	if (q->time_next_delayed_flow > now)
391 		return;
392 
393 	/* Update unthrottle latency EWMA.
394 	 * This is cheap and can help diagnose timer/latency problems.
395 	 */
396 	sample = (unsigned long)(now - q->time_next_delayed_flow);
397 	q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
398 	q->unthrottle_latency_ns += sample >> 3;
399 
400 	q->time_next_delayed_flow = ~0ULL;
401 	while ((p = rb_first(&q->delayed)) != NULL) {
402 		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);
403 
404 		if (f->time_next_packet > now) {
405 			q->time_next_delayed_flow = f->time_next_packet;
406 			break;
407 		}
408 		fq_flow_unset_throttled(q, f);
409 	}
410 }
411 
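/* Dequeue entry point: serve the internal queue first, then flows from the
 * new/old round robin lists; flows whose next departure time is in the
 * future are parked in the delayed rbtree instead of being served.
 */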
412 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
413 {
414 	struct fq_sched_data *q = qdisc_priv(sch);
415 	u64 now = ktime_get_ns();
416 	struct fq_flow_head *head;
417 	struct sk_buff *skb;
418 	struct fq_flow *f;
419 	u32 rate, plen;
420 
421 	skb = fq_dequeue_head(sch, &q->internal);
422 	if (skb)
423 		goto out;
424 	fq_check_throttled(q, now);
425 begin:
426 	head = &q->new_flows;
427 	if (!head->first) {
428 		head = &q->old_flows;
429 		if (!head->first) {
430 			if (q->time_next_delayed_flow != ~0ULL)
431 				qdisc_watchdog_schedule_ns(&q->watchdog,
432 							   q->time_next_delayed_flow);
433 			return NULL;
434 		}
435 	}
436 	f = head->first;
437 
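	/* Deficit Round Robin: a flow out of credit gets a fresh quantum and is
	 * moved to the tail of old_flows before being considered again.
	 */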
438 	if (f->credit <= 0) {
439 		f->credit += q->quantum;
440 		head->first = f->next;
441 		fq_flow_add_tail(&q->old_flows, f);
442 		goto begin;
443 	}
444 
445 	skb = f->head;
446 	if (skb && !skb_is_tcp_pure_ack(skb)) {
447 		u64 time_next_packet = max_t(u64, ktime_to_ns(skb->tstamp),
448 					     f->time_next_packet);
449 
450 		if (now < time_next_packet) {
451 			head->first = f->next;
452 			f->time_next_packet = time_next_packet;
453 			fq_flow_set_throttled(q, f);
454 			goto begin;
455 		}
456 	}
457 
458 	skb = fq_dequeue_head(sch, f);
459 	if (!skb) {
460 		head->first = f->next;
461 		/* force a pass through old_flows to prevent starvation */
462 		if ((head == &q->new_flows) && q->old_flows.first) {
463 			fq_flow_add_tail(&q->old_flows, f);
464 		} else {
465 			fq_flow_set_detached(f);
466 			q->inactive_flows++;
467 		}
468 		goto begin;
469 	}
470 	prefetch(&skb->end);
471 	f->credit -= qdisc_pkt_len(skb);
472 
473 	if (ktime_to_ns(skb->tstamp) || !q->rate_enable)
474 		goto out;
475 
476 	rate = q->flow_max_rate;
477 	if (skb->sk)
478 		rate = min(skb->sk->sk_pacing_rate, rate);
479 
480 	if (rate <= q->low_rate_threshold) {
481 		f->credit = 0;
482 		plen = qdisc_pkt_len(skb);
483 	} else {
484 		plen = max(qdisc_pkt_len(skb), q->quantum);
485 		if (f->credit > 0)
486 			goto out;
487 	}
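	/* Convert the pacing rate (bytes per second) into a gap before the next
	 * packet: len = plen * NSEC_PER_SEC / rate.
	 * For example (illustrative numbers), a 1514 byte packet paced at
	 * 1,250,000 bytes/sec (10 Mbit/s) gives a gap of roughly 1.2 ms.
	 */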
488 	if (rate != ~0U) {
489 		u64 len = (u64)plen * NSEC_PER_SEC;
490 
491 		if (likely(rate))
492 			do_div(len, rate);
493 		/* Since socket rate can change later,
494 		 * clamp the delay to 1 second.
495 		 * Really, providers of too big packets should be fixed!
496 		 */
497 		if (unlikely(len > NSEC_PER_SEC)) {
498 			len = NSEC_PER_SEC;
499 			q->stat_pkts_too_long++;
500 		}
501 		/* Account for scheduler/timer drift.
502 		 * f->time_next_packet was set when prior packet was sent,
503 		 * and current time (@now) can be too late by tens of us.
504 		 */
505 		if (f->time_next_packet)
506 			len -= min(len/2, now - f->time_next_packet);
507 		f->time_next_packet = now + len;
508 	}
509 out:
510 	qdisc_bstats_update(sch, skb);
511 	return skb;
512 }
513 
514 static void fq_flow_purge(struct fq_flow *flow)
515 {
516 	rtnl_kfree_skbs(flow->head, flow->tail);
517 	flow->head = NULL;
518 	flow->qlen = 0;
519 }
520 
521 static void fq_reset(struct Qdisc *sch)
522 {
523 	struct fq_sched_data *q = qdisc_priv(sch);
524 	struct rb_root *root;
525 	struct rb_node *p;
526 	struct fq_flow *f;
527 	unsigned int idx;
528 
529 	sch->q.qlen = 0;
530 	sch->qstats.backlog = 0;
531 
532 	fq_flow_purge(&q->internal);
533 
534 	if (!q->fq_root)
535 		return;
536 
537 	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
538 		root = &q->fq_root[idx];
539 		while ((p = rb_first(root)) != NULL) {
540 			f = rb_entry(p, struct fq_flow, fq_node);
541 			rb_erase(p, root);
542 
543 			fq_flow_purge(f);
544 
545 			kmem_cache_free(fq_flow_cachep, f);
546 		}
547 	}
548 	q->new_flows.first	= NULL;
549 	q->old_flows.first	= NULL;
550 	q->delayed		= RB_ROOT;
551 	q->flows		= 0;
552 	q->inactive_flows	= 0;
553 	q->throttled_flows	= 0;
554 }
555 
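/* Move every flow from the old hash array to the new one, freeing flows that
 * are garbage collection candidates along the way. Called under the qdisc
 * tree lock from fq_resize().
 */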
556 static void fq_rehash(struct fq_sched_data *q,
557 		      struct rb_root *old_array, u32 old_log,
558 		      struct rb_root *new_array, u32 new_log)
559 {
560 	struct rb_node *op, **np, *parent;
561 	struct rb_root *oroot, *nroot;
562 	struct fq_flow *of, *nf;
563 	int fcnt = 0;
564 	u32 idx;
565 
566 	for (idx = 0; idx < (1U << old_log); idx++) {
567 		oroot = &old_array[idx];
568 		while ((op = rb_first(oroot)) != NULL) {
569 			rb_erase(op, oroot);
570 			of = rb_entry(op, struct fq_flow, fq_node);
571 			if (fq_gc_candidate(of)) {
572 				fcnt++;
573 				kmem_cache_free(fq_flow_cachep, of);
574 				continue;
575 			}
576 			nroot = &new_array[hash_ptr(of->sk, new_log)];
577 
578 			np = &nroot->rb_node;
579 			parent = NULL;
580 			while (*np) {
581 				parent = *np;
582 
583 				nf = rb_entry(parent, struct fq_flow, fq_node);
584 				BUG_ON(nf->sk == of->sk);
585 
586 				if (nf->sk > of->sk)
587 					np = &parent->rb_right;
588 				else
589 					np = &parent->rb_left;
590 			}
591 
592 			rb_link_node(&of->fq_node, parent, np);
593 			rb_insert_color(&of->fq_node, nroot);
594 		}
595 	}
596 	q->flows -= fcnt;
597 	q->inactive_flows -= fcnt;
598 	q->stat_gc_flows += fcnt;
599 }
600 
601 static void fq_free(void *addr)
602 {
603 	kvfree(addr);
604 }
605 
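/* (Re)allocate an array of 1 << log rbtree roots and rehash existing flows
 * into it. This is a no-op when the table already exists at the requested
 * size.
 */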
606 static int fq_resize(struct Qdisc *sch, u32 log)
607 {
608 	struct fq_sched_data *q = qdisc_priv(sch);
609 	struct rb_root *array;
610 	void *old_fq_root;
611 	u32 idx;
612 
613 	if (q->fq_root && log == q->fq_trees_log)
614 		return 0;
615 
616 	/* If XPS was set up, we can allocate memory on the right NUMA node */
617 	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
618 			      netdev_queue_numa_node_read(sch->dev_queue));
619 	if (!array)
620 		return -ENOMEM;
621 
622 	for (idx = 0; idx < (1U << log); idx++)
623 		array[idx] = RB_ROOT;
624 
625 	sch_tree_lock(sch);
626 
627 	old_fq_root = q->fq_root;
628 	if (old_fq_root)
629 		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
630 
631 	q->fq_root = array;
632 	q->fq_trees_log = log;
633 
634 	sch_tree_unlock(sch);
635 
636 	fq_free(old_fq_root);
637 
638 	return 0;
639 }
640 
641 static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
642 	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
643 	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
644 	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
645 	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
646 	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
647 	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
648 	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
649 	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
650 	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
651 	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
652 };
653 
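/* Apply a netlink configuration change. Illustrative example of a runtime
 * update from user space (hypothetical device and values):
 *	tc qdisc change dev eth0 root fq maxrate 1gbit quantum 6056
 */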
654 static int fq_change(struct Qdisc *sch, struct nlattr *opt,
655 		     struct netlink_ext_ack *extack)
656 {
657 	struct fq_sched_data *q = qdisc_priv(sch);
658 	struct nlattr *tb[TCA_FQ_MAX + 1];
659 	int err, drop_count = 0;
660 	unsigned int drop_len = 0;
661 	u32 fq_log;
662 
663 	if (!opt)
664 		return -EINVAL;
665 
666 	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy, NULL);
667 	if (err < 0)
668 		return err;
669 
670 	sch_tree_lock(sch);
671 
672 	fq_log = q->fq_trees_log;
673 
674 	if (tb[TCA_FQ_BUCKETS_LOG]) {
675 		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);
676 
677 		if (nval >= 1 && nval <= ilog2(256*1024))
678 			fq_log = nval;
679 		else
680 			err = -EINVAL;
681 	}
682 	if (tb[TCA_FQ_PLIMIT])
683 		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);
684 
685 	if (tb[TCA_FQ_FLOW_PLIMIT])
686 		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
687 
688 	if (tb[TCA_FQ_QUANTUM]) {
689 		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
690 
691 		if (quantum > 0)
692 			q->quantum = quantum;
693 		else
694 			err = -EINVAL;
695 	}
696 
697 	if (tb[TCA_FQ_INITIAL_QUANTUM])
698 		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
699 
700 	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
701 		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
702 				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
703 
704 	if (tb[TCA_FQ_FLOW_MAX_RATE])
705 		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
706 
707 	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
708 		q->low_rate_threshold =
709 			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);
710 
711 	if (tb[TCA_FQ_RATE_ENABLE]) {
712 		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
713 
714 		if (enable <= 1)
715 			q->rate_enable = enable;
716 		else
717 			err = -EINVAL;
718 	}
719 
720 	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
721 		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);
722 
723 		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
724 	}
725 
726 	if (tb[TCA_FQ_ORPHAN_MASK])
727 		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
728 
729 	if (!err) {
730 		sch_tree_unlock(sch);
731 		err = fq_resize(sch, fq_log);
732 		sch_tree_lock(sch);
733 	}
734 	while (sch->q.qlen > sch->limit) {
735 		struct sk_buff *skb = fq_dequeue(sch);
736 
737 		if (!skb)
738 			break;
739 		drop_len += qdisc_pkt_len(skb);
740 		rtnl_kfree_skbs(skb, skb);
741 		drop_count++;
742 	}
743 	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
744 
745 	sch_tree_unlock(sch);
746 	return err;
747 }
748 
749 static void fq_destroy(struct Qdisc *sch)
750 {
751 	struct fq_sched_data *q = qdisc_priv(sch);
752 
753 	fq_reset(sch);
754 	fq_free(q->fq_root);
755 	qdisc_watchdog_cancel(&q->watchdog);
756 }
757 
758 static int fq_init(struct Qdisc *sch, struct nlattr *opt,
759 		   struct netlink_ext_ack *extack)
760 {
761 	struct fq_sched_data *q = qdisc_priv(sch);
762 	int err;
763 
764 	sch->limit		= 10000;
765 	q->flow_plimit		= 100;
766 	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
767 	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
768 	q->flow_refill_delay	= msecs_to_jiffies(40);
769 	q->flow_max_rate	= ~0U;
770 	q->time_next_delayed_flow = ~0ULL;
771 	q->rate_enable		= 1;
772 	q->new_flows.first	= NULL;
773 	q->old_flows.first	= NULL;
774 	q->delayed		= RB_ROOT;
775 	q->fq_root		= NULL;
776 	q->fq_trees_log		= ilog2(1024);
777 	q->orphan_mask		= 1024 - 1;
778 	q->low_rate_threshold	= 550000 / 8;
779 	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
780 
781 	if (opt)
782 		err = fq_change(sch, opt, extack);
783 	else
784 		err = fq_resize(sch, q->fq_trees_log);
785 
786 	return err;
787 }
788 
789 static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
790 {
791 	struct fq_sched_data *q = qdisc_priv(sch);
792 	struct nlattr *opts;
793 
794 	opts = nla_nest_start(skb, TCA_OPTIONS);
795 	if (opts == NULL)
796 		goto nla_put_failure;
797 
798 	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
799 
800 	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
801 	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
802 	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
803 	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
804 	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
805 	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
806 	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
807 			jiffies_to_usecs(q->flow_refill_delay)) ||
808 	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
809 	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
810 			q->low_rate_threshold) ||
811 	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
812 		goto nla_put_failure;
813 
814 	return nla_nest_end(skb, opts);
815 
816 nla_put_failure:
817 	return -1;
818 }
819 
820 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
821 {
822 	struct fq_sched_data *q = qdisc_priv(sch);
823 	struct tc_fq_qd_stats st;
824 
825 	sch_tree_lock(sch);
826 
827 	st.gc_flows		  = q->stat_gc_flows;
828 	st.highprio_packets	  = q->stat_internal_packets;
829 	st.tcp_retrans		  = 0;
830 	st.throttled		  = q->stat_throttled;
831 	st.flows_plimit		  = q->stat_flows_plimit;
832 	st.pkts_too_long	  = q->stat_pkts_too_long;
833 	st.allocation_errors	  = q->stat_allocation_errors;
834 	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
835 	st.flows		  = q->flows;
836 	st.inactive_flows	  = q->inactive_flows;
837 	st.throttled_flows	  = q->throttled_flows;
838 	st.unthrottle_latency_ns  = min_t(unsigned long,
839 					  q->unthrottle_latency_ns, ~0U);
840 	sch_tree_unlock(sch);
841 
842 	return gnet_stats_copy_app(d, &st, sizeof(st));
843 }
844 
845 static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
846 	.id		=	"fq",
847 	.priv_size	=	sizeof(struct fq_sched_data),
848 
849 	.enqueue	=	fq_enqueue,
850 	.dequeue	=	fq_dequeue,
851 	.peek		=	qdisc_peek_dequeued,
852 	.init		=	fq_init,
853 	.reset		=	fq_reset,
854 	.destroy	=	fq_destroy,
855 	.change		=	fq_change,
856 	.dump		=	fq_dump,
857 	.dump_stats	=	fq_dump_stats,
858 	.owner		=	THIS_MODULE,
859 };
860 
861 static int __init fq_module_init(void)
862 {
863 	int ret;
864 
865 	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
866 					   sizeof(struct fq_flow),
867 					   0, 0, NULL);
868 	if (!fq_flow_cachep)
869 		return -ENOMEM;
870 
871 	ret = register_qdisc(&fq_qdisc_ops);
872 	if (ret)
873 		kmem_cache_destroy(fq_flow_cachep);
874 	return ret;
875 }
876 
877 static void __exit fq_module_exit(void)
878 {
879 	unregister_qdisc(&fq_qdisc_ops);
880 	kmem_cache_destroy(fq_flow_cachep);
881 }
882 
883 module_init(fq_module_init)
884 module_exit(fq_module_exit)
885 MODULE_AUTHOR("Eric Dumazet");
886 MODULE_LICENSE("GPL");
887