xref: /linux/net/sched/sch_fq.c (revision 74ce1896c6c65b2f8cccbf59162d542988835835)
1 /*
2  * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
3  *
4  *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
5  *
6  *	This program is free software; you can redistribute it and/or
7  *	modify it under the terms of the GNU General Public License
8  *	as published by the Free Software Foundation; either version
9  *	2 of the License, or (at your option) any later version.
10  *
11  *  Meant to be mostly used for locally generated traffic:
12  *  Fast classification depends on skb->sk being set before reaching us.
13  *  If not (router workload), we fall back to rxhash, a 32-bit flow hash.
14  *  All packets belonging to a socket are considered a 'flow'.
15  *
16  *  Flows are dynamically allocated and stored in a hash table of RB trees.
17  *  They are also part of one of two Round Robin lists (new or old flows).
18  *
19  *  Burst avoidance (aka pacing) capability:
20  *
21  *  A transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
22  *  bunch of packets, and this packet scheduler adds delay between
23  *  packets to respect rate limitation.
24  *
25  *  enqueue():
26  *   - lookup one RB tree (out of 1024 or more) to find the flow.
27  *     If the flow does not exist, create it and add it to the tree.
28  *     Add the skb to the per flow list of skbs (a FIFO).
29  *   - Use a special FIFO for high prio packets.
30  *
31  *  dequeue(): serves flows in Round Robin.
32  *  Note: when a flow becomes empty, we do not immediately remove it from
33  *  the rb trees, for performance reasons (it is expected to send additional
34  *  packets, or the SLAB cache will reuse the socket for another flow).
35  */
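/*
 *  Example (illustrative values only): the qdisc is normally attached from
 *  user space with iproute2, e.g.
 *
 *	tc qdisc replace dev eth0 root fq maxrate 1gbit
 *
 *  where the interface name and the per-flow rate cap are arbitrary;
 *  see tc-fq(8) for the full option list.
 */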
36 
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/kernel.h>
40 #include <linux/jiffies.h>
41 #include <linux/string.h>
42 #include <linux/in.h>
43 #include <linux/errno.h>
44 #include <linux/init.h>
45 #include <linux/skbuff.h>
46 #include <linux/slab.h>
47 #include <linux/rbtree.h>
48 #include <linux/hash.h>
49 #include <linux/prefetch.h>
50 #include <linux/vmalloc.h>
51 #include <net/netlink.h>
52 #include <net/pkt_sched.h>
53 #include <net/sock.h>
54 #include <net/tcp_states.h>
55 #include <net/tcp.h>
56 
57 /*
58  * Per flow structure, dynamically allocated
59  */
60 struct fq_flow {
61 	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
62 	union {
63 		struct sk_buff *tail;	/* last skb in the list */
64 		unsigned long  age;	/* jiffies when flow was emptied, for gc */
65 	};
66 	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
67 	struct sock	*sk;
68 	int		qlen;		/* number of packets in flow queue */
69 	int		credit;
70 	u32		socket_hash;	/* sk_hash */
71 	struct fq_flow *next;		/* next pointer in RR lists, or &detached */
72 
73 	struct rb_node  rate_node;	/* anchor in q->delayed tree */
74 	u64		time_next_packet;
75 };
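/*
 * Note on the head/tail/age union: while packets are queued, 'tail' points
 * to the last skb; once the flow empties, fq_flow_set_detached() reuses the
 * same storage as 'age' to record when the flow went idle, which the garbage
 * collection code later uses to reclaim stale flows.
 */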
76 
77 struct fq_flow_head {
78 	struct fq_flow *first;
79 	struct fq_flow *last;
80 };
81 
82 struct fq_sched_data {
83 	struct fq_flow_head new_flows;
84 
85 	struct fq_flow_head old_flows;
86 
87 	struct rb_root	delayed;	/* for rate limited flows */
88 	u64		time_next_delayed_flow;
89 	unsigned long	unthrottle_latency_ns;
90 
91 	struct fq_flow	internal;	/* for unclassified or high prio packets */
92 	u32		quantum;
93 	u32		initial_quantum;
94 	u32		flow_refill_delay;
95 	u32		flow_max_rate;	/* optional max rate per flow */
96 	u32		flow_plimit;	/* max packets per flow */
97 	u32		orphan_mask;	/* mask for orphaned skb */
98 	u32		low_rate_threshold;
99 	struct rb_root	*fq_root;
100 	u8		rate_enable;
101 	u8		fq_trees_log;
102 
103 	u32		flows;
104 	u32		inactive_flows;
105 	u32		throttled_flows;
106 
107 	u64		stat_gc_flows;
108 	u64		stat_internal_packets;
109 	u64		stat_tcp_retrans;
110 	u64		stat_throttled;
111 	u64		stat_flows_plimit;
112 	u64		stat_pkts_too_long;
113 	u64		stat_allocation_errors;
114 	struct qdisc_watchdog watchdog;
115 };
116 
117 /* special values for f->next: detached flow (not on old/new lists) or throttled flow */
118 static struct fq_flow detached, throttled;
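/*
 * A flow is in exactly one of three states, encoded in f->next:
 * linked on the new/old round robin lists, equal to &throttled while the
 * flow sits in the q->delayed rb-tree, or equal to &detached while it is
 * idle and only reachable through its fq_root[] tree.
 */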
119 
120 static void fq_flow_set_detached(struct fq_flow *f)
121 {
122 	f->next = &detached;
123 	f->age = jiffies;
124 }
125 
126 static bool fq_flow_is_detached(const struct fq_flow *f)
127 {
128 	return f->next == &detached;
129 }
130 
131 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
132 {
133 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
134 
135 	while (*p) {
136 		struct fq_flow *aux;
137 
138 		parent = *p;
139 		aux = rb_entry(parent, struct fq_flow, rate_node);
140 		if (f->time_next_packet >= aux->time_next_packet)
141 			p = &parent->rb_right;
142 		else
143 			p = &parent->rb_left;
144 	}
145 	rb_link_node(&f->rate_node, parent, p);
146 	rb_insert_color(&f->rate_node, &q->delayed);
147 	q->throttled_flows++;
148 	q->stat_throttled++;
149 
150 	f->next = &throttled;
151 	if (q->time_next_delayed_flow > f->time_next_packet)
152 		q->time_next_delayed_flow = f->time_next_packet;
153 }
154 
155 
156 static struct kmem_cache *fq_flow_cachep __read_mostly;
157 
158 static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
159 {
160 	if (head->first)
161 		head->last->next = flow;
162 	else
163 		head->first = flow;
164 	head->last = flow;
165 	flow->next = NULL;
166 }
167 
168 /* limit number of collected flows per round */
169 #define FQ_GC_MAX 8
170 #define FQ_GC_AGE (3*HZ)
171 
172 static bool fq_gc_candidate(const struct fq_flow *f)
173 {
174 	return fq_flow_is_detached(f) &&
175 	       time_after(jiffies, f->age + FQ_GC_AGE);
176 }
177 
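/* Opportunistic garbage collection: while walking the bucket on the way to
 * 'sk', collect up to FQ_GC_MAX flows that have been detached for more than
 * FQ_GC_AGE, then erase and free them so stale flows do not accumulate.
 */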
178 static void fq_gc(struct fq_sched_data *q,
179 		  struct rb_root *root,
180 		  struct sock *sk)
181 {
182 	struct fq_flow *f, *tofree[FQ_GC_MAX];
183 	struct rb_node **p, *parent;
184 	int fcnt = 0;
185 
186 	p = &root->rb_node;
187 	parent = NULL;
188 	while (*p) {
189 		parent = *p;
190 
191 		f = rb_entry(parent, struct fq_flow, fq_node);
192 		if (f->sk == sk)
193 			break;
194 
195 		if (fq_gc_candidate(f)) {
196 			tofree[fcnt++] = f;
197 			if (fcnt == FQ_GC_MAX)
198 				break;
199 		}
200 
201 		if (f->sk > sk)
202 			p = &parent->rb_right;
203 		else
204 			p = &parent->rb_left;
205 	}
206 
207 	q->flows -= fcnt;
208 	q->inactive_flows -= fcnt;
209 	q->stat_gc_flows += fcnt;
210 	while (fcnt) {
211 		struct fq_flow *f = tofree[--fcnt];
212 
213 		rb_erase(&f->fq_node, root);
214 		kmem_cache_free(fq_flow_cachep, f);
215 	}
216 }
217 
218 static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
219 {
220 	struct rb_node **p, *parent;
221 	struct sock *sk = skb->sk;
222 	struct rb_root *root;
223 	struct fq_flow *f;
224 
225 	/* warning: no starvation prevention... */
226 	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
227 		return &q->internal;
228 
229 	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
230 	 * or a listener (SYNCOOKIE mode).
231 	 * 1) request sockets are not full-blown,
232 	 *    they do not contain sk_pacing_rate
233 	 * 2) They are not part of a 'flow' yet
234 	 * 3) We do not want to rate limit them (e.g. a SYNFLOOD attack),
235 	 *    especially if the listener has set SO_MAX_PACING_RATE
236 	 * 4) We pretend they are orphaned
237 	 */
238 	if (!sk || sk_listener(sk)) {
239 		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
240 
241 		/* By forcing the low order bit to 1, we make sure not to
242 		 * collide with a local flow (socket pointers are word aligned).
243 		 */
244 		sk = (struct sock *)((hash << 1) | 1UL);
245 		skb_orphan(skb);
246 	}
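	/* e.g. with the default orphan_mask of 1023, packets lacking a local
	 * socket are spread over 1024 artificial 'flows' keyed by skb hash.
	 */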
247 
248 	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
249 
250 	if (q->flows >= (2U << q->fq_trees_log) &&
251 	    q->inactive_flows > q->flows/2)
252 		fq_gc(q, root, sk);
253 
254 	p = &root->rb_node;
255 	parent = NULL;
256 	while (*p) {
257 		parent = *p;
258 
259 		f = rb_entry(parent, struct fq_flow, fq_node);
260 		if (f->sk == sk) {
261 			/* socket might have been reallocated, so check
262 			 * if its sk_hash is the same.
263 			 * If not, we need to refill the credit with the
264 			 * initial quantum.
265 			 */
266 			if (unlikely(skb->sk &&
267 				     f->socket_hash != sk->sk_hash)) {
268 				f->credit = q->initial_quantum;
269 				f->socket_hash = sk->sk_hash;
270 				f->time_next_packet = 0ULL;
271 			}
272 			return f;
273 		}
274 		if (f->sk > sk)
275 			p = &parent->rb_right;
276 		else
277 			p = &parent->rb_left;
278 	}
279 
280 	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
281 	if (unlikely(!f)) {
282 		q->stat_allocation_errors++;
283 		return &q->internal;
284 	}
285 	fq_flow_set_detached(f);
286 	f->sk = sk;
287 	if (skb->sk)
288 		f->socket_hash = sk->sk_hash;
289 	f->credit = q->initial_quantum;
290 
291 	rb_link_node(&f->fq_node, parent, p);
292 	rb_insert_color(&f->fq_node, root);
293 
294 	q->flows++;
295 	q->inactive_flows++;
296 	return f;
297 }
298 
299 
300 /* remove one skb from head of flow queue */
301 static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
302 {
303 	struct sk_buff *skb = flow->head;
304 
305 	if (skb) {
306 		flow->head = skb->next;
307 		skb->next = NULL;
308 		flow->qlen--;
309 		qdisc_qstats_backlog_dec(sch, skb);
310 		sch->q.qlen--;
311 	}
312 	return skb;
313 }
314 
315 /* We might add detection of retransmits in the future.
316  * For the time being, just return false.
317  */
318 static bool skb_is_retransmit(struct sk_buff *skb)
319 {
320 	return false;
321 }
322 
323 /* add skb to flow queue
324  * The flow queue is a linked list, kind of a FIFO, except for TCP retransmits.
325  * We special case TCP retransmits so they are transmitted before other packets.
326  * We rely on the fact that TCP retransmits are unlikely, so we do not waste
327  * a separate queue or a pointer for them.
328  * head->  [retrans pkt 1]
329  *         [retrans pkt 2]
330  *         [ normal pkt 1]
331  *         [ normal pkt 2]
332  *         [ normal pkt 3]
333  * tail->  [ normal pkt 4]
334  */
335 static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
336 {
337 	struct sk_buff *prev, *head = flow->head;
338 
339 	skb->next = NULL;
340 	if (!head) {
341 		flow->head = skb;
342 		flow->tail = skb;
343 		return;
344 	}
345 	if (likely(!skb_is_retransmit(skb))) {
346 		flow->tail->next = skb;
347 		flow->tail = skb;
348 		return;
349 	}
350 
351 	/* This skb is a tcp retransmit,
352 	 * find the last retrans packet in the queue
353 	 */
354 	prev = NULL;
355 	while (skb_is_retransmit(head)) {
356 		prev = head;
357 		head = head->next;
358 		if (!head)
359 			break;
360 	}
361 	if (!prev) { /* no rtx packet in queue, become the new head */
362 		skb->next = flow->head;
363 		flow->head = skb;
364 	} else {
365 		if (prev == flow->tail)
366 			flow->tail = skb;
367 		else
368 			skb->next = prev->next;
369 		prev->next = skb;
370 	}
371 }
372 
373 static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
374 		      struct sk_buff **to_free)
375 {
376 	struct fq_sched_data *q = qdisc_priv(sch);
377 	struct fq_flow *f;
378 
379 	if (unlikely(sch->q.qlen >= sch->limit))
380 		return qdisc_drop(skb, sch, to_free);
381 
382 	f = fq_classify(skb, q);
383 	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
384 		q->stat_flows_plimit++;
385 		return qdisc_drop(skb, sch, to_free);
386 	}
387 
388 	f->qlen++;
389 	if (skb_is_retransmit(skb))
390 		q->stat_tcp_retrans++;
391 	qdisc_qstats_backlog_inc(sch, skb);
392 	if (fq_flow_is_detached(f)) {
393 		struct sock *sk = skb->sk;
394 
395 		fq_flow_add_tail(&q->new_flows, f);
396 		if (time_after(jiffies, f->age + q->flow_refill_delay))
397 			f->credit = max_t(u32, f->credit, q->quantum);
398 		if (sk && q->rate_enable) {
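			/* Tell the socket that fq is pacing its packets,
			 * so TCP does not fall back to its internal pacing.
			 */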
399 			if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
400 				     SK_PACING_FQ))
401 				smp_store_release(&sk->sk_pacing_status,
402 						  SK_PACING_FQ);
403 		}
404 		q->inactive_flows--;
405 	}
406 
407 	/* Note: this overwrites f->age */
408 	flow_queue_add(f, skb);
409 
410 	if (unlikely(f == &q->internal)) {
411 		q->stat_internal_packets++;
412 	}
413 	sch->q.qlen++;
414 
415 	return NET_XMIT_SUCCESS;
416 }
417 
418 static void fq_check_throttled(struct fq_sched_data *q, u64 now)
419 {
420 	unsigned long sample;
421 	struct rb_node *p;
422 
423 	if (q->time_next_delayed_flow > now)
424 		return;
425 
426 	/* Update unthrottle latency EWMA.
427 	 * This is cheap and can help diagnose timer/latency problems.
428 	 */
429 	sample = (unsigned long)(now - q->time_next_delayed_flow);
430 	q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
431 	q->unthrottle_latency_ns += sample >> 3;
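	/* i.e. avg <- (7/8) * avg + (1/8) * sample, an EWMA with 1/8 weight */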
432 
433 	q->time_next_delayed_flow = ~0ULL;
434 	while ((p = rb_first(&q->delayed)) != NULL) {
435 		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);
436 
437 		if (f->time_next_packet > now) {
438 			q->time_next_delayed_flow = f->time_next_packet;
439 			break;
440 		}
441 		rb_erase(p, &q->delayed);
442 		q->throttled_flows--;
443 		fq_flow_add_tail(&q->old_flows, f);
444 	}
445 }
446 
447 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
448 {
449 	struct fq_sched_data *q = qdisc_priv(sch);
450 	u64 now = ktime_get_ns();
451 	struct fq_flow_head *head;
452 	struct sk_buff *skb;
453 	struct fq_flow *f;
454 	u32 rate, plen;
455 
456 	skb = fq_dequeue_head(sch, &q->internal);
457 	if (skb)
458 		goto out;
459 	fq_check_throttled(q, now);
460 begin:
461 	head = &q->new_flows;
462 	if (!head->first) {
463 		head = &q->old_flows;
464 		if (!head->first) {
465 			if (q->time_next_delayed_flow != ~0ULL)
466 				qdisc_watchdog_schedule_ns(&q->watchdog,
467 							   q->time_next_delayed_flow);
468 			return NULL;
469 		}
470 	}
471 	f = head->first;
472 
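	/* Credit-based (deficit) round robin: a flow that ran out of credit
	 * is granted another quantum and moved to the tail of the old list.
	 */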
473 	if (f->credit <= 0) {
474 		f->credit += q->quantum;
475 		head->first = f->next;
476 		fq_flow_add_tail(&q->old_flows, f);
477 		goto begin;
478 	}
479 
480 	skb = f->head;
481 	if (unlikely(skb && now < f->time_next_packet &&
482 		     !skb_is_tcp_pure_ack(skb))) {
483 		head->first = f->next;
484 		fq_flow_set_throttled(q, f);
485 		goto begin;
486 	}
487 
488 	skb = fq_dequeue_head(sch, f);
489 	if (!skb) {
490 		head->first = f->next;
491 		/* force a pass through old_flows to prevent starvation */
492 		if ((head == &q->new_flows) && q->old_flows.first) {
493 			fq_flow_add_tail(&q->old_flows, f);
494 		} else {
495 			fq_flow_set_detached(f);
496 			q->inactive_flows++;
497 		}
498 		goto begin;
499 	}
500 	prefetch(&skb->end);
501 	f->credit -= qdisc_pkt_len(skb);
502 
503 	if (!q->rate_enable)
504 		goto out;
505 
506 	/* Do not pace locally generated ack packets */
507 	if (skb_is_tcp_pure_ack(skb))
508 		goto out;
509 
510 	rate = q->flow_max_rate;
511 	if (skb->sk)
512 		rate = min(skb->sk->sk_pacing_rate, rate);
513 
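	/* Below the low rate threshold every packet is paced individually
	 * (the credit is cleared); faster flows may send roughly a quantum
	 * of bytes back to back before a pacing delay is applied.
	 */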
514 	if (rate <= q->low_rate_threshold) {
515 		f->credit = 0;
516 		plen = qdisc_pkt_len(skb);
517 	} else {
518 		plen = max(qdisc_pkt_len(skb), q->quantum);
519 		if (f->credit > 0)
520 			goto out;
521 	}
522 	if (rate != ~0U) {
523 		u64 len = (u64)plen * NSEC_PER_SEC;
524 
525 		if (likely(rate))
526 			do_div(len, rate);
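		/* Example: a 1514 byte packet paced at 125000 bytes/sec
		 * (1 Mbit/s) gives len = 1514 * NSEC_PER_SEC / 125000,
		 * i.e. roughly 12.1 ms until the next packet may be sent.
		 */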
527 		/* Since the socket rate can change later,
528 		 * clamp the delay to 1 second.
529 		 * Really, providers of too-big packets should be fixed!
530 		 */
531 		if (unlikely(len > NSEC_PER_SEC)) {
532 			len = NSEC_PER_SEC;
533 			q->stat_pkts_too_long++;
534 		}
535 		/* Account for scheduler/timer drift.
536 		 * f->time_next_packet was set when the prior packet was sent,
537 		 * and the current time (@now) can be late by tens of usecs.
538 		 */
539 		if (f->time_next_packet)
540 			len -= min(len/2, now - f->time_next_packet);
541 		f->time_next_packet = now + len;
542 	}
543 out:
544 	qdisc_bstats_update(sch, skb);
545 	return skb;
546 }
547 
548 static void fq_flow_purge(struct fq_flow *flow)
549 {
550 	rtnl_kfree_skbs(flow->head, flow->tail);
551 	flow->head = NULL;
552 	flow->qlen = 0;
553 }
554 
555 static void fq_reset(struct Qdisc *sch)
556 {
557 	struct fq_sched_data *q = qdisc_priv(sch);
558 	struct rb_root *root;
559 	struct rb_node *p;
560 	struct fq_flow *f;
561 	unsigned int idx;
562 
563 	sch->q.qlen = 0;
564 	sch->qstats.backlog = 0;
565 
566 	fq_flow_purge(&q->internal);
567 
568 	if (!q->fq_root)
569 		return;
570 
571 	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
572 		root = &q->fq_root[idx];
573 		while ((p = rb_first(root)) != NULL) {
574 			f = rb_entry(p, struct fq_flow, fq_node);
575 			rb_erase(p, root);
576 
577 			fq_flow_purge(f);
578 
579 			kmem_cache_free(fq_flow_cachep, f);
580 		}
581 	}
582 	q->new_flows.first	= NULL;
583 	q->old_flows.first	= NULL;
584 	q->delayed		= RB_ROOT;
585 	q->flows		= 0;
586 	q->inactive_flows	= 0;
587 	q->throttled_flows	= 0;
588 }
589 
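/* Move every flow from the old hash array to the new one, freeing flows that
 * are garbage collection candidates along the way. Called under
 * sch_tree_lock() from fq_resize().
 */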
590 static void fq_rehash(struct fq_sched_data *q,
591 		      struct rb_root *old_array, u32 old_log,
592 		      struct rb_root *new_array, u32 new_log)
593 {
594 	struct rb_node *op, **np, *parent;
595 	struct rb_root *oroot, *nroot;
596 	struct fq_flow *of, *nf;
597 	int fcnt = 0;
598 	u32 idx;
599 
600 	for (idx = 0; idx < (1U << old_log); idx++) {
601 		oroot = &old_array[idx];
602 		while ((op = rb_first(oroot)) != NULL) {
603 			rb_erase(op, oroot);
604 			of = rb_entry(op, struct fq_flow, fq_node);
605 			if (fq_gc_candidate(of)) {
606 				fcnt++;
607 				kmem_cache_free(fq_flow_cachep, of);
608 				continue;
609 			}
610 			nroot = &new_array[hash_ptr(of->sk, new_log)];
611 
612 			np = &nroot->rb_node;
613 			parent = NULL;
614 			while (*np) {
615 				parent = *np;
616 
617 				nf = rb_entry(parent, struct fq_flow, fq_node);
618 				BUG_ON(nf->sk == of->sk);
619 
620 				if (nf->sk > of->sk)
621 					np = &parent->rb_right;
622 				else
623 					np = &parent->rb_left;
624 			}
625 
626 			rb_link_node(&of->fq_node, parent, np);
627 			rb_insert_color(&of->fq_node, nroot);
628 		}
629 	}
630 	q->flows -= fcnt;
631 	q->inactive_flows -= fcnt;
632 	q->stat_gc_flows += fcnt;
633 }
634 
635 static void fq_free(void *addr)
636 {
637 	kvfree(addr);
638 }
639 
640 static int fq_resize(struct Qdisc *sch, u32 log)
641 {
642 	struct fq_sched_data *q = qdisc_priv(sch);
643 	struct rb_root *array;
644 	void *old_fq_root;
645 	u32 idx;
646 
647 	if (q->fq_root && log == q->fq_trees_log)
648 		return 0;
649 
650 	/* If XPS was set up, we can allocate memory on the right NUMA node */
651 	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
652 			      netdev_queue_numa_node_read(sch->dev_queue));
653 	if (!array)
654 		return -ENOMEM;
655 
656 	for (idx = 0; idx < (1U << log); idx++)
657 		array[idx] = RB_ROOT;
658 
659 	sch_tree_lock(sch);
660 
661 	old_fq_root = q->fq_root;
662 	if (old_fq_root)
663 		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
664 
665 	q->fq_root = array;
666 	q->fq_trees_log = log;
667 
668 	sch_tree_unlock(sch);
669 
670 	fq_free(old_fq_root);
671 
672 	return 0;
673 }
674 
675 static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
676 	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
677 	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
678 	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
679 	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
680 	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
681 	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
682 	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
683 	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
684 	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
685 	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
686 };
687 
688 static int fq_change(struct Qdisc *sch, struct nlattr *opt)
689 {
690 	struct fq_sched_data *q = qdisc_priv(sch);
691 	struct nlattr *tb[TCA_FQ_MAX + 1];
692 	int err, drop_count = 0;
693 	unsigned drop_len = 0;
694 	u32 fq_log;
695 
696 	if (!opt)
697 		return -EINVAL;
698 
699 	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy, NULL);
700 	if (err < 0)
701 		return err;
702 
703 	sch_tree_lock(sch);
704 
705 	fq_log = q->fq_trees_log;
706 
707 	if (tb[TCA_FQ_BUCKETS_LOG]) {
708 		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);
709 
710 		if (nval >= 1 && nval <= ilog2(256*1024))
711 			fq_log = nval;
712 		else
713 			err = -EINVAL;
714 	}
715 	if (tb[TCA_FQ_PLIMIT])
716 		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);
717 
718 	if (tb[TCA_FQ_FLOW_PLIMIT])
719 		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
720 
721 	if (tb[TCA_FQ_QUANTUM]) {
722 		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
723 
724 		if (quantum > 0)
725 			q->quantum = quantum;
726 		else
727 			err = -EINVAL;
728 	}
729 
730 	if (tb[TCA_FQ_INITIAL_QUANTUM])
731 		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
732 
733 	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
734 		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
735 				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
736 
737 	if (tb[TCA_FQ_FLOW_MAX_RATE])
738 		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
739 
740 	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
741 		q->low_rate_threshold =
742 			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);
743 
744 	if (tb[TCA_FQ_RATE_ENABLE]) {
745 		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
746 
747 		if (enable <= 1)
748 			q->rate_enable = enable;
749 		else
750 			err = -EINVAL;
751 	}
752 
753 	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
754 		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);
755 
756 		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
757 	}
758 
759 	if (tb[TCA_FQ_ORPHAN_MASK])
760 		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
761 
762 	if (!err) {
763 		sch_tree_unlock(sch);
764 		err = fq_resize(sch, fq_log);
765 		sch_tree_lock(sch);
766 	}
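	/* The new limit may be lower than the current queue length:
	 * drop the excess and let qdisc_tree_reduce_backlog() propagate
	 * the change to parent qdiscs.
	 */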
767 	while (sch->q.qlen > sch->limit) {
768 		struct sk_buff *skb = fq_dequeue(sch);
769 
770 		if (!skb)
771 			break;
772 		drop_len += qdisc_pkt_len(skb);
773 		rtnl_kfree_skbs(skb, skb);
774 		drop_count++;
775 	}
776 	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
777 
778 	sch_tree_unlock(sch);
779 	return err;
780 }
781 
782 static void fq_destroy(struct Qdisc *sch)
783 {
784 	struct fq_sched_data *q = qdisc_priv(sch);
785 
786 	fq_reset(sch);
787 	fq_free(q->fq_root);
788 	qdisc_watchdog_cancel(&q->watchdog);
789 }
790 
791 static int fq_init(struct Qdisc *sch, struct nlattr *opt)
792 {
793 	struct fq_sched_data *q = qdisc_priv(sch);
794 	int err;
795 
796 	sch->limit		= 10000;
797 	q->flow_plimit		= 100;
798 	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
799 	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
800 	q->flow_refill_delay	= msecs_to_jiffies(40);
801 	q->flow_max_rate	= ~0U;
802 	q->time_next_delayed_flow = ~0ULL;
803 	q->rate_enable		= 1;
804 	q->new_flows.first	= NULL;
805 	q->old_flows.first	= NULL;
806 	q->delayed		= RB_ROOT;
807 	q->fq_root		= NULL;
808 	q->fq_trees_log		= ilog2(1024);
809 	q->orphan_mask		= 1024 - 1;
810 	q->low_rate_threshold	= 550000 / 8;
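	/* 550000 / 8 = 68750 bytes/sec, i.e. about 550 kbit/s */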
811 	qdisc_watchdog_init(&q->watchdog, sch);
812 
813 	if (opt)
814 		err = fq_change(sch, opt);
815 	else
816 		err = fq_resize(sch, q->fq_trees_log);
817 
818 	return err;
819 }
820 
821 static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
822 {
823 	struct fq_sched_data *q = qdisc_priv(sch);
824 	struct nlattr *opts;
825 
826 	opts = nla_nest_start(skb, TCA_OPTIONS);
827 	if (opts == NULL)
828 		goto nla_put_failure;
829 
830 	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
831 
832 	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
833 	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
834 	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
835 	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
836 	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
837 	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
838 	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
839 			jiffies_to_usecs(q->flow_refill_delay)) ||
840 	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
841 	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
842 			q->low_rate_threshold) ||
843 	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
844 		goto nla_put_failure;
845 
846 	return nla_nest_end(skb, opts);
847 
848 nla_put_failure:
849 	return -1;
850 }
851 
852 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
853 {
854 	struct fq_sched_data *q = qdisc_priv(sch);
855 	struct tc_fq_qd_stats st;
856 
857 	sch_tree_lock(sch);
858 
859 	st.gc_flows		  = q->stat_gc_flows;
860 	st.highprio_packets	  = q->stat_internal_packets;
861 	st.tcp_retrans		  = q->stat_tcp_retrans;
862 	st.throttled		  = q->stat_throttled;
863 	st.flows_plimit		  = q->stat_flows_plimit;
864 	st.pkts_too_long	  = q->stat_pkts_too_long;
865 	st.allocation_errors	  = q->stat_allocation_errors;
866 	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
867 	st.flows		  = q->flows;
868 	st.inactive_flows	  = q->inactive_flows;
869 	st.throttled_flows	  = q->throttled_flows;
870 	st.unthrottle_latency_ns  = min_t(unsigned long,
871 					  q->unthrottle_latency_ns, ~0U);
872 	sch_tree_unlock(sch);
873 
874 	return gnet_stats_copy_app(d, &st, sizeof(st));
875 }
876 
877 static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
878 	.id		=	"fq",
879 	.priv_size	=	sizeof(struct fq_sched_data),
880 
881 	.enqueue	=	fq_enqueue,
882 	.dequeue	=	fq_dequeue,
883 	.peek		=	qdisc_peek_dequeued,
884 	.init		=	fq_init,
885 	.reset		=	fq_reset,
886 	.destroy	=	fq_destroy,
887 	.change		=	fq_change,
888 	.dump		=	fq_dump,
889 	.dump_stats	=	fq_dump_stats,
890 	.owner		=	THIS_MODULE,
891 };
892 
893 static int __init fq_module_init(void)
894 {
895 	int ret;
896 
897 	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
898 					   sizeof(struct fq_flow),
899 					   0, 0, NULL);
900 	if (!fq_flow_cachep)
901 		return -ENOMEM;
902 
903 	ret = register_qdisc(&fq_qdisc_ops);
904 	if (ret)
905 		kmem_cache_destroy(fq_flow_cachep);
906 	return ret;
907 }
908 
909 static void __exit fq_module_exit(void)
910 {
911 	unregister_qdisc(&fq_qdisc_ops);
912 	kmem_cache_destroy(fq_flow_cachep);
913 }
914 
915 module_init(fq_module_init)
916 module_exit(fq_module_exit)
917 MODULE_AUTHOR("Eric Dumazet");
918 MODULE_LICENSE("GPL");
919