xref: /linux/net/sched/sch_fq.c (revision 8341c989ac77d712c7d6e2bce29e8a4bcb2eeae4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
4  *
5  *  Copyright (C) 2013-2023 Eric Dumazet <edumazet@google.com>
6  *
7  *  Meant to be mostly used for locally generated traffic:
8  *  Fast classification depends on skb->sk being set before reaching us.
9  *  If not (router workload), we fall back to the 32-bit packet hash (rxhash).
10  *  All packets belonging to a socket are considered one 'flow'.
11  *
12  *  Flows are dynamically allocated and stored in a hash table of RB trees.
13  *  They are also linked into one of the Round Robin lists ('new' or 'old' flows).
14  *
15  *  Burst avoidance (aka pacing) capability:
16  *
17  *  A transport (e.g. TCP) can set a rate in sk->sk_pacing_rate and enqueue a
18  *  bunch of packets; this packet scheduler then adds delay between
19  *  packets to respect the rate limitation.
20  *
21  *  enqueue():
22  *   - Look up one RB tree (out of 1024 or more) to find the flow.
23  *     If the flow does not exist, create it and add it to the tree.
24  *     Add the skb to the per-flow list of skbs (FIFO).
25  *   - Use a special FIFO for high priority packets.
26  *
27  *  dequeue(): serves flows in Round Robin.
28  *  Note: when a flow becomes empty, we do not immediately remove it from the
29  *  rb trees, for performance reasons (it is expected to send additional packets,
30  *  or the SLAB cache will reuse the socket for another flow).
31  */
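/*
 *  Example usage from user space (illustrative only; "eth0" and the numbers
 *  below are placeholders, see tc-fq(8) for the authoritative option list):
 *
 *    tc qdisc replace dev eth0 root fq
 *    tc qdisc replace dev eth0 root fq maxrate 90mbit quantum 3028
 *    tc -s qdisc show dev eth0
 */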
32 
33 #include <linux/module.h>
34 #include <linux/types.h>
35 #include <linux/kernel.h>
36 #include <linux/jiffies.h>
37 #include <linux/string.h>
38 #include <linux/in.h>
39 #include <linux/errno.h>
40 #include <linux/init.h>
41 #include <linux/skbuff.h>
42 #include <linux/slab.h>
43 #include <linux/rbtree.h>
44 #include <linux/hash.h>
45 #include <linux/prefetch.h>
46 #include <linux/vmalloc.h>
47 #include <net/netlink.h>
48 #include <net/pkt_sched.h>
49 #include <net/sock.h>
50 #include <net/tcp_states.h>
51 #include <net/tcp.h>
52 
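/* Per-packet state, stored in the qdisc private area of skb->cb.
 * fq_skb_cb() below validates that it fits within qdisc_skb_cb's data[].
 */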
53 struct fq_skb_cb {
54 	u64	time_to_send;
55 	u8	band;
56 };
57 
58 static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
59 {
60 	qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
61 	return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
62 }
63 
64 /*
65  * Per flow structure, dynamically allocated.
66  * If packets have monotonically increasing time_to_send, they are placed in O(1)
67  * in a linear list (head, tail); otherwise they are placed in an rbtree (t_root).
68  */
69 struct fq_flow {
70 /* First cache line : used in fq_gc(), fq_enqueue(), fq_dequeue() */
71 	struct rb_root	t_root;
72 	struct sk_buff	*head;		/* list of skbs for this flow: first skb */
73 	union {
74 		struct sk_buff *tail;	/* last skb in the list */
75 		unsigned long  age;	/* (jiffies | 1UL) when flow was emptied, for gc */
76 	};
77 	union {
78 		struct rb_node	fq_node;	/* anchor in fq_root[] trees */
79 		/* Following field is only used for q->internal,
80 		 * because q->internal is not hashed in fq_root[]
81 		 */
82 		u64		stat_fastpath_packets;
83 	};
84 	struct sock	*sk;
85 	u32		socket_hash;	/* sk_hash */
86 	int		qlen;		/* number of packets in flow queue */
87 
88 /* Second cache line */
89 	int		credit;
90 	int		band;
91 	struct fq_flow *next;		/* next pointer in RR lists */
92 
93 	struct rb_node  rate_node;	/* anchor in q->delayed tree */
94 	u64		time_next_packet;
95 };
96 
97 struct fq_flow_head {
98 	struct fq_flow *first;
99 	struct fq_flow *last;
100 };
101 
102 struct fq_perband_flows {
103 	struct fq_flow_head new_flows;
104 	struct fq_flow_head old_flows;
105 	int		    credit;
106 	int		    quantum; /* based on band nr: 576KB, 192KB, 64KB */
107 };
108 
109 #define FQ_PRIO2BAND_CRUMB_SIZE ((TC_PRIO_MAX + 1) >> 2)
110 
111 struct fq_sched_data {
112 /* Read mostly cache line */
113 
114 	u64		offload_horizon;
115 	u32		quantum;
116 	u32		initial_quantum;
117 	u32		flow_refill_delay;
118 	u32		flow_plimit;	/* max packets per flow */
119 	unsigned long	flow_max_rate;	/* optional max rate per flow */
120 	u64		ce_threshold;
121 	u64		horizon;	/* horizon in ns */
122 	u32		orphan_mask;	/* mask for orphaned skb */
123 	u32		low_rate_threshold;
124 	struct rb_root	*fq_root;
125 	u8		rate_enable;
126 	u8		fq_trees_log;
127 	u8		horizon_drop;
128 	u8		prio2band[FQ_PRIO2BAND_CRUMB_SIZE];
129 	u32		timer_slack; /* hrtimer slack in ns */
130 
131 /* Read/Write fields. */
132 
133 	unsigned int band_nr; /* band being serviced in fq_dequeue() */
134 
135 	struct fq_perband_flows band_flows[FQ_BANDS];
136 
137 	struct fq_flow	internal;	/* fastpath queue. */
138 	struct rb_root	delayed;	/* for rate limited flows */
139 	u64		time_next_delayed_flow;
140 	unsigned long	unthrottle_latency_ns;
141 
142 	u32		band_pkt_count[FQ_BANDS];
143 	u32		flows;
144 	u32		inactive_flows; /* Flows with no packet to send. */
145 	u32		throttled_flows;
146 
147 	u64		stat_throttled;
148 	struct qdisc_watchdog watchdog;
149 	u64		stat_gc_flows;
150 
151 /* Seldom used fields. */
152 
153 	u64		stat_band_drops[FQ_BANDS];
154 	u64		stat_ce_mark;
155 	u64		stat_horizon_drops;
156 	u64		stat_horizon_caps;
157 	u64		stat_flows_plimit;
158 	u64		stat_pkts_too_long;
159 	u64		stat_allocation_errors;
160 };
161 
162 /* return the i-th 2-bit value ("crumb") */
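/* Worked example (illustrative): with prio == 5, we read prio2band[1]
 * (5 / 4 == 1), shift right by 2 * (5 & 0x3) == 2 bits, and keep the two
 * low order bits of the result.
 */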
163 static u8 fq_prio2band(const u8 *prio2band, unsigned int prio)
164 {
165 	return (READ_ONCE(prio2band[prio / 4]) >> (2 * (prio & 0x3))) & 0x3;
166 }
167 
168 /*
169  * f->tail and f->age share the same location.
170  * We can use the low order bit to tell whether this location points
171  * to an sk_buff or contains a jiffies value, provided we force the jiffies value to be odd.
172  * This works because the low order bit of f->tail is always 0, since alignof(struct sk_buff) >= 2.
173  */
174 static void fq_flow_set_detached(struct fq_flow *f)
175 {
176 	f->age = jiffies | 1UL;
177 }
178 
179 static bool fq_flow_is_detached(const struct fq_flow *f)
180 {
181 	return !!(f->age & 1UL);
182 }
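/* Illustration: struct sk_buff pointers are at least 2-byte aligned, so a
 * valid f->tail always has its low order bit clear, while f->age is stored
 * as (jiffies | 1UL) and always has it set. Reading the low order bit of
 * the shared word is therefore enough to know which member is valid.
 */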
183 
184 /* special value to mark a throttled flow (not on old/new list) */
185 static struct fq_flow throttled;
186 
187 static bool fq_flow_is_throttled(const struct fq_flow *f)
188 {
189 	return f->next == &throttled;
190 }
191 
192 enum new_flow {
193 	NEW_FLOW,
194 	OLD_FLOW
195 };
196 
197 static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow,
198 			     enum new_flow list_sel)
199 {
200 	struct fq_perband_flows *pband = &q->band_flows[flow->band];
201 	struct fq_flow_head *head = (list_sel == NEW_FLOW) ?
202 					&pband->new_flows :
203 					&pband->old_flows;
204 
205 	if (head->first)
206 		head->last->next = flow;
207 	else
208 		head->first = flow;
209 	head->last = flow;
210 	flow->next = NULL;
211 }
212 
213 static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
214 {
215 	rb_erase(&f->rate_node, &q->delayed);
216 	q->throttled_flows--;
217 	fq_flow_add_tail(q, f, OLD_FLOW);
218 }
219 
220 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
221 {
222 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
223 
224 	while (*p) {
225 		struct fq_flow *aux;
226 
227 		parent = *p;
228 		aux = rb_entry(parent, struct fq_flow, rate_node);
229 		if (f->time_next_packet >= aux->time_next_packet)
230 			p = &parent->rb_right;
231 		else
232 			p = &parent->rb_left;
233 	}
234 	rb_link_node(&f->rate_node, parent, p);
235 	rb_insert_color(&f->rate_node, &q->delayed);
236 	q->throttled_flows++;
237 	q->stat_throttled++;
238 
239 	f->next = &throttled;
240 	if (q->time_next_delayed_flow > f->time_next_packet)
241 		q->time_next_delayed_flow = f->time_next_packet;
242 }
243 
244 
245 static struct kmem_cache *fq_flow_cachep __read_mostly;
246 
247 
248 #define FQ_GC_AGE (3*HZ)
249 
250 static bool fq_gc_candidate(const struct fq_flow *f)
251 {
252 	return fq_flow_is_detached(f) &&
253 	       time_after(jiffies, f->age + FQ_GC_AGE);
254 }
255 
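/* Opportunistic garbage collection: while walking the rb tree looking up
 * @sk, collect flows that have been detached for more than FQ_GC_AGE,
 * then free them and update the accounting in one batch.
 */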
256 static void fq_gc(struct fq_sched_data *q,
257 		  struct rb_root *root,
258 		  struct sock *sk)
259 {
260 	struct fq_flow *f, *tofree = NULL;
261 	struct rb_node **p, *parent;
262 	int fcnt;
263 
264 	p = &root->rb_node;
265 	parent = NULL;
266 	while (*p) {
267 		parent = *p;
268 
269 		f = rb_entry(parent, struct fq_flow, fq_node);
270 		if (f->sk == sk)
271 			break;
272 
273 		if (fq_gc_candidate(f)) {
274 			f->next = tofree;
275 			tofree = f;
276 		}
277 
278 		if (f->sk > sk)
279 			p = &parent->rb_right;
280 		else
281 			p = &parent->rb_left;
282 	}
283 
284 	if (!tofree)
285 		return;
286 
287 	fcnt = 0;
288 	while (tofree) {
289 		f = tofree;
290 		tofree = f->next;
291 		rb_erase(&f->fq_node, root);
292 		kmem_cache_free(fq_flow_cachep, f);
293 		fcnt++;
294 	}
295 	q->flows -= fcnt;
296 	q->inactive_flows -= fcnt;
297 	q->stat_gc_flows += fcnt;
298 }
299 
300 /* Fast path can be used if:
301  * 1) Packet tstamp is in the past, or within the pacing offload horizon.
302  * 2) FQ qlen == 0   OR
303  *   (no flow is currently eligible for transmit,
304  *    AND fast path queue has less than 8 packets)
305  * 3) No SO_MAX_PACING_RATE on the socket (if any).
306  * 4) No @maxrate attribute on this qdisc.
307  *
308  * FQ cannot use the generic TCQ_F_CAN_BYPASS infrastructure.
309  */
310 static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
311 			      u64 now)
312 {
313 	const struct fq_sched_data *q = qdisc_priv(sch);
314 	const struct sock *sk;
315 
316 	if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon)
317 		return false;
318 
319 	if (sch->q.qlen != 0) {
320 		/* Even if some packets are stored in this qdisc,
321 		 * we can still enable fast path if all of them are
322 		 * scheduled in the future (ie no flows are eligible)
323 		 * or in the fast path queue.
324 		 */
325 		if (q->flows != q->inactive_flows + q->throttled_flows)
326 			return false;
327 
328 		/* Do not allow the fast path queue to explode; we want Fair Queue mode
329 		 * under pressure.
330 		 */
331 		if (q->internal.qlen >= 8)
332 			return false;
333 
334 		/* Ordering invariants fall apart if some delayed flows
335 		 * are ready but we haven't serviced them yet.
336 		 */
337 		if (q->time_next_delayed_flow <= now + q->offload_horizon)
338 			return false;
339 	}
340 
341 	sk = skb->sk;
342 	if (sk && sk_fullsock(sk) && !sk_is_tcp(sk) &&
343 	    sk->sk_max_pacing_rate != ~0UL)
344 		return false;
345 
346 	if (q->flow_max_rate != ~0UL)
347 		return false;
348 
349 	return true;
350 }
351 
352 static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
353 				   u64 now)
354 {
355 	struct fq_sched_data *q = qdisc_priv(sch);
356 	struct rb_node **p, *parent;
357 	struct sock *sk = skb->sk;
358 	struct rb_root *root;
359 	struct fq_flow *f;
360 
361 	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
362 	 * or a listener (SYNCOOKIE mode)
363 	 * 1) request sockets are not full blown,
364 	 *    they do not contain sk_pacing_rate
365 	 * 2) They are not part of a 'flow' yet
366 	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
367 	 *    especially if the listener set SO_MAX_PACING_RATE
368 	 * 4) We pretend they are orphaned
369 	 * TCP can also associate TIME_WAIT sockets with RST or ACK packets.
370 	 */
371 	if (!sk || sk_listener_or_tw(sk)) {
372 		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
373 
374 		/* By forcing the low order bit to 1, we make sure not to
375 		 * collide with a local flow (socket pointers are word aligned)
376 		 */
377 		sk = (struct sock *)((hash << 1) | 1UL);
378 		skb_orphan(skb);
379 	} else if (sk->sk_state == TCP_CLOSE) {
380 		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
381 		/*
382 		 * Sockets in TCP_CLOSE are not connected.
383 		 * Typical use case is UDP sockets, they can send packets
384 		 * with sendto() to many different destinations.
385 		 * We probably could use a generic bit advertising
386 		 * non connected sockets, instead of sk_state == TCP_CLOSE,
387 		 * if we care enough.
388 		 */
389 		sk = (struct sock *)((hash << 1) | 1UL);
390 	}
391 
392 	if (fq_fastpath_check(sch, skb, now)) {
393 		q->internal.stat_fastpath_packets++;
394 		if (skb->sk == sk && q->rate_enable &&
395 		    READ_ONCE(sk->sk_pacing_status) != SK_PACING_FQ)
396 			smp_store_release(&sk->sk_pacing_status,
397 					  SK_PACING_FQ);
398 		return &q->internal;
399 	}
400 
401 	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
402 
403 	fq_gc(q, root, sk);
404 
405 	p = &root->rb_node;
406 	parent = NULL;
407 	while (*p) {
408 		parent = *p;
409 
410 		f = rb_entry(parent, struct fq_flow, fq_node);
411 		if (f->sk == sk) {
412 			/* socket might have been reallocated, so check
413 			 * if its sk_hash is the same.
414 			 * If not, we need to refill the credit with the
415 			 * initial quantum.
416 			 */
417 			if (unlikely(skb->sk == sk &&
418 				     f->socket_hash != sk->sk_hash)) {
419 				f->credit = q->initial_quantum;
420 				f->socket_hash = sk->sk_hash;
421 				if (q->rate_enable)
422 					smp_store_release(&sk->sk_pacing_status,
423 							  SK_PACING_FQ);
424 				if (fq_flow_is_throttled(f))
425 					fq_flow_unset_throttled(q, f);
426 				f->time_next_packet = 0ULL;
427 			}
428 			return f;
429 		}
430 		if (f->sk > sk)
431 			p = &parent->rb_right;
432 		else
433 			p = &parent->rb_left;
434 	}
435 
436 	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
437 	if (unlikely(!f)) {
438 		q->stat_allocation_errors++;
439 		return &q->internal;
440 	}
441 	/* f->t_root is already zeroed after kmem_cache_zalloc() */
442 
443 	fq_flow_set_detached(f);
444 	f->sk = sk;
445 	if (skb->sk == sk) {
446 		f->socket_hash = sk->sk_hash;
447 		if (q->rate_enable)
448 			smp_store_release(&sk->sk_pacing_status,
449 					  SK_PACING_FQ);
450 	}
451 	f->credit = q->initial_quantum;
452 
453 	rb_link_node(&f->fq_node, parent, p);
454 	rb_insert_color(&f->fq_node, root);
455 
456 	q->flows++;
457 	q->inactive_flows++;
458 	return f;
459 }
460 
461 static struct sk_buff *fq_peek(struct fq_flow *flow)
462 {
463 	struct sk_buff *skb = skb_rb_first(&flow->t_root);
464 	struct sk_buff *head = flow->head;
465 
466 	if (!skb)
467 		return head;
468 
469 	if (!head)
470 		return skb;
471 
472 	if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
473 		return skb;
474 	return head;
475 }
476 
477 static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
478 			  struct sk_buff *skb)
479 {
480 	if (skb == flow->head) {
481 		struct sk_buff *next = skb->next;
482 
483 		prefetch(next);
484 		flow->head = next;
485 	} else {
486 		rb_erase(&skb->rbnode, &flow->t_root);
487 		skb->dev = qdisc_dev(sch);
488 	}
489 }
490 
491 /* Remove one skb from the flow queue.
492  * This skb must be the return value of a prior fq_peek().
493  */
494 static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
495 			   struct sk_buff *skb)
496 {
497 	fq_erase_head(sch, flow, skb);
498 	skb_mark_not_on_list(skb);
499 	qdisc_qstats_backlog_dec(sch, skb);
500 	sch->q.qlen--;
501 	qdisc_bstats_update(sch, skb);
502 }
503 
504 static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
505 {
506 	struct rb_node **p, *parent;
507 	struct sk_buff *head, *aux;
508 
509 	head = flow->head;
510 	if (!head ||
511 	    fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
512 		if (!head)
513 			flow->head = skb;
514 		else
515 			flow->tail->next = skb;
516 		flow->tail = skb;
517 		skb->next = NULL;
518 		return;
519 	}
520 
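	/* Out of order time_to_send: insert into the rbtree sorted by
	 * time_to_send instead of the linear list.
	 */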
521 	p = &flow->t_root.rb_node;
522 	parent = NULL;
523 
524 	while (*p) {
525 		parent = *p;
526 		aux = rb_to_skb(parent);
527 		if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
528 			p = &parent->rb_right;
529 		else
530 			p = &parent->rb_left;
531 	}
532 	rb_link_node(&skb->rbnode, parent, p);
533 	rb_insert_color(&skb->rbnode, &flow->t_root);
534 }
535 
536 static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
537 				     const struct fq_sched_data *q, u64 now)
538 {
539 	return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
540 }
541 
542 static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
543 		      struct sk_buff **to_free)
544 {
545 	struct fq_sched_data *q = qdisc_priv(sch);
546 	struct fq_flow *f;
547 	u64 now;
548 	u8 band;
549 
550 	band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX);
551 	if (unlikely(q->band_pkt_count[band] >= sch->limit)) {
552 		q->stat_band_drops[band]++;
553 		return qdisc_drop_reason(skb, sch, to_free, QDISC_DROP_BAND_LIMIT);
554 	}
555 
556 	now = ktime_get_ns();
557 	if (!skb->tstamp) {
558 		fq_skb_cb(skb)->time_to_send = now;
559 	} else {
560 		/* Check if packet timestamp is too far in the future. */
561 		if (fq_packet_beyond_horizon(skb, q, now)) {
562 			if (q->horizon_drop) {
563 				q->stat_horizon_drops++;
564 				return qdisc_drop_reason(skb, sch, to_free,
565 							 QDISC_DROP_HORIZON_LIMIT);
566 			}
567 			q->stat_horizon_caps++;
568 			skb->tstamp = now + q->horizon;
569 		}
570 		fq_skb_cb(skb)->time_to_send = skb->tstamp;
571 	}
572 
573 	f = fq_classify(sch, skb, now);
574 
575 	if (f != &q->internal) {
576 		if (unlikely(f->qlen >= q->flow_plimit)) {
577 			q->stat_flows_plimit++;
578 			return qdisc_drop_reason(skb, sch, to_free,
579 						 QDISC_DROP_FLOW_LIMIT);
580 		}
581 
582 		if (fq_flow_is_detached(f)) {
583 			fq_flow_add_tail(q, f, NEW_FLOW);
584 			if (time_after(jiffies, f->age + q->flow_refill_delay))
585 				f->credit = max_t(u32, f->credit, q->quantum);
586 		}
587 
588 		f->band = band;
589 		q->band_pkt_count[band]++;
590 		fq_skb_cb(skb)->band = band;
591 		if (f->qlen == 0)
592 			q->inactive_flows--;
593 	}
594 
595 	f->qlen++;
596 	/* Note: this overwrites f->age */
597 	flow_queue_add(f, skb);
598 
599 	qdisc_qstats_backlog_inc(sch, skb);
600 	sch->q.qlen++;
601 
602 	return NET_XMIT_SUCCESS;
603 }
604 
605 static void fq_check_throttled(struct fq_sched_data *q, u64 now)
606 {
607 	unsigned long sample;
608 	struct rb_node *p;
609 
610 	if (q->time_next_delayed_flow > now + q->offload_horizon)
611 		return;
612 
613 	/* Update unthrottle latency EWMA.
614 	 * This is cheap and can help diagnose timer/latency problems.
615 	 */
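	/* A minimal reading of the update below: a classic EWMA,
	 * avg = avg - avg/8 + sample/8, giving each new sample a 1/8 weight.
	 */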
616 	sample = (unsigned long)(now - q->time_next_delayed_flow);
617 	if ((long)sample > 0) {
618 		q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
619 		q->unthrottle_latency_ns += sample >> 3;
620 	}
621 	now += q->offload_horizon;
622 
623 	q->time_next_delayed_flow = ~0ULL;
624 	while ((p = rb_first(&q->delayed)) != NULL) {
625 		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);
626 
627 		if (f->time_next_packet > now) {
628 			q->time_next_delayed_flow = f->time_next_packet;
629 			break;
630 		}
631 		fq_flow_unset_throttled(q, f);
632 	}
633 }
634 
635 static struct fq_flow_head *fq_pband_head_select(struct fq_perband_flows *pband)
636 {
637 	if (pband->credit <= 0)
638 		return NULL;
639 
640 	if (pband->new_flows.first)
641 		return &pband->new_flows;
642 
643 	return pband->old_flows.first ? &pband->old_flows : NULL;
644 }
645 
646 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
647 {
648 	struct fq_sched_data *q = qdisc_priv(sch);
649 	struct fq_perband_flows *pband;
650 	struct fq_flow_head *head;
651 	struct sk_buff *skb;
652 	struct fq_flow *f;
653 	unsigned long rate;
654 	int retry;
655 	u32 plen;
656 	u64 now;
657 
658 	if (!sch->q.qlen)
659 		return NULL;
660 
661 	skb = fq_peek(&q->internal);
662 	if (skb) {
663 		q->internal.qlen--;
664 		fq_dequeue_skb(sch, &q->internal, skb);
665 		goto out;
666 	}
667 
668 	now = ktime_get_ns();
669 	fq_check_throttled(q, now);
670 	retry = 0;
671 	pband = &q->band_flows[q->band_nr];
672 begin:
673 	head = fq_pband_head_select(pband);
674 	if (!head) {
675 		while (++retry <= FQ_BANDS) {
676 			if (++q->band_nr == FQ_BANDS)
677 				q->band_nr = 0;
678 			pband = &q->band_flows[q->band_nr];
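			/* Refill this band's credit, capping it at one quantum,
			 * before giving the band another chance to be serviced.
			 */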
679 			pband->credit = min(pband->credit + pband->quantum,
680 					    pband->quantum);
681 			if (pband->credit > 0)
682 				goto begin;
683 			retry = 0;
684 		}
685 		if (q->time_next_delayed_flow != ~0ULL)
686 			qdisc_watchdog_schedule_range_ns(&q->watchdog,
687 							q->time_next_delayed_flow,
688 							q->timer_slack);
689 		return NULL;
690 	}
691 	f = head->first;
692 	retry = 0;
693 	if (f->credit <= 0) {
694 		f->credit += q->quantum;
695 		head->first = f->next;
696 		fq_flow_add_tail(q, f, OLD_FLOW);
697 		goto begin;
698 	}
699 
700 	skb = fq_peek(f);
701 	if (skb) {
702 		u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
703 					     f->time_next_packet);
704 
705 		if (now + q->offload_horizon < time_next_packet) {
706 			head->first = f->next;
707 			f->time_next_packet = time_next_packet;
708 			fq_flow_set_throttled(q, f);
709 			goto begin;
710 		}
711 		prefetch(&skb->end);
712 		fq_dequeue_skb(sch, f, skb);
713 		if (unlikely((s64)(now - time_next_packet - q->ce_threshold) > 0)) {
714 			INET_ECN_set_ce(skb);
715 			q->stat_ce_mark++;
716 		}
717 		if (--f->qlen == 0)
718 			q->inactive_flows++;
719 		q->band_pkt_count[fq_skb_cb(skb)->band]--;
720 	} else {
721 		head->first = f->next;
722 		/* force a pass through old_flows to prevent starvation */
723 		if (head == &pband->new_flows) {
724 			fq_flow_add_tail(q, f, OLD_FLOW);
725 		} else {
726 			fq_flow_set_detached(f);
727 		}
728 		goto begin;
729 	}
730 	plen = qdisc_pkt_len(skb);
731 	f->credit -= plen;
732 	pband->credit -= plen;
733 
734 	if (!q->rate_enable)
735 		goto out;
736 
737 	rate = q->flow_max_rate;
738 
739 	/* If EDT time was provided for this skb, we need to
740 	 * update f->time_next_packet only if this qdisc enforces
741 	 * a flow max rate.
742 	 */
743 	if (!skb->tstamp) {
744 		if (skb->sk)
745 			rate = min(READ_ONCE(skb->sk->sk_pacing_rate), rate);
746 
747 		if (rate <= q->low_rate_threshold) {
748 			f->credit = 0;
749 		} else {
750 			plen = max(plen, q->quantum);
751 			if (f->credit > 0)
752 				goto out;
753 		}
754 	}
755 	if (rate != ~0UL) {
756 		u64 len = (u64)plen * NSEC_PER_SEC;
757 
758 		if (likely(rate))
759 			len = div64_ul(len, rate);
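		/* Illustrative arithmetic (example numbers, not defaults):
		 * a 1514 byte packet paced at 125,000,000 bytes/sec (1 Gbit/s) gives
		 * len = 1514 * NSEC_PER_SEC / 125000000 = 12112 ns between packets.
		 */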
760 		/* Since socket rate can change later,
761 		 * clamp the delay to 1 second.
762 		 * Really, providers of overly large packets should be fixed!
763 		 */
764 		if (unlikely(len > NSEC_PER_SEC)) {
765 			len = NSEC_PER_SEC;
766 			q->stat_pkts_too_long++;
767 		}
768 		/* Account for scheduler/timer drift.
769 		 * f->time_next_packet was set when the prior packet was sent,
770 		 * and the current time (@now) can be late by tens of usec.
771 		 */
772 		if (f->time_next_packet)
773 			len -= min(len/2, now - f->time_next_packet);
774 		f->time_next_packet = now + len;
775 	}
776 out:
777 	return skb;
778 }
779 
780 static void fq_flow_purge(struct fq_flow *flow)
781 {
782 	struct rb_node *p = rb_first(&flow->t_root);
783 
784 	while (p) {
785 		struct sk_buff *skb = rb_to_skb(p);
786 
787 		p = rb_next(p);
788 		rb_erase(&skb->rbnode, &flow->t_root);
789 		rtnl_kfree_skbs(skb, skb);
790 	}
791 	rtnl_kfree_skbs(flow->head, flow->tail);
792 	flow->head = NULL;
793 	flow->qlen = 0;
794 }
795 
796 static void fq_reset(struct Qdisc *sch)
797 {
798 	struct fq_sched_data *q = qdisc_priv(sch);
799 	struct rb_root *root;
800 	struct rb_node *p;
801 	struct fq_flow *f;
802 	unsigned int idx;
803 
804 	sch->q.qlen = 0;
805 	sch->qstats.backlog = 0;
806 
807 	fq_flow_purge(&q->internal);
808 
809 	if (!q->fq_root)
810 		return;
811 
812 	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
813 		root = &q->fq_root[idx];
814 		while ((p = rb_first(root)) != NULL) {
815 			f = rb_entry(p, struct fq_flow, fq_node);
816 			rb_erase(p, root);
817 
818 			fq_flow_purge(f);
819 
820 			kmem_cache_free(fq_flow_cachep, f);
821 		}
822 	}
823 	for (idx = 0; idx < FQ_BANDS; idx++) {
824 		q->band_flows[idx].new_flows.first = NULL;
825 		q->band_flows[idx].old_flows.first = NULL;
826 	}
827 	q->delayed		= RB_ROOT;
828 	q->flows		= 0;
829 	q->inactive_flows	= 0;
830 	q->throttled_flows	= 0;
831 }
832 
833 static void fq_rehash(struct fq_sched_data *q,
834 		      struct rb_root *old_array, u32 old_log,
835 		      struct rb_root *new_array, u32 new_log)
836 {
837 	struct rb_node *op, **np, *parent;
838 	struct rb_root *oroot, *nroot;
839 	struct fq_flow *of, *nf;
840 	int fcnt = 0;
841 	u32 idx;
842 
843 	for (idx = 0; idx < (1U << old_log); idx++) {
844 		oroot = &old_array[idx];
845 		while ((op = rb_first(oroot)) != NULL) {
846 			rb_erase(op, oroot);
847 			of = rb_entry(op, struct fq_flow, fq_node);
848 			if (fq_gc_candidate(of)) {
849 				fcnt++;
850 				kmem_cache_free(fq_flow_cachep, of);
851 				continue;
852 			}
853 			nroot = &new_array[hash_ptr(of->sk, new_log)];
854 
855 			np = &nroot->rb_node;
856 			parent = NULL;
857 			while (*np) {
858 				parent = *np;
859 
860 				nf = rb_entry(parent, struct fq_flow, fq_node);
861 				BUG_ON(nf->sk == of->sk);
862 
863 				if (nf->sk > of->sk)
864 					np = &parent->rb_right;
865 				else
866 					np = &parent->rb_left;
867 			}
868 
869 			rb_link_node(&of->fq_node, parent, np);
870 			rb_insert_color(&of->fq_node, nroot);
871 		}
872 	}
873 	q->flows -= fcnt;
874 	q->inactive_flows -= fcnt;
875 	q->stat_gc_flows += fcnt;
876 }
877 
878 static void fq_free(void *addr)
879 {
880 	kvfree(addr);
881 }
882 
883 static int fq_resize(struct Qdisc *sch, u32 log)
884 {
885 	struct fq_sched_data *q = qdisc_priv(sch);
886 	struct rb_root *array;
887 	void *old_fq_root;
888 	u32 idx;
889 
890 	if (q->fq_root && log == q->fq_trees_log)
891 		return 0;
892 
893 	/* If XPS was set up, we can allocate memory on the right NUMA node */
894 	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
895 			      netdev_queue_numa_node_read(sch->dev_queue));
896 	if (!array)
897 		return -ENOMEM;
898 
899 	for (idx = 0; idx < (1U << log); idx++)
900 		array[idx] = RB_ROOT;
901 
902 	sch_tree_lock(sch);
903 
904 	old_fq_root = q->fq_root;
905 	if (old_fq_root)
906 		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
907 
908 	q->fq_root = array;
909 	WRITE_ONCE(q->fq_trees_log, log);
910 
911 	sch_tree_unlock(sch);
912 
913 	fq_free(old_fq_root);
914 
915 	return 0;
916 }
917 
918 static const struct netlink_range_validation iq_range = {
919 	.max = INT_MAX,
920 };
921 
922 static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
923 	[TCA_FQ_UNSPEC]			= { .strict_start_type = TCA_FQ_TIMER_SLACK },
924 
925 	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
926 	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
927 	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
928 	[TCA_FQ_INITIAL_QUANTUM]	= NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
929 	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
930 	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
931 	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
932 	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
933 	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
934 	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
935 	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
936 	[TCA_FQ_CE_THRESHOLD]		= { .type = NLA_U32 },
937 	[TCA_FQ_TIMER_SLACK]		= { .type = NLA_U32 },
938 	[TCA_FQ_HORIZON]		= { .type = NLA_U32 },
939 	[TCA_FQ_HORIZON_DROP]		= { .type = NLA_U8 },
940 	[TCA_FQ_PRIOMAP]		= NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)),
941 	[TCA_FQ_WEIGHTS]		= NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)),
942 	[TCA_FQ_OFFLOAD_HORIZON]	= { .type = NLA_U32 },
943 };
944 
945 /* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
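/* Worked example (illustrative): if in[0..3] == {1, 2, 2, 2}, then
 * tmp[0] = 1 | (2 << 2) | (2 << 4) | (2 << 6) = 0xa9, and fq_prio2band()
 * recovers e.g. band 2 for prio 1 via (0xa9 >> 2) & 0x3.
 */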
946 static void fq_prio2band_compress_crumb(const u8 *in, u8 *out)
947 {
948 	const int num_elems = TC_PRIO_MAX + 1;
949 	u8 tmp[FQ_PRIO2BAND_CRUMB_SIZE];
950 	int i;
951 
952 	memset(tmp, 0, sizeof(tmp));
953 	for (i = 0; i < num_elems; i++)
954 		tmp[i / 4] |= in[i] << (2 * (i & 0x3));
955 
956 	for (i = 0; i < FQ_PRIO2BAND_CRUMB_SIZE; i++)
957 		WRITE_ONCE(out[i], tmp[i]);
958 }
959 
960 static void fq_prio2band_decompress_crumb(const u8 *in, u8 *out)
961 {
962 	const int num_elems = TC_PRIO_MAX + 1;
963 	int i;
964 
965 	for (i = 0; i < num_elems; i++)
966 		out[i] = fq_prio2band(in, i);
967 }
968 
969 static int fq_load_weights(struct fq_sched_data *q,
970 			   const struct nlattr *attr,
971 			   struct netlink_ext_ack *extack)
972 {
973 	s32 *weights = nla_data(attr);
974 	int i;
975 
976 	for (i = 0; i < FQ_BANDS; i++) {
977 		if (weights[i] < FQ_MIN_WEIGHT) {
978 			NL_SET_ERR_MSG_FMT_MOD(extack, "Weight %d less than minimum allowed %d",
979 					       weights[i], FQ_MIN_WEIGHT);
980 			return -EINVAL;
981 		}
982 	}
983 	for (i = 0; i < FQ_BANDS; i++)
984 		WRITE_ONCE(q->band_flows[i].quantum, weights[i]);
985 	return 0;
986 }
987 
988 static int fq_load_priomap(struct fq_sched_data *q,
989 			   const struct nlattr *attr,
990 			   struct netlink_ext_ack *extack)
991 {
992 	const struct tc_prio_qopt *map = nla_data(attr);
993 	int i;
994 
995 	if (map->bands != FQ_BANDS) {
996 		NL_SET_ERR_MSG_MOD(extack, "FQ only supports 3 bands");
997 		return -EINVAL;
998 	}
999 	for (i = 0; i < TC_PRIO_MAX + 1; i++) {
1000 		if (map->priomap[i] >= FQ_BANDS) {
1001 			NL_SET_ERR_MSG_FMT_MOD(extack, "FQ priomap field %d maps to a too high band %d",
1002 					       i, map->priomap[i]);
1003 			return -EINVAL;
1004 		}
1005 	}
1006 	fq_prio2band_compress_crumb(map->priomap, q->prio2band);
1007 	return 0;
1008 }
1009 
1010 static int fq_change(struct Qdisc *sch, struct nlattr *opt,
1011 		     struct netlink_ext_ack *extack)
1012 {
1013 	unsigned int dropped_pkts = 0, dropped_bytes = 0;
1014 	struct fq_sched_data *q = qdisc_priv(sch);
1015 	struct nlattr *tb[TCA_FQ_MAX + 1];
1016 	u32 fq_log;
1017 	int err;
1018 
1019 	err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
1020 					  NULL);
1021 	if (err < 0)
1022 		return err;
1023 
1024 	sch_tree_lock(sch);
1025 
1026 	fq_log = q->fq_trees_log;
1027 
1028 	if (tb[TCA_FQ_BUCKETS_LOG]) {
1029 		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);
1030 
1031 		if (nval >= 1 && nval <= ilog2(256*1024))
1032 			fq_log = nval;
1033 		else
1034 			err = -EINVAL;
1035 	}
1036 	if (tb[TCA_FQ_PLIMIT])
1037 		WRITE_ONCE(sch->limit,
1038 			   nla_get_u32(tb[TCA_FQ_PLIMIT]));
1039 
1040 	if (tb[TCA_FQ_FLOW_PLIMIT])
1041 		WRITE_ONCE(q->flow_plimit,
1042 			   nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]));
1043 
1044 	if (tb[TCA_FQ_QUANTUM]) {
1045 		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
1046 
1047 		if (quantum > 0 && quantum <= (1 << 20)) {
1048 			WRITE_ONCE(q->quantum, quantum);
1049 		} else {
1050 			NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
1051 			err = -EINVAL;
1052 		}
1053 	}
1054 
1055 	if (tb[TCA_FQ_INITIAL_QUANTUM])
1056 		WRITE_ONCE(q->initial_quantum,
1057 			   nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]));
1058 
1059 	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
1060 		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
1061 				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
1062 
1063 	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
1064 		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
1065 
1066 		WRITE_ONCE(q->flow_max_rate,
1067 			   (rate == ~0U) ? ~0UL : rate);
1068 	}
1069 	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
1070 		WRITE_ONCE(q->low_rate_threshold,
1071 			   nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]));
1072 
1073 	if (tb[TCA_FQ_RATE_ENABLE]) {
1074 		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
1075 
1076 		if (enable <= 1)
1077 			WRITE_ONCE(q->rate_enable,
1078 				   enable);
1079 		else
1080 			err = -EINVAL;
1081 	}
1082 
1083 	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
1084 		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);
1085 
1086 		WRITE_ONCE(q->flow_refill_delay,
1087 			   usecs_to_jiffies(usecs_delay));
1088 	}
1089 
1090 	if (!err && tb[TCA_FQ_PRIOMAP])
1091 		err = fq_load_priomap(q, tb[TCA_FQ_PRIOMAP], extack);
1092 
1093 	if (!err && tb[TCA_FQ_WEIGHTS])
1094 		err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack);
1095 
1096 	if (tb[TCA_FQ_ORPHAN_MASK])
1097 		WRITE_ONCE(q->orphan_mask,
1098 			   nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]));
1099 
1100 	if (tb[TCA_FQ_CE_THRESHOLD])
1101 		WRITE_ONCE(q->ce_threshold,
1102 			   (u64)NSEC_PER_USEC *
1103 			   nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]));
1104 
1105 	if (tb[TCA_FQ_TIMER_SLACK])
1106 		WRITE_ONCE(q->timer_slack,
1107 			   nla_get_u32(tb[TCA_FQ_TIMER_SLACK]));
1108 
1109 	if (tb[TCA_FQ_HORIZON])
1110 		WRITE_ONCE(q->horizon,
1111 			   (u64)NSEC_PER_USEC *
1112 			   nla_get_u32(tb[TCA_FQ_HORIZON]));
1113 
1114 	if (tb[TCA_FQ_HORIZON_DROP])
1115 		WRITE_ONCE(q->horizon_drop,
1116 			   nla_get_u8(tb[TCA_FQ_HORIZON_DROP]));
1117 
1118 	if (tb[TCA_FQ_OFFLOAD_HORIZON]) {
1119 		u64 offload_horizon = (u64)NSEC_PER_USEC *
1120 				      nla_get_u32(tb[TCA_FQ_OFFLOAD_HORIZON]);
1121 
1122 		if (offload_horizon <= qdisc_dev(sch)->max_pacing_offload_horizon) {
1123 			WRITE_ONCE(q->offload_horizon, offload_horizon);
1124 		} else {
1125 			NL_SET_ERR_MSG_MOD(extack, "invalid offload_horizon");
1126 			err = -EINVAL;
1127 		}
1128 	}
1129 	if (!err) {
1130 
1131 		sch_tree_unlock(sch);
1132 		err = fq_resize(sch, fq_log);
1133 		sch_tree_lock(sch);
1134 	}
1135 
1136 	while (sch->q.qlen > sch->limit) {
1137 		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
1138 
1139 		if (!skb)
1140 			break;
1141 
1142 		dropped_pkts++;
1143 		dropped_bytes += qdisc_pkt_len(skb);
1144 		rtnl_kfree_skbs(skb, skb);
1145 	}
1146 	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
1147 
1148 	sch_tree_unlock(sch);
1149 	return err;
1150 }
1151 
1152 static void fq_destroy(struct Qdisc *sch)
1153 {
1154 	struct fq_sched_data *q = qdisc_priv(sch);
1155 
1156 	fq_reset(sch);
1157 	fq_free(q->fq_root);
1158 	qdisc_watchdog_cancel(&q->watchdog);
1159 }
1160 
1161 static int fq_init(struct Qdisc *sch, struct nlattr *opt,
1162 		   struct netlink_ext_ack *extack)
1163 {
1164 	struct fq_sched_data *q = qdisc_priv(sch);
1165 	int i, err;
1166 
1167 	sch->limit		= 10000;
1168 	q->flow_plimit		= 100;
1169 	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
1170 	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
1171 	q->flow_refill_delay	= msecs_to_jiffies(40);
1172 	q->flow_max_rate	= ~0UL;
1173 	q->time_next_delayed_flow = ~0ULL;
1174 	q->rate_enable		= 1;
1175 	for (i = 0; i < FQ_BANDS; i++) {
1176 		q->band_flows[i].new_flows.first = NULL;
1177 		q->band_flows[i].old_flows.first = NULL;
1178 	}
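	/* Default per-band quanta in a 9:3:1 ratio:
	 * 9 << 16 = 576KB, 3 << 16 = 192KB, 1 << 16 = 64KB per round
	 * (see the quantum comment on struct fq_perband_flows).
	 */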
1179 	q->band_flows[0].quantum = 9 << 16;
1180 	q->band_flows[1].quantum = 3 << 16;
1181 	q->band_flows[2].quantum = 1 << 16;
1182 	q->delayed		= RB_ROOT;
1183 	q->fq_root		= NULL;
1184 	q->fq_trees_log		= ilog2(1024);
1185 	q->orphan_mask		= 1024 - 1;
1186 	q->low_rate_threshold	= 550000 / 8;
1187 
1188 	q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */
1189 
1190 	q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */
1191 	q->horizon_drop = 1; /* by default, drop packets beyond horizon */
1192 
1193 	/* Default ce_threshold of 4294 seconds */
1194 	q->ce_threshold		= (u64)NSEC_PER_USEC * ~0U;
1195 
1196 	fq_prio2band_compress_crumb(sch_default_prio2band, q->prio2band);
1197 	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
1198 
1199 	if (opt)
1200 		err = fq_change(sch, opt, extack);
1201 	else
1202 		err = fq_resize(sch, q->fq_trees_log);
1203 
1204 	return err;
1205 }
1206 
1207 static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
1208 {
1209 	struct fq_sched_data *q = qdisc_priv(sch);
1210 	struct tc_prio_qopt prio = {
1211 		.bands = FQ_BANDS,
1212 	};
1213 	struct nlattr *opts;
1214 	u64 offload_horizon;
1215 	u64 ce_threshold;
1216 	s32 weights[3];
1217 	u64 horizon;
1218 
1219 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
1220 	if (opts == NULL)
1221 		goto nla_put_failure;
1222 
1223 	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
1224 
1225 	ce_threshold = READ_ONCE(q->ce_threshold);
1226 	do_div(ce_threshold, NSEC_PER_USEC);
1227 
1228 	horizon = READ_ONCE(q->horizon);
1229 	do_div(horizon, NSEC_PER_USEC);
1230 
1231 	offload_horizon = READ_ONCE(q->offload_horizon);
1232 	do_div(offload_horizon, NSEC_PER_USEC);
1233 
1234 	if (nla_put_u32(skb, TCA_FQ_PLIMIT,
1235 			READ_ONCE(sch->limit)) ||
1236 	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT,
1237 			READ_ONCE(q->flow_plimit)) ||
1238 	    nla_put_u32(skb, TCA_FQ_QUANTUM,
1239 			READ_ONCE(q->quantum)) ||
1240 	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM,
1241 			READ_ONCE(q->initial_quantum)) ||
1242 	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE,
1243 			READ_ONCE(q->rate_enable)) ||
1244 	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
1245 			min_t(unsigned long,
1246 			      READ_ONCE(q->flow_max_rate), ~0U)) ||
1247 	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
1248 			jiffies_to_usecs(READ_ONCE(q->flow_refill_delay))) ||
1249 	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK,
1250 			READ_ONCE(q->orphan_mask)) ||
1251 	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
1252 			READ_ONCE(q->low_rate_threshold)) ||
1253 	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
1254 	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG,
1255 			READ_ONCE(q->fq_trees_log)) ||
1256 	    nla_put_u32(skb, TCA_FQ_TIMER_SLACK,
1257 			READ_ONCE(q->timer_slack)) ||
1258 	    nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
1259 	    nla_put_u32(skb, TCA_FQ_OFFLOAD_HORIZON, (u32)offload_horizon) ||
1260 	    nla_put_u8(skb, TCA_FQ_HORIZON_DROP,
1261 		       READ_ONCE(q->horizon_drop)))
1262 		goto nla_put_failure;
1263 
1264 	fq_prio2band_decompress_crumb(q->prio2band, prio.priomap);
1265 	if (nla_put(skb, TCA_FQ_PRIOMAP, sizeof(prio), &prio))
1266 		goto nla_put_failure;
1267 
1268 	weights[0] = READ_ONCE(q->band_flows[0].quantum);
1269 	weights[1] = READ_ONCE(q->band_flows[1].quantum);
1270 	weights[2] = READ_ONCE(q->band_flows[2].quantum);
1271 	if (nla_put(skb, TCA_FQ_WEIGHTS, sizeof(weights), &weights))
1272 		goto nla_put_failure;
1273 
1274 	return nla_nest_end(skb, opts);
1275 
1276 nla_put_failure:
1277 	return -1;
1278 }
1279 
1280 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
1281 {
1282 	struct fq_sched_data *q = qdisc_priv(sch);
1283 	struct tc_fq_qd_stats st;
1284 	int i;
1285 
1286 	st.pad = 0;
1287 
1288 	sch_tree_lock(sch);
1289 
1290 	st.gc_flows		  = q->stat_gc_flows;
1291 	st.highprio_packets	  = 0;
1292 	st.fastpath_packets	  = q->internal.stat_fastpath_packets;
1293 	st.tcp_retrans		  = 0;
1294 	st.throttled		  = q->stat_throttled;
1295 	st.flows_plimit		  = q->stat_flows_plimit;
1296 	st.pkts_too_long	  = q->stat_pkts_too_long;
1297 	st.allocation_errors	  = q->stat_allocation_errors;
1298 	st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -
1299 				    ktime_get_ns();
1300 	st.flows		  = q->flows;
1301 	st.inactive_flows	  = q->inactive_flows;
1302 	st.throttled_flows	  = q->throttled_flows;
1303 	st.unthrottle_latency_ns  = min_t(unsigned long,
1304 					  q->unthrottle_latency_ns, ~0U);
1305 	st.ce_mark		  = q->stat_ce_mark;
1306 	st.horizon_drops	  = q->stat_horizon_drops;
1307 	st.horizon_caps		  = q->stat_horizon_caps;
1308 	for (i = 0; i < FQ_BANDS; i++) {
1309 		st.band_drops[i]  = q->stat_band_drops[i];
1310 		st.band_pkt_count[i] = q->band_pkt_count[i];
1311 	}
1312 	sch_tree_unlock(sch);
1313 
1314 	return gnet_stats_copy_app(d, &st, sizeof(st));
1315 }
1316 
1317 static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
1318 	.id		=	"fq",
1319 	.priv_size	=	sizeof(struct fq_sched_data),
1320 
1321 	.enqueue	=	fq_enqueue,
1322 	.dequeue	=	fq_dequeue,
1323 	.peek		=	qdisc_peek_dequeued,
1324 	.init		=	fq_init,
1325 	.reset		=	fq_reset,
1326 	.destroy	=	fq_destroy,
1327 	.change		=	fq_change,
1328 	.dump		=	fq_dump,
1329 	.dump_stats	=	fq_dump_stats,
1330 	.owner		=	THIS_MODULE,
1331 };
1332 MODULE_ALIAS_NET_SCH("fq");
1333 
1334 static int __init fq_module_init(void)
1335 {
1336 	int ret;
1337 
1338 	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
1339 					   sizeof(struct fq_flow),
1340 					   0, SLAB_HWCACHE_ALIGN, NULL);
1341 	if (!fq_flow_cachep)
1342 		return -ENOMEM;
1343 
1344 	ret = register_qdisc(&fq_qdisc_ops);
1345 	if (ret)
1346 		kmem_cache_destroy(fq_flow_cachep);
1347 	return ret;
1348 }
1349 
1350 static void __exit fq_module_exit(void)
1351 {
1352 	unregister_qdisc(&fq_qdisc_ops);
1353 	kmem_cache_destroy(fq_flow_cachep);
1354 }
1355 
1356 module_init(fq_module_init)
1357 module_exit(fq_module_exit)
1358 MODULE_AUTHOR("Eric Dumazet");
1359 MODULE_LICENSE("GPL");
1360 MODULE_DESCRIPTION("Fair Queue Packet Scheduler");
1361