/*
 * Fair Queue CoDel discipline
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/*	Fair Queue CoDel.
 *
 * Principles :
 * Packets are classified (internal classifier or external) on flows.
 * This is a stochastic model (as we use a hash, several flows
 *			       might be hashed to the same slot)
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * drops happen at the head of the queue only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow)
 */
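
/* Example setup (illustrative only; these keywords belong to the iproute2
 * tc-fq_codel(8) front end, which encodes them as the TCA_FQ_CODEL_*
 * netlink attributes parsed by fq_codel_change() below):
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms ecn
 */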

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	u32 hash = skb_get_hash_perturb(skb, q->perturbation);

	return reciprocal_scale(hash, q->flows_cnt);
}
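
/* reciprocal_scale() maps the 32bit flow hash onto [0, flows_cnt) with a
 * multiply and shift, i.e. (u64)hash * flows_cnt >> 32, avoiding a costly
 * modulus. Several flows may still share one slot (this is the stochastic
 * part); the per-qdisc perturbation key varies which flows collide.
 */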

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
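			/* fall through */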
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions : might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}
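
/* Invariant for the two helpers above: each flow queue is a singly linked
 * FIFO chained through skb->next. flow->head is the oldest packet and is
 * NULL when the queue is empty; flow->tail is only valid while flow->head
 * is set. Both operations are O(1).
 */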

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += skb->truesize;
		kfree_skb(skb);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}
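
/* Worked example: if the fattest flow has a 64KB backlog, threshold is
 * 32KB, so with 1500 byte packets the loop above drops about 22 packets,
 * comfortably under the drop_batch_size cap (64 by default) used from
 * the enqueue path.
 */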

static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
{
	unsigned int prev_backlog;

	prev_backlog = sch->qstats.backlog;
	fq_codel_drop(sch, 1U);
	return prev_backlog - sch->qstats.backlog;
}

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	q->memory_usage += skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog,
	 * capped at 64 packets, to avoid too big a cpu spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet from the flow we just enqueued to,
	 * return NET_XMIT_CN; in that case our parents won't account
	 * the packet we just queued in their backlogs, so exclude it
	 * from the reduction.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= skb->truesize;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	qdisc_drop(skb, sch);
}

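/* Deficit Round Robin scheduling: flows on new_flows are served before
 * flows on old_flows. A flow that exhausts its quantum (deficit <= 0)
 * is refilled and rotated to the tail of old_flows. A new flow that
 * empties is parked on old_flows once (rather than deleted right away)
 * so that it cannot immediately regain "new flow" priority.
 */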
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;
	unsigned int prev_backlog;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;
	prev_backlog = sch->qstats.backlog;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		while (flow->head) {
			struct sk_buff *skb = dequeue_head(flow);

			qdisc_qstats_backlog_dec(sch, skb);
			kfree_skb(skb);
		}

		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}
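
	/* The three time attributes handled above and below arrive in usec;
	 * codel works in ns >> CODEL_SHIFT units. Example: the common 5ms
	 * target arrives as 5000 and becomes
	 * (5000 * NSEC_PER_USEC) >> CODEL_SHIFT, with CODEL_SHIFT
	 * currently 10.
	 */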

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

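/* The flow and backlog tables can reach 65536 entries, so the allocation
 * may be too large for kmalloc: try a physically contiguous kzalloc()
 * first (__GFP_NOWARN silences the failure warning), then fall back to
 * vzalloc(). kvfree() in fq_codel_free() handles either variant.
 */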
static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = fq_codel_change(sch, opt);

		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

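/* fq_codel exposes each flow slot as a pseudo class (minor ids 1..flows_cnt)
 * so that "tc class show" and per-flow stats work. Classes cannot be
 * created or deleted, hence most of the class operations below are stubs.
 */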
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb = flow->head;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		while (skb) {
			qs.qlen++;
			skb = skb->next;
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	fq_codel_qdisc_drop,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");