xref: /linux/net/sched/sch_htb.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
1 /*
2  * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Martin Devera, <devik@cdi.cz>
10  *
11  * Credits (in time order) for older HTB versions:
12  *              Stef Coene <stef.coene@docum.org>
13  *			HTB support at LARTC mailing list
14  *		Ondrej Kraus, <krauso@barr.cz>
15  *			found missing INIT_QDISC(htb)
16  *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
17  *			helped a lot to locate nasty class stall bug
18  *		Andi Kleen, Jamal Hadi, Bert Hubert
19  *			code review and helpful comments on shaping
20  *		Tomasz Wrona, <tw@eter.tym.pl>
21  *			created test case so that I was able to fix nasty bug
22  *		Wilfried Weissmann
23  *			spotted bug in dequeue code and helped with fix
24  *		Jiri Fojtasek
25  *			fixed requeue routine
26  *		and many others. thanks.
27  */
28 #include <linux/module.h>
29 #include <linux/moduleparam.h>
30 #include <linux/types.h>
31 #include <linux/kernel.h>
32 #include <linux/string.h>
33 #include <linux/errno.h>
34 #include <linux/skbuff.h>
35 #include <linux/list.h>
36 #include <linux/compiler.h>
37 #include <linux/rbtree.h>
38 #include <linux/workqueue.h>
39 #include <linux/slab.h>
40 #include <net/netlink.h>
41 #include <net/sch_generic.h>
42 #include <net/pkt_sched.h>
43 #include <net/pkt_cls.h>
44 
45 /* HTB algorithm.
46     Author: devik@cdi.cz
47     ========================================================================
48     HTB is like TBF with multiple classes. It is also similar to CBQ because
49     it allows assigning a priority to each class in the hierarchy.
50     In fact it is another implementation of Floyd's formal sharing.
51 
52     Levels:
53     Each class is assigned a level. Leaves ALWAYS have level 0 and root
54     classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
55     one less than their parent.
56 */
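/* For illustration (class handles below are placeholders): with the levels
 * rule above, a three-deep hierarchy looks like
 *
 *	1:1	root class	level TC_HTB_MAXDEPTH - 1
 *	1:10	inner class	level TC_HTB_MAXDEPTH - 2 (one below its parent)
 *	1:100	leaf class	level 0 (leaves are always level 0)
 *
 * Only the "leaf is 0" and "one less than parent" relations matter for the
 * row/feed structures below; the absolute values depend on TC_HTB_MAXDEPTH.
 */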
57 
58 static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
59 #define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */
60 
61 #if HTB_VER >> 16 != TC_HTB_PROTOVER
62 #error "Mismatched sch_htb.c and pkt_sch.h"
63 #endif
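/* For example, HTB_VER 0x30011 encodes protocol major version 3
 * (0x30011 >> 16, which the check above compares against TC_HTB_PROTOVER)
 * and minor revision 0x11.
 */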
64 
65 /* Module parameter and sysfs export */
66 module_param    (htb_hysteresis, int, 0640);
67 MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
68 
69 static int htb_rate_est = 0; /* whether htb classes get a default rate estimator */
70 module_param(htb_rate_est, int, 0640);
71 MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
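/* Both knobs are ordinary module parameters, so (assuming HTB is built as
 * the sch_htb module) they can be given at load time or, thanks to the 0640
 * permissions, changed by root at runtime, e.g.:
 *
 *	modprobe sch_htb htb_hysteresis=1
 *	echo 1 > /sys/module/sch_htb/parameters/htb_rate_est
 */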
72 
73 /* used internally to keep the status of a single class */
74 enum htb_cmode {
75 	HTB_CANT_SEND,		/* class can't send and can't borrow */
76 	HTB_MAY_BORROW,		/* class can't send but may borrow */
77 	HTB_CAN_SEND		/* class can send */
78 };
79 
80 struct htb_prio {
81 	union {
82 		struct rb_root	row;
83 		struct rb_root	feed;
84 	};
85 	struct rb_node	*ptr;
86 	/* When class changes from state 1->2 and disconnects from
87 	 * parent's feed then we lose the ptr value and start from the
88 	 * first child again. Here we store the classid of the
89 	 * last valid ptr (used when ptr is NULL).
90 	 */
91 	u32		last_ptr_id;
92 };
93 
94 /* interior & leaf nodes; props specific to leaves are marked L:
95  * To reduce false sharing, place mostly read fields at beginning,
96  * and mostly written ones at the end.
97  */
98 struct htb_class {
99 	struct Qdisc_class_common common;
100 	struct psched_ratecfg	rate;
101 	struct psched_ratecfg	ceil;
102 	s64			buffer, cbuffer;/* token bucket depth/rate */
103 	s64			mbuffer;	/* max wait time */
104 	u32			prio;		/* these two are used only by leaves... */
105 	int			quantum;	/* but stored for parent-to-leaf return */
106 
107 	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
108 	int			filter_cnt;
109 	int			refcnt;		/* usage count of this class */
110 
111 	int			level;		/* our level (see above) */
112 	unsigned int		children;
113 	struct htb_class	*parent;	/* parent class */
114 
115 	struct net_rate_estimator __rcu *rate_est;
116 
117 	/*
118 	 * Written often fields
119 	 */
120 	struct gnet_stats_basic_packed bstats;
121 	struct tc_htb_xstats	xstats;	/* our special stats */
122 
123 	/* token bucket parameters */
124 	s64			tokens, ctokens;/* current number of tokens */
125 	s64			t_c;		/* checkpoint time */
126 
127 	union {
128 		struct htb_class_leaf {
129 			struct list_head drop_list;
130 			int		deficit[TC_HTB_MAXDEPTH];
131 			struct Qdisc	*q;
132 		} leaf;
133 		struct htb_class_inner {
134 			struct htb_prio clprio[TC_HTB_NUMPRIO];
135 		} inner;
136 	} un;
137 	s64			pq_key;
138 
139 	int			prio_activity;	/* for which prios are we active */
140 	enum htb_cmode		cmode;		/* current mode of the class */
141 	struct rb_node		pq_node;	/* node for event queue */
142 	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
143 
144 	unsigned int drops ____cacheline_aligned_in_smp;
145 };
146 
147 struct htb_level {
148 	struct rb_root	wait_pq;
149 	struct htb_prio hprio[TC_HTB_NUMPRIO];
150 };
151 
152 struct htb_sched {
153 	struct Qdisc_class_hash clhash;
154 	int			defcls;		/* class where unclassified flows go to */
155 	int			rate2quantum;	/* quant = rate / rate2quantum */
156 
157 	/* filters for qdisc itself */
158 	struct tcf_proto __rcu	*filter_list;
159 
160 #define HTB_WARN_TOOMANYEVENTS	0x1
161 	unsigned int		warned;	/* only one warning */
162 	int			direct_qlen;
163 	struct work_struct	work;
164 
165 	/* non-shaped skbs; let them go directly thru */
166 	struct qdisc_skb_head	direct_queue;
167 	long			direct_pkts;
168 
169 	struct qdisc_watchdog	watchdog;
170 
171 	s64			now;	/* cached dequeue time */
172 	struct list_head	drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
173 
174 	/* time of nearest event per level (row) */
175 	s64			near_ev_cache[TC_HTB_MAXDEPTH];
176 
177 	int			row_mask[TC_HTB_MAXDEPTH];
178 
179 	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
180 };
181 
182 /* find class in global hash table using given handle */
183 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
184 {
185 	struct htb_sched *q = qdisc_priv(sch);
186 	struct Qdisc_class_common *clc;
187 
188 	clc = qdisc_class_find(&q->clhash, handle);
189 	if (clc == NULL)
190 		return NULL;
191 	return container_of(clc, struct htb_class, common);
192 }
193 
194 /**
195  * htb_classify - classify a packet into class
196  *
197  * It returns NULL if the packet should be dropped or -1 if the packet
198  * should be passed directly thru. In all other cases leaf class is returned.
199  * We allow direct class selection by classid in skb->priority. Then we examine
200  * filters in qdisc and in inner nodes (if higher filter points to the inner
201  * node). If we end up with classid MAJOR:0 we enqueue the skb into special
202  * internal fifo (direct). These packets then go directly thru. If we still
203  * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
204  * we finish and return the direct queue.
205  */
206 #define HTB_DIRECT ((struct htb_class *)-1L)
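/* Illustrative tc usage (device, rates and classids are placeholders):
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1: classid 1:10 htb rate 1mbit
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dport 22 0xffff flowid 1:10
 *
 * An skb matching the filter is classified to leaf 1:10, an skb with
 * skb->priority set to 1:0 goes to the direct queue, and everything else
 * falls back to the default class 1:20 (if that leaf exists; otherwise it
 * also goes direct).
 */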
207 
208 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
209 				      int *qerr)
210 {
211 	struct htb_sched *q = qdisc_priv(sch);
212 	struct htb_class *cl;
213 	struct tcf_result res;
214 	struct tcf_proto *tcf;
215 	int result;
216 
217 	/* allow class selection by setting skb->priority to a valid classid;
218 	 * note that nfmark can be used too by attaching filter fw with no
219 	 * rules in it
220 	 */
221 	if (skb->priority == sch->handle)
222 		return HTB_DIRECT;	/* X:0 (direct flow) selected */
223 	cl = htb_find(skb->priority, sch);
224 	if (cl) {
225 		if (cl->level == 0)
226 			return cl;
227 		/* Start with inner filter chain if a non-leaf class is selected */
228 		tcf = rcu_dereference_bh(cl->filter_list);
229 	} else {
230 		tcf = rcu_dereference_bh(q->filter_list);
231 	}
232 
233 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
234 	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
235 #ifdef CONFIG_NET_CLS_ACT
236 		switch (result) {
237 		case TC_ACT_QUEUED:
238 		case TC_ACT_STOLEN:
239 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
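			/* fall through */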
240 		case TC_ACT_SHOT:
241 			return NULL;
242 		}
243 #endif
244 		cl = (void *)res.class;
245 		if (!cl) {
246 			if (res.classid == sch->handle)
247 				return HTB_DIRECT;	/* X:0 (direct flow) */
248 			cl = htb_find(res.classid, sch);
249 			if (!cl)
250 				break;	/* filter selected invalid classid */
251 		}
252 		if (!cl->level)
253 			return cl;	/* we hit leaf; return it */
254 
255 		/* we have got inner class; apply inner filter chain */
256 		tcf = rcu_dereference_bh(cl->filter_list);
257 	}
258 	/* classification failed; try to use default class */
259 	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
260 	if (!cl || cl->level)
261 		return HTB_DIRECT;	/* bad default .. this is safe bet */
262 	return cl;
263 }
264 
265 /**
266  * htb_add_to_id_tree - adds class to the round robin list
267  *
268  * Routine adds class to the list (actually tree) sorted by classid.
269  * Make sure that class is not already on such list for given prio.
270  */
271 static void htb_add_to_id_tree(struct rb_root *root,
272 			       struct htb_class *cl, int prio)
273 {
274 	struct rb_node **p = &root->rb_node, *parent = NULL;
275 
276 	while (*p) {
277 		struct htb_class *c;
278 		parent = *p;
279 		c = rb_entry(parent, struct htb_class, node[prio]);
280 
281 		if (cl->common.classid > c->common.classid)
282 			p = &parent->rb_right;
283 		else
284 			p = &parent->rb_left;
285 	}
286 	rb_link_node(&cl->node[prio], parent, p);
287 	rb_insert_color(&cl->node[prio], root);
288 }
289 
290 /**
291  * htb_add_to_wait_tree - adds class to the event queue with delay
292  *
293  * The class is added to the priority event queue to indicate that the class
294  * will change its mode at time cl->pq_key (q->now + delay, in nanoseconds).
295  * Make sure that the class is not already in the queue.
296  */
297 static void htb_add_to_wait_tree(struct htb_sched *q,
298 				 struct htb_class *cl, s64 delay)
299 {
300 	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
301 
302 	cl->pq_key = q->now + delay;
303 	if (cl->pq_key == q->now)
304 		cl->pq_key++;
305 
306 	/* update the nearest event cache */
307 	if (q->near_ev_cache[cl->level] > cl->pq_key)
308 		q->near_ev_cache[cl->level] = cl->pq_key;
309 
310 	while (*p) {
311 		struct htb_class *c;
312 		parent = *p;
313 		c = rb_entry(parent, struct htb_class, pq_node);
314 		if (cl->pq_key >= c->pq_key)
315 			p = &parent->rb_right;
316 		else
317 			p = &parent->rb_left;
318 	}
319 	rb_link_node(&cl->pq_node, parent, p);
320 	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
321 }
322 
323 /**
324  * htb_next_rb_node - finds next node in binary tree
325  *
326  * When we are past the last key, *n is set to NULL.
327  * Average complexity is 2 steps per call.
328  */
329 static inline void htb_next_rb_node(struct rb_node **n)
330 {
331 	*n = rb_next(*n);
332 }
333 
334 /**
335  * htb_add_class_to_row - add class to its row
336  *
337  * The class is added to row at priorities marked in mask.
338  * It does nothing if mask == 0.
339  */
340 static inline void htb_add_class_to_row(struct htb_sched *q,
341 					struct htb_class *cl, int mask)
342 {
343 	q->row_mask[cl->level] |= mask;
344 	while (mask) {
345 		int prio = ffz(~mask);
346 		mask &= ~(1 << prio);
347 		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
348 	}
349 }
350 
351 /* If this triggers, it is a bug in this code, but it need not be fatal */
352 static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
353 {
354 	if (RB_EMPTY_NODE(rb)) {
355 		WARN_ON(1);
356 	} else {
357 		rb_erase(rb, root);
358 		RB_CLEAR_NODE(rb);
359 	}
360 }
361 
362 
363 /**
364  * htb_remove_class_from_row - removes class from its row
365  *
366  * The class is removed from row at priorities marked in mask.
367  * It does nothing if mask == 0.
368  */
369 static inline void htb_remove_class_from_row(struct htb_sched *q,
370 						 struct htb_class *cl, int mask)
371 {
372 	int m = 0;
373 	struct htb_level *hlevel = &q->hlevel[cl->level];
374 
375 	while (mask) {
376 		int prio = ffz(~mask);
377 		struct htb_prio *hprio = &hlevel->hprio[prio];
378 
379 		mask &= ~(1 << prio);
380 		if (hprio->ptr == cl->node + prio)
381 			htb_next_rb_node(&hprio->ptr);
382 
383 		htb_safe_rb_erase(cl->node + prio, &hprio->row);
384 		if (!hprio->row.rb_node)
385 			m |= 1 << prio;
386 	}
387 	q->row_mask[cl->level] &= ~m;
388 }
389 
390 /**
391  * htb_activate_prios - creates active class's feed chain
392  *
393  * The class is connected to ancestors and/or appropriate rows
394  * for priorities it is participating in. cl->cmode must be the new
395  * (activated) mode. It does nothing if cl->prio_activity == 0.
396  */
397 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
398 {
399 	struct htb_class *p = cl->parent;
400 	long m, mask = cl->prio_activity;
401 
402 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
403 		m = mask;
404 		while (m) {
405 			int prio = ffz(~m);
406 			m &= ~(1 << prio);
407 
408 			if (p->un.inner.clprio[prio].feed.rb_node)
409 				/* parent already has its feed in use, so
410 				 * reset the bit in mask as parent is already ok
411 				 */
412 				mask &= ~(1 << prio);
413 
414 			htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
415 		}
416 		p->prio_activity |= mask;
417 		cl = p;
418 		p = cl->parent;
419 
420 	}
421 	if (cl->cmode == HTB_CAN_SEND && mask)
422 		htb_add_class_to_row(q, cl, mask);
423 }
424 
425 /**
426  * htb_deactivate_prios - remove class from feed chain
427  *
428  * cl->cmode must represent old mode (before deactivation). It does
429  * nothing if cl->prio_activity == 0. Class is removed from all feed
430  * chains and rows.
431  */
432 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
433 {
434 	struct htb_class *p = cl->parent;
435 	long m, mask = cl->prio_activity;
436 
437 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
438 		m = mask;
439 		mask = 0;
440 		while (m) {
441 			int prio = ffz(~m);
442 			m &= ~(1 << prio);
443 
444 			if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
445 				/* we are removing child which is pointed to from
446 				 * parent feed - forget the pointer but remember
447 				 * classid
448 				 */
449 				p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
450 				p->un.inner.clprio[prio].ptr = NULL;
451 			}
452 
453 			htb_safe_rb_erase(cl->node + prio,
454 					  &p->un.inner.clprio[prio].feed);
455 
456 			if (!p->un.inner.clprio[prio].feed.rb_node)
457 				mask |= 1 << prio;
458 		}
459 
460 		p->prio_activity &= ~mask;
461 		cl = p;
462 		p = cl->parent;
463 
464 	}
465 	if (cl->cmode == HTB_CAN_SEND && mask)
466 		htb_remove_class_from_row(q, cl, mask);
467 }
468 
469 static inline s64 htb_lowater(const struct htb_class *cl)
470 {
471 	if (htb_hysteresis)
472 		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
473 	else
474 		return 0;
475 }
476 static inline s64 htb_hiwater(const struct htb_class *cl)
477 {
478 	if (htb_hysteresis)
479 		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
480 	else
481 		return 0;
482 }
483 
484 
485 /**
486  * htb_class_mode - computes and returns current class mode
487  *
488  * It computes cl's mode at time cl->t_c+diff and returns it. If mode
489  * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
490  * from now to time when cl will change its state.
491  * Also it is worth noting that class mode doesn't change simply
492  * at cl->{c,}tokens == 0; rather, there can be a hysteresis in the
493  * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
494  * mode transitions per time unit. The speed gain is about 1/6.
495  */
496 static inline enum htb_cmode
497 htb_class_mode(struct htb_class *cl, s64 *diff)
498 {
499 	s64 toks;
500 
501 	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
502 		*diff = -toks;
503 		return HTB_CANT_SEND;
504 	}
505 
506 	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
507 		return HTB_CAN_SEND;
508 
509 	*diff = -toks;
510 	return HTB_MAY_BORROW;
511 }
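/* A worked example (numbers are made up), with hysteresis off so both
 * watermarks are 0: if cl->ctokens + *diff == -40000 the class is over its
 * ceil, so htb_class_mode() returns HTB_CANT_SEND and sets *diff to 40000 ns,
 * the time until the ceil bucket refills to zero; callers then use that value
 * as the delay for htb_add_to_wait_tree(). If the ceil bucket is non-negative
 * but cl->tokens + *diff is still negative, the class may only borrow
 * (HTB_MAY_BORROW).
 */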
512 
513 /**
514  * htb_change_class_mode - changes class's mode
515  *
516  * This should be the only way to change a class's mode under normal
517  * circumstances. Routine will update feed lists linkage, change mode
518  * and add class to the wait event queue if appropriate. New mode should
519  * be different from old one and cl->pq_key has to be valid if changing
520  * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
521  */
522 static void
523 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
524 {
525 	enum htb_cmode new_mode = htb_class_mode(cl, diff);
526 
527 	if (new_mode == cl->cmode)
528 		return;
529 
530 	if (cl->prio_activity) {	/* not necessary: speed optimization */
531 		if (cl->cmode != HTB_CANT_SEND)
532 			htb_deactivate_prios(q, cl);
533 		cl->cmode = new_mode;
534 		if (new_mode != HTB_CANT_SEND)
535 			htb_activate_prios(q, cl);
536 	} else
537 		cl->cmode = new_mode;
538 }
539 
540 /**
541  * htb_activate - inserts leaf cl into appropriate active feeds
542  *
543  * Routine learns (new) priority of leaf and activates feed chain
544  * for the prio. It can be called on already active leaf safely.
545  * It also adds leaf into droplist.
546  */
547 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
548 {
549 	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
550 
551 	if (!cl->prio_activity) {
552 		cl->prio_activity = 1 << cl->prio;
553 		htb_activate_prios(q, cl);
554 		list_add_tail(&cl->un.leaf.drop_list,
555 			      q->drops + cl->prio);
556 	}
557 }
558 
559 /**
560  * htb_deactivate - remove leaf cl from active feeds
561  *
562  * Make sure that leaf is active. In other words it can't be called
563  * with a non-active leaf. It also removes class from the drop list.
564  */
565 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
566 {
567 	WARN_ON(!cl->prio_activity);
568 
569 	htb_deactivate_prios(q, cl);
570 	cl->prio_activity = 0;
571 	list_del_init(&cl->un.leaf.drop_list);
572 }
573 
574 static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
575 			     struct qdisc_skb_head *qh)
576 {
577 	struct sk_buff *last = qh->tail;
578 
579 	if (last) {
580 		skb->next = NULL;
581 		last->next = skb;
582 		qh->tail = skb;
583 	} else {
584 		qh->tail = skb;
585 		qh->head = skb;
586 	}
587 	qh->qlen++;
588 }
589 
590 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
591 		       struct sk_buff **to_free)
592 {
593 	int uninitialized_var(ret);
594 	struct htb_sched *q = qdisc_priv(sch);
595 	struct htb_class *cl = htb_classify(skb, sch, &ret);
596 
597 	if (cl == HTB_DIRECT) {
598 		/* enqueue to helper queue */
599 		if (q->direct_queue.qlen < q->direct_qlen) {
600 			htb_enqueue_tail(skb, sch, &q->direct_queue);
601 			q->direct_pkts++;
602 		} else {
603 			return qdisc_drop(skb, sch, to_free);
604 		}
605 #ifdef CONFIG_NET_CLS_ACT
606 	} else if (!cl) {
607 		if (ret & __NET_XMIT_BYPASS)
608 			qdisc_qstats_drop(sch);
609 		__qdisc_drop(skb, to_free);
610 		return ret;
611 #endif
612 	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
613 					to_free)) != NET_XMIT_SUCCESS) {
614 		if (net_xmit_drop_count(ret)) {
615 			qdisc_qstats_drop(sch);
616 			cl->drops++;
617 		}
618 		return ret;
619 	} else {
620 		htb_activate(q, cl);
621 	}
622 
623 	qdisc_qstats_backlog_inc(sch, skb);
624 	sch->q.qlen++;
625 	return NET_XMIT_SUCCESS;
626 }
627 
628 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
629 {
630 	s64 toks = diff + cl->tokens;
631 
632 	if (toks > cl->buffer)
633 		toks = cl->buffer;
634 	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
635 	if (toks <= -cl->mbuffer)
636 		toks = 1 - cl->mbuffer;
637 
638 	cl->tokens = toks;
639 }
640 
641 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
642 {
643 	s64 toks = diff + cl->ctokens;
644 
645 	if (toks > cl->cbuffer)
646 		toks = cl->cbuffer;
647 	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
648 	if (toks <= -cl->mbuffer)
649 		toks = 1 - cl->mbuffer;
650 
651 	cl->ctokens = toks;
652 }
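/* Rough sketch of the accounting above (ignoring any link-layer overhead
 * configured in the ratecfg): psched_l2t_ns() converts a packet length into
 * the time the class needs to transmit it at the given rate, so e.g. a
 * 1500 byte packet on a class with rate 125000 bytes/s (1 Mbit/s) drains
 * 1500 / 125000 s = 12 ms worth of tokens, while the elapsed time in "diff"
 * refills the bucket, capped at cl->buffer resp. cl->cbuffer.
 */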
653 
654 /**
655  * htb_charge_class - charges amount "bytes" to leaf and ancestors
656  *
657  * Routine assumes that packet "bytes" long was dequeued from leaf cl
658  * borrowing from "level". It accounts bytes to ceil leaky bucket for
659  * leaf and all ancestors and to rate bucket for ancestors at levels
660  * "level" and higher. It also handles possible change of mode resulting
661  * from the update. Note that mode can also increase here (MAY_BORROW to
662  * CAN_SEND) because we can use a more precise clock than the event queue here.
663  * In such case we remove class from event queue first.
664  */
665 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
666 			     int level, struct sk_buff *skb)
667 {
668 	int bytes = qdisc_pkt_len(skb);
669 	enum htb_cmode old_mode;
670 	s64 diff;
671 
672 	while (cl) {
673 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
674 		if (cl->level >= level) {
675 			if (cl->level == level)
676 				cl->xstats.lends++;
677 			htb_accnt_tokens(cl, bytes, diff);
678 		} else {
679 			cl->xstats.borrows++;
680 			cl->tokens += diff;	/* we moved t_c; update tokens */
681 		}
682 		htb_accnt_ctokens(cl, bytes, diff);
683 		cl->t_c = q->now;
684 
685 		old_mode = cl->cmode;
686 		diff = 0;
687 		htb_change_class_mode(q, cl, &diff);
688 		if (old_mode != cl->cmode) {
689 			if (old_mode != HTB_CAN_SEND)
690 				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
691 			if (cl->cmode != HTB_CAN_SEND)
692 				htb_add_to_wait_tree(q, cl, diff);
693 		}
694 
695 		/* update basic stats except for leaves which are already updated */
696 		if (cl->level)
697 			bstats_update(&cl->bstats, skb);
698 
699 		cl = cl->parent;
700 	}
701 }
702 
703 /**
704  * htb_do_events - make mode changes to classes at the level
705  *
706  * Scans event queue for pending events and applies them. Returns time of
707  * next pending event (0 for no event in pq, q->now for too many events).
708  * Note: Applied are events which have cl->pq_key <= q->now.
709  */
710 static s64 htb_do_events(struct htb_sched *q, const int level,
711 			 unsigned long start)
712 {
713 	/* don't run for longer than 2 jiffies; 2 is used instead of
714 	 * 1 to simplify things when jiffy is going to be incremented
715 	 * too soon
716 	 */
717 	unsigned long stop_at = start + 2;
718 	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
719 
720 	while (time_before(jiffies, stop_at)) {
721 		struct htb_class *cl;
722 		s64 diff;
723 		struct rb_node *p = rb_first(wait_pq);
724 
725 		if (!p)
726 			return 0;
727 
728 		cl = rb_entry(p, struct htb_class, pq_node);
729 		if (cl->pq_key > q->now)
730 			return cl->pq_key;
731 
732 		htb_safe_rb_erase(p, wait_pq);
733 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
734 		htb_change_class_mode(q, cl, &diff);
735 		if (cl->cmode != HTB_CAN_SEND)
736 			htb_add_to_wait_tree(q, cl, diff);
737 	}
738 
739 	/* too much load - let's continue after a break for scheduling */
740 	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
741 		pr_warn("htb: too many events!\n");
742 		q->warned |= HTB_WARN_TOOMANYEVENTS;
743 	}
744 
745 	return q->now;
746 }
747 
748 /* Returns class->node+prio from id-tree where class's id is >= id. NULL
749  * if no such one exists.
750  */
751 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
752 					      u32 id)
753 {
754 	struct rb_node *r = NULL;
755 	while (n) {
756 		struct htb_class *cl =
757 		    rb_entry(n, struct htb_class, node[prio]);
758 
759 		if (id > cl->common.classid) {
760 			n = n->rb_right;
761 		} else if (id < cl->common.classid) {
762 			r = n;
763 			n = n->rb_left;
764 		} else {
765 			return n;
766 		}
767 	}
768 	return r;
769 }
770 
771 /**
772  * htb_lookup_leaf - returns next leaf class in DRR order
773  *
774  * Find the leaf where the current feed pointer points to.
775  */
776 static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
777 {
778 	int i;
779 	struct {
780 		struct rb_node *root;
781 		struct rb_node **pptr;
782 		u32 *pid;
783 	} stk[TC_HTB_MAXDEPTH], *sp = stk;
784 
785 	BUG_ON(!hprio->row.rb_node);
786 	sp->root = hprio->row.rb_node;
787 	sp->pptr = &hprio->ptr;
788 	sp->pid = &hprio->last_ptr_id;
789 
790 	for (i = 0; i < 65535; i++) {
791 		if (!*sp->pptr && *sp->pid) {
792 			/* ptr was invalidated but id is valid - try to recover
793 			 * the original or next ptr
794 			 */
795 			*sp->pptr =
796 			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
797 		}
798 		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
799 				 * can become out of date quickly
800 				 */
801 		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
802 			*sp->pptr = sp->root;
803 			while ((*sp->pptr)->rb_left)
804 				*sp->pptr = (*sp->pptr)->rb_left;
805 			if (sp > stk) {
806 				sp--;
807 				if (!*sp->pptr) {
808 					WARN_ON(1);
809 					return NULL;
810 				}
811 				htb_next_rb_node(sp->pptr);
812 			}
813 		} else {
814 			struct htb_class *cl;
815 			struct htb_prio *clp;
816 
817 			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
818 			if (!cl->level)
819 				return cl;
820 			clp = &cl->un.inner.clprio[prio];
821 			(++sp)->root = clp->feed.rb_node;
822 			sp->pptr = &clp->ptr;
823 			sp->pid = &clp->last_ptr_id;
824 		}
825 	}
826 	WARN_ON(1);
827 	return NULL;
828 }
829 
830 /* dequeues packet at given priority and level; call only if
831  * you are sure that there is active class at prio/level
832  */
833 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
834 					const int level)
835 {
836 	struct sk_buff *skb = NULL;
837 	struct htb_class *cl, *start;
838 	struct htb_level *hlevel = &q->hlevel[level];
839 	struct htb_prio *hprio = &hlevel->hprio[prio];
840 
841 	/* look initial class up in the row */
842 	start = cl = htb_lookup_leaf(hprio, prio);
843 
844 	do {
845 next:
846 		if (unlikely(!cl))
847 			return NULL;
848 
849 		/* class can be empty - it is unlikely but can be true if leaf
850 		 * qdisc drops packets in enqueue routine or if someone used
851 		 * graft operation on the leaf since last dequeue;
852 		 * simply deactivate and skip such class
853 		 */
854 		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
855 			struct htb_class *next;
856 			htb_deactivate(q, cl);
857 
858 			/* row/level might become empty */
859 			if ((q->row_mask[level] & (1 << prio)) == 0)
860 				return NULL;
861 
862 			next = htb_lookup_leaf(hprio, prio);
863 
864 			if (cl == start)	/* fix start if we just deleted it */
865 				start = next;
866 			cl = next;
867 			goto next;
868 		}
869 
870 		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
871 		if (likely(skb != NULL))
872 			break;
873 
874 		qdisc_warn_nonwc("htb", cl->un.leaf.q);
875 		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
876 					 &q->hlevel[0].hprio[prio].ptr);
877 		cl = htb_lookup_leaf(hprio, prio);
878 
879 	} while (cl != start);
880 
881 	if (likely(skb != NULL)) {
882 		bstats_update(&cl->bstats, skb);
883 		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
884 		if (cl->un.leaf.deficit[level] < 0) {
885 			cl->un.leaf.deficit[level] += cl->quantum;
886 			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
887 						 &q->hlevel[0].hprio[prio].ptr);
888 		}
889 		/* this used to be after charge_class but this constellation
890 		 * gives us slightly better performance
891 		 */
892 		if (!cl->un.leaf.q->q.qlen)
893 			htb_deactivate(q, cl);
894 		htb_charge_class(q, cl, level, skb);
895 	}
896 	return skb;
897 }
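/* DRR illustration for the deficit handling above (numbers are made up):
 * with quantum = 1500, each dequeued packet subtracts its length from
 * deficit[level]; once the deficit drops below zero the class receives
 * another quantum and the round-robin pointer advances, so active classes
 * competing at the same prio/level get bandwidth roughly proportional to
 * their quantum.
 */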
898 
899 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
900 {
901 	struct sk_buff *skb;
902 	struct htb_sched *q = qdisc_priv(sch);
903 	int level;
904 	s64 next_event;
905 	unsigned long start_at;
906 
907 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
908 	skb = __qdisc_dequeue_head(&q->direct_queue);
909 	if (skb != NULL) {
910 ok:
911 		qdisc_bstats_update(sch, skb);
912 		qdisc_qstats_backlog_dec(sch, skb);
913 		sch->q.qlen--;
914 		return skb;
915 	}
916 
917 	if (!sch->q.qlen)
918 		goto fin;
919 	q->now = ktime_get_ns();
920 	start_at = jiffies;
921 
922 	next_event = q->now + 5LLU * NSEC_PER_SEC;
923 
924 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
925 		/* common case optimization - skip event handler quickly */
926 		int m;
927 		s64 event = q->near_ev_cache[level];
928 
929 		if (q->now >= event) {
930 			event = htb_do_events(q, level, start_at);
931 			if (!event)
932 				event = q->now + NSEC_PER_SEC;
933 			q->near_ev_cache[level] = event;
934 		}
935 
936 		if (next_event > event)
937 			next_event = event;
938 
939 		m = ~q->row_mask[level];
940 		while (m != (int)(-1)) {
941 			int prio = ffz(m);
942 
943 			m |= 1 << prio;
944 			skb = htb_dequeue_tree(q, prio, level);
945 			if (likely(skb != NULL))
946 				goto ok;
947 		}
948 	}
949 	qdisc_qstats_overlimit(sch);
950 	if (likely(next_event > q->now))
951 		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
952 	else
953 		schedule_work(&q->work);
954 fin:
955 	return skb;
956 }
957 
958 /* reset all classes */
959 /* always called under BH & queue lock */
960 static void htb_reset(struct Qdisc *sch)
961 {
962 	struct htb_sched *q = qdisc_priv(sch);
963 	struct htb_class *cl;
964 	unsigned int i;
965 
966 	for (i = 0; i < q->clhash.hashsize; i++) {
967 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
968 			if (cl->level)
969 				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
970 			else {
971 				if (cl->un.leaf.q)
972 					qdisc_reset(cl->un.leaf.q);
973 				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
974 			}
975 			cl->prio_activity = 0;
976 			cl->cmode = HTB_CAN_SEND;
977 		}
978 	}
979 	qdisc_watchdog_cancel(&q->watchdog);
980 	__qdisc_reset_queue(&q->direct_queue);
981 	sch->q.qlen = 0;
982 	sch->qstats.backlog = 0;
983 	memset(q->hlevel, 0, sizeof(q->hlevel));
984 	memset(q->row_mask, 0, sizeof(q->row_mask));
985 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
986 		INIT_LIST_HEAD(q->drops + i);
987 }
988 
989 static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
990 	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
991 	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
992 	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
993 	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
994 	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
995 	[TCA_HTB_RATE64] = { .type = NLA_U64 },
996 	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
997 };
998 
999 static void htb_work_func(struct work_struct *work)
1000 {
1001 	struct htb_sched *q = container_of(work, struct htb_sched, work);
1002 	struct Qdisc *sch = q->watchdog.qdisc;
1003 
1004 	rcu_read_lock();
1005 	__netif_schedule(qdisc_root(sch));
1006 	rcu_read_unlock();
1007 }
1008 
1009 static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1010 {
1011 	struct htb_sched *q = qdisc_priv(sch);
1012 	struct nlattr *tb[TCA_HTB_MAX + 1];
1013 	struct tc_htb_glob *gopt;
1014 	int err;
1015 	int i;
1016 
1017 	if (!opt)
1018 		return -EINVAL;
1019 
1020 	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
1021 	if (err < 0)
1022 		return err;
1023 
1024 	if (!tb[TCA_HTB_INIT])
1025 		return -EINVAL;
1026 
1027 	gopt = nla_data(tb[TCA_HTB_INIT]);
1028 	if (gopt->version != HTB_VER >> 16)
1029 		return -EINVAL;
1030 
1031 	err = qdisc_class_hash_init(&q->clhash);
1032 	if (err < 0)
1033 		return err;
1034 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
1035 		INIT_LIST_HEAD(q->drops + i);
1036 
1037 	qdisc_watchdog_init(&q->watchdog, sch);
1038 	INIT_WORK(&q->work, htb_work_func);
1039 	qdisc_skb_head_init(&q->direct_queue);
1040 
1041 	if (tb[TCA_HTB_DIRECT_QLEN])
1042 		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
1043 	else
1044 		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1045 
1046 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1047 		q->rate2quantum = 1;
1048 	q->defcls = gopt->defcls;
1049 
1050 	return 0;
1051 }
1052 
1053 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1054 {
1055 	struct htb_sched *q = qdisc_priv(sch);
1056 	struct nlattr *nest;
1057 	struct tc_htb_glob gopt;
1058 
1059 	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
1060 	 * no change can happen on the qdisc parameters.
1061 	 */
1062 
1063 	gopt.direct_pkts = q->direct_pkts;
1064 	gopt.version = HTB_VER;
1065 	gopt.rate2quantum = q->rate2quantum;
1066 	gopt.defcls = q->defcls;
1067 	gopt.debug = 0;
1068 
1069 	nest = nla_nest_start(skb, TCA_OPTIONS);
1070 	if (nest == NULL)
1071 		goto nla_put_failure;
1072 	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1073 	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
1074 		goto nla_put_failure;
1075 
1076 	return nla_nest_end(skb, nest);
1077 
1078 nla_put_failure:
1079 	nla_nest_cancel(skb, nest);
1080 	return -1;
1081 }
1082 
1083 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1084 			  struct sk_buff *skb, struct tcmsg *tcm)
1085 {
1086 	struct htb_class *cl = (struct htb_class *)arg;
1087 	struct nlattr *nest;
1088 	struct tc_htb_opt opt;
1089 
1090 	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
1091 	 * no change can happen on the class parameters.
1092 	 */
1093 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1094 	tcm->tcm_handle = cl->common.classid;
1095 	if (!cl->level && cl->un.leaf.q)
1096 		tcm->tcm_info = cl->un.leaf.q->handle;
1097 
1098 	nest = nla_nest_start(skb, TCA_OPTIONS);
1099 	if (nest == NULL)
1100 		goto nla_put_failure;
1101 
1102 	memset(&opt, 0, sizeof(opt));
1103 
1104 	psched_ratecfg_getrate(&opt.rate, &cl->rate);
1105 	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1106 	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1107 	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1108 	opt.quantum = cl->quantum;
1109 	opt.prio = cl->prio;
1110 	opt.level = cl->level;
1111 	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1112 		goto nla_put_failure;
1113 	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1114 	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1115 			      TCA_HTB_PAD))
1116 		goto nla_put_failure;
1117 	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1118 	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1119 			      TCA_HTB_PAD))
1120 		goto nla_put_failure;
1121 
1122 	return nla_nest_end(skb, nest);
1123 
1124 nla_put_failure:
1125 	nla_nest_cancel(skb, nest);
1126 	return -1;
1127 }
1128 
1129 static int
1130 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1131 {
1132 	struct htb_class *cl = (struct htb_class *)arg;
1133 	struct gnet_stats_queue qs = {
1134 		.drops = cl->drops,
1135 	};
1136 	__u32 qlen = 0;
1137 
1138 	if (!cl->level && cl->un.leaf.q) {
1139 		qlen = cl->un.leaf.q->q.qlen;
1140 		qs.backlog = cl->un.leaf.q->qstats.backlog;
1141 	}
1142 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1143 				    INT_MIN, INT_MAX);
1144 	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1145 				     INT_MIN, INT_MAX);
1146 
1147 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1148 				  d, NULL, &cl->bstats) < 0 ||
1149 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1150 	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
1151 		return -1;
1152 
1153 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1154 }
1155 
1156 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1157 		     struct Qdisc **old)
1158 {
1159 	struct htb_class *cl = (struct htb_class *)arg;
1160 
1161 	if (cl->level)
1162 		return -EINVAL;
1163 	if (new == NULL &&
1164 	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1165 				     cl->common.classid)) == NULL)
1166 		return -ENOBUFS;
1167 
1168 	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
1169 	return 0;
1170 }
1171 
1172 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1173 {
1174 	struct htb_class *cl = (struct htb_class *)arg;
1175 	return !cl->level ? cl->un.leaf.q : NULL;
1176 }
1177 
1178 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1179 {
1180 	struct htb_class *cl = (struct htb_class *)arg;
1181 
1182 	if (cl->un.leaf.q->q.qlen == 0)
1183 		htb_deactivate(qdisc_priv(sch), cl);
1184 }
1185 
1186 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1187 {
1188 	struct htb_class *cl = htb_find(classid, sch);
1189 	if (cl)
1190 		cl->refcnt++;
1191 	return (unsigned long)cl;
1192 }
1193 
1194 static inline int htb_parent_last_child(struct htb_class *cl)
1195 {
1196 	if (!cl->parent)
1197 		/* the root class */
1198 		return 0;
1199 	if (cl->parent->children > 1)
1200 		/* not the last child */
1201 		return 0;
1202 	return 1;
1203 }
1204 
1205 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1206 			       struct Qdisc *new_q)
1207 {
1208 	struct htb_class *parent = cl->parent;
1209 
1210 	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
1211 
1212 	if (parent->cmode != HTB_CAN_SEND)
1213 		htb_safe_rb_erase(&parent->pq_node,
1214 				  &q->hlevel[parent->level].wait_pq);
1215 
1216 	parent->level = 0;
1217 	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1218 	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
1219 	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
1220 	parent->tokens = parent->buffer;
1221 	parent->ctokens = parent->cbuffer;
1222 	parent->t_c = ktime_get_ns();
1223 	parent->cmode = HTB_CAN_SEND;
1224 }
1225 
1226 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1227 {
1228 	if (!cl->level) {
1229 		WARN_ON(!cl->un.leaf.q);
1230 		qdisc_destroy(cl->un.leaf.q);
1231 	}
1232 	gen_kill_estimator(&cl->rate_est);
1233 	tcf_destroy_chain(&cl->filter_list);
1234 	kfree(cl);
1235 }
1236 
1237 static void htb_destroy(struct Qdisc *sch)
1238 {
1239 	struct htb_sched *q = qdisc_priv(sch);
1240 	struct hlist_node *next;
1241 	struct htb_class *cl;
1242 	unsigned int i;
1243 
1244 	cancel_work_sync(&q->work);
1245 	qdisc_watchdog_cancel(&q->watchdog);
1246 	/* This line used to be after htb_destroy_class call below
1247 	 * and surprisingly it worked in 2.4. But it must precede it
1248 	 * because filters need their target class alive to be able to call
1249 	 * unbind_filter on it (without an Oops).
1250 	 */
1251 	tcf_destroy_chain(&q->filter_list);
1252 
1253 	for (i = 0; i < q->clhash.hashsize; i++) {
1254 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
1255 			tcf_destroy_chain(&cl->filter_list);
1256 	}
1257 	for (i = 0; i < q->clhash.hashsize; i++) {
1258 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1259 					  common.hnode)
1260 			htb_destroy_class(sch, cl);
1261 	}
1262 	qdisc_class_hash_destroy(&q->clhash);
1263 	__qdisc_reset_queue(&q->direct_queue);
1264 }
1265 
1266 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1267 {
1268 	struct htb_sched *q = qdisc_priv(sch);
1269 	struct htb_class *cl = (struct htb_class *)arg;
1270 	struct Qdisc *new_q = NULL;
1271 	int last_child = 0;
1272 
1273 	/* TODO: why don't we allow deleting a subtree? references? does
1274 	 * tc subsys guarantee us that in htb_destroy it holds no class
1275 	 * refs so that we can remove children safely there ?
1276 	 */
1277 	if (cl->children || cl->filter_cnt)
1278 		return -EBUSY;
1279 
1280 	if (!cl->level && htb_parent_last_child(cl)) {
1281 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1282 					  cl->parent->common.classid);
1283 		last_child = 1;
1284 	}
1285 
1286 	sch_tree_lock(sch);
1287 
1288 	if (!cl->level) {
1289 		unsigned int qlen = cl->un.leaf.q->q.qlen;
1290 		unsigned int backlog = cl->un.leaf.q->qstats.backlog;
1291 
1292 		qdisc_reset(cl->un.leaf.q);
1293 		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
1294 	}
1295 
1296 	/* delete from hash and active; remainder in destroy_class */
1297 	qdisc_class_hash_remove(&q->clhash, &cl->common);
1298 	if (cl->parent)
1299 		cl->parent->children--;
1300 
1301 	if (cl->prio_activity)
1302 		htb_deactivate(q, cl);
1303 
1304 	if (cl->cmode != HTB_CAN_SEND)
1305 		htb_safe_rb_erase(&cl->pq_node,
1306 				  &q->hlevel[cl->level].wait_pq);
1307 
1308 	if (last_child)
1309 		htb_parent_to_leaf(q, cl, new_q);
1310 
1311 	BUG_ON(--cl->refcnt == 0);
1312 	/*
1313 	 * This shouldn't happen: we "hold" one cops->get() when called
1314 	 * from tc_ctl_tclass; the destroy method is done from cops->put().
1315 	 */
1316 
1317 	sch_tree_unlock(sch);
1318 	return 0;
1319 }
1320 
1321 static void htb_put(struct Qdisc *sch, unsigned long arg)
1322 {
1323 	struct htb_class *cl = (struct htb_class *)arg;
1324 
1325 	if (--cl->refcnt == 0)
1326 		htb_destroy_class(sch, cl);
1327 }
1328 
1329 static int htb_change_class(struct Qdisc *sch, u32 classid,
1330 			    u32 parentid, struct nlattr **tca,
1331 			    unsigned long *arg)
1332 {
1333 	int err = -EINVAL;
1334 	struct htb_sched *q = qdisc_priv(sch);
1335 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
1336 	struct nlattr *opt = tca[TCA_OPTIONS];
1337 	struct nlattr *tb[TCA_HTB_MAX + 1];
1338 	struct tc_htb_opt *hopt;
1339 	u64 rate64, ceil64;
1340 
1341 	/* extract all subattrs from opt attr */
1342 	if (!opt)
1343 		goto failure;
1344 
1345 	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
1346 	if (err < 0)
1347 		goto failure;
1348 
1349 	err = -EINVAL;
1350 	if (tb[TCA_HTB_PARMS] == NULL)
1351 		goto failure;
1352 
1353 	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1354 
1355 	hopt = nla_data(tb[TCA_HTB_PARMS]);
1356 	if (!hopt->rate.rate || !hopt->ceil.rate)
1357 		goto failure;
1358 
1359 	/* Keeping backward compatibility with rate_table based iproute2 tc */
1360 	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1361 		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));
1362 
1363 	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1364 		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));
1365 
1366 	if (!cl) {		/* new class */
1367 		struct Qdisc *new_q;
1368 		int prio;
1369 		struct {
1370 			struct nlattr		nla;
1371 			struct gnet_estimator	opt;
1372 		} est = {
1373 			.nla = {
1374 				.nla_len	= nla_attr_size(sizeof(est.opt)),
1375 				.nla_type	= TCA_RATE,
1376 			},
1377 			.opt = {
1378 				/* 4s interval, 16s averaging constant */
1379 				.interval	= 2,
1380 				.ewma_log	= 2,
1381 			},
1382 		};
1383 
1384 		/* check for valid classid */
1385 		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1386 		    htb_find(classid, sch))
1387 			goto failure;
1388 
1389 		/* check maximal depth */
1390 		if (parent && parent->parent && parent->parent->level < 2) {
1391 			pr_err("htb: tree is too deep\n");
1392 			goto failure;
1393 		}
1394 		err = -ENOBUFS;
1395 		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1396 		if (!cl)
1397 			goto failure;
1398 
1399 		if (htb_rate_est || tca[TCA_RATE]) {
1400 			err = gen_new_estimator(&cl->bstats, NULL,
1401 						&cl->rate_est,
1402 						NULL,
1403 						qdisc_root_sleeping_running(sch),
1404 						tca[TCA_RATE] ? : &est.nla);
1405 			if (err) {
1406 				kfree(cl);
1407 				goto failure;
1408 			}
1409 		}
1410 
1411 		cl->refcnt = 1;
1412 		cl->children = 0;
1413 		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1414 		RB_CLEAR_NODE(&cl->pq_node);
1415 
1416 		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1417 			RB_CLEAR_NODE(&cl->node[prio]);
1418 
1419 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1420 		 * so it can't be used inside of sch_tree_lock
1421 		 * -- thanks to Karlis Peisenieks
1422 		 */
1423 		new_q = qdisc_create_dflt(sch->dev_queue,
1424 					  &pfifo_qdisc_ops, classid);
1425 		sch_tree_lock(sch);
1426 		if (parent && !parent->level) {
1427 			unsigned int qlen = parent->un.leaf.q->q.qlen;
1428 			unsigned int backlog = parent->un.leaf.q->qstats.backlog;
1429 
1430 			/* turn parent into inner node */
1431 			qdisc_reset(parent->un.leaf.q);
1432 			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
1433 			qdisc_destroy(parent->un.leaf.q);
1434 			if (parent->prio_activity)
1435 				htb_deactivate(q, parent);
1436 
1437 			/* remove from evt list because of level change */
1438 			if (parent->cmode != HTB_CAN_SEND) {
1439 				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
1440 				parent->cmode = HTB_CAN_SEND;
1441 			}
1442 			parent->level = (parent->parent ? parent->parent->level
1443 					 : TC_HTB_MAXDEPTH) - 1;
1444 			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1445 		}
1446 		/* leaf (we) needs elementary qdisc */
1447 		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1448 
1449 		cl->common.classid = classid;
1450 		cl->parent = parent;
1451 
1452 		/* set class to be in HTB_CAN_SEND state */
1453 		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1454 		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1455 		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
1456 		cl->t_c = ktime_get_ns();
1457 		cl->cmode = HTB_CAN_SEND;
1458 
1459 		/* attach to the hash list and parent's family */
1460 		qdisc_class_hash_insert(&q->clhash, &cl->common);
1461 		if (parent)
1462 			parent->children++;
1463 		if (cl->un.leaf.q != &noop_qdisc)
1464 			qdisc_hash_add(cl->un.leaf.q, true);
1465 	} else {
1466 		if (tca[TCA_RATE]) {
1467 			err = gen_replace_estimator(&cl->bstats, NULL,
1468 						    &cl->rate_est,
1469 						    NULL,
1470 						    qdisc_root_sleeping_running(sch),
1471 						    tca[TCA_RATE]);
1472 			if (err)
1473 				return err;
1474 		}
1475 		sch_tree_lock(sch);
1476 	}
1477 
1478 	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1479 
1480 	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1481 
1482 	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1483 	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1484 
1485 	/* there used to be a nasty bug here: we have to check that the node
1486 	 * is really a leaf before changing cl->un.leaf !
1487 	 */
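	/* A sketch of the quantum computation below (rates are illustrative):
	 * a leaf with rate 12500000 bytes/s (100 Mbit/s) and the common r2q
	 * default of 10 gets quantum = 12500000 / 10 = 1250000, which exceeds
	 * 200000 and is therefore clamped with a warning unless the user
	 * supplied an explicit quantum.
	 */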
1488 	if (!cl->level) {
1489 		u64 quantum = cl->rate.rate_bytes_ps;
1490 
1491 		do_div(quantum, q->rate2quantum);
1492 		cl->quantum = min_t(u64, quantum, INT_MAX);
1493 
1494 		if (!hopt->quantum && cl->quantum < 1000) {
1495 			pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n",
1496 				cl->common.classid);
1497 			cl->quantum = 1000;
1498 		}
1499 		if (!hopt->quantum && cl->quantum > 200000) {
1500 			pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n",
1501 				cl->common.classid);
1502 			cl->quantum = 200000;
1503 		}
1504 		if (hopt->quantum)
1505 			cl->quantum = hopt->quantum;
1506 		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1507 			cl->prio = TC_HTB_NUMPRIO - 1;
1508 	}
1509 
1510 	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1511 	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1512 
1513 	sch_tree_unlock(sch);
1514 
1515 	qdisc_class_hash_grow(sch, &q->clhash);
1516 
1517 	*arg = (unsigned long)cl;
1518 	return 0;
1519 
1520 failure:
1521 	return err;
1522 }
1523 
1524 static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch,
1525 					     unsigned long arg)
1526 {
1527 	struct htb_sched *q = qdisc_priv(sch);
1528 	struct htb_class *cl = (struct htb_class *)arg;
1529 	struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list;
1530 
1531 	return fl;
1532 }
1533 
1534 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1535 				     u32 classid)
1536 {
1537 	struct htb_class *cl = htb_find(classid, sch);
1538 
1539 	/*if (cl && !cl->level) return 0;
1540 	 * The line above used to be there to prevent attaching filters to
1541 	 * leaves. But at least tc_index filter uses this just to get class
1542 	 * for other reasons, so we have to allow it.
1543 	 * ----
1544 	 * 19.6.2002 As Werner explained it is ok - bind filter is just
1545 	 * another way to "lock" the class - unlike "get" this lock can
1546 	 * be broken by class during destroy IIUC.
1547 	 */
1548 	if (cl)
1549 		cl->filter_cnt++;
1550 	return (unsigned long)cl;
1551 }
1552 
1553 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1554 {
1555 	struct htb_class *cl = (struct htb_class *)arg;
1556 
1557 	if (cl)
1558 		cl->filter_cnt--;
1559 }
1560 
1561 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1562 {
1563 	struct htb_sched *q = qdisc_priv(sch);
1564 	struct htb_class *cl;
1565 	unsigned int i;
1566 
1567 	if (arg->stop)
1568 		return;
1569 
1570 	for (i = 0; i < q->clhash.hashsize; i++) {
1571 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1572 			if (arg->count < arg->skip) {
1573 				arg->count++;
1574 				continue;
1575 			}
1576 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1577 				arg->stop = 1;
1578 				return;
1579 			}
1580 			arg->count++;
1581 		}
1582 	}
1583 }
1584 
1585 static const struct Qdisc_class_ops htb_class_ops = {
1586 	.graft		=	htb_graft,
1587 	.leaf		=	htb_leaf,
1588 	.qlen_notify	=	htb_qlen_notify,
1589 	.get		=	htb_get,
1590 	.put		=	htb_put,
1591 	.change		=	htb_change_class,
1592 	.delete		=	htb_delete,
1593 	.walk		=	htb_walk,
1594 	.tcf_chain	=	htb_find_tcf,
1595 	.bind_tcf	=	htb_bind_filter,
1596 	.unbind_tcf	=	htb_unbind_filter,
1597 	.dump		=	htb_dump_class,
1598 	.dump_stats	=	htb_dump_class_stats,
1599 };
1600 
1601 static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1602 	.cl_ops		=	&htb_class_ops,
1603 	.id		=	"htb",
1604 	.priv_size	=	sizeof(struct htb_sched),
1605 	.enqueue	=	htb_enqueue,
1606 	.dequeue	=	htb_dequeue,
1607 	.peek		=	qdisc_peek_dequeued,
1608 	.init		=	htb_init,
1609 	.reset		=	htb_reset,
1610 	.destroy	=	htb_destroy,
1611 	.dump		=	htb_dump,
1612 	.owner		=	THIS_MODULE,
1613 };
1614 
1615 static int __init htb_module_init(void)
1616 {
1617 	return register_qdisc(&htb_qdisc_ops);
1618 }
1619 static void __exit htb_module_exit(void)
1620 {
1621 	unregister_qdisc(&htb_qdisc_ops);
1622 }
1623 
1624 module_init(htb_module_init)
1625 module_exit(htb_module_exit)
1626 MODULE_LICENSE("GPL");
1627