/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rbtree.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf is ALWAYS at level 0 and root
    classes are at level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
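
/*
 * Illustrative example (assuming TC_HTB_MAXDEPTH == 8, as defined in
 * pkt_sched.h of this era): a root class sits at level 7, an inner child
 * of the root at level 6, while any class without children is at level 0
 * regardless of how deep it hangs in the hierarchy.
 */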

#define HTB_HSIZE 16		/* classid hash size */
#define HTB_EWMAC 2		/* rate average over HTB_EWMAC*HTB_HSIZE sec */
#define HTB_RATECM 1		/* whether to use rate computer */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must match the version number supplied by TC */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
/* used internally to keep the status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	/* general class parameters */
	u32 classid;
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

#ifdef HTB_RATECM
	/* rate measurement counters */
	unsigned long rate_bytes, sum_bytes;
	unsigned long rate_packets, sum_packets;
#endif

	/* topology */
	int level;		/* our level (see above) */
	struct htb_class *parent;	/* parent class */
	struct hlist_node hlist;	/* classid hash list item */
	struct list_head sibling;	/* sibling list item */
	struct list_head children;	/* children list */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int prio;
			int aprio;
			int quantum;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When a class changes from state 1->2 and disconnects
			   from its parent's feed, we lose the ptr value and
			   would start from the first child again. Here we
			   store the classid of the last valid ptr (used when
			   ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	unsigned long pq_key;	/* the same type as jiffies global */

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int warned;		/* only one warning about a non-work-conserving class */

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */
};

/* TODO: maybe compute rate when size is too large .. or drop ? */
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
		       int size)
{
	int slot = size >> rate->rate.cell_log;
	if (slot > 255) {
		cl->xstats.giants++;
		slot = 255;
	}
	return rate->data[slot];
}
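
/*
 * Illustrative example (numbers assumed, not from the source): with
 * cell_log == 3, a 1000-byte packet maps to slot 1000 >> 3 == 125, and
 * rate->data[125] holds the precomputed time needed to send a packet of
 * that cell at this rate. Anything longer than 255 cells is clamped to
 * slot 255 and counted in xstats.giants.
 */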

struct htb_sched {
	struct list_head root;	/* root classes list */
	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	unsigned long near_ev_cache[TC_HTB_MAXDEPTH];

	/* cached value of jiffies in dequeue */
	unsigned long jiffies;

	/* whether we hit a non-work-conserving class during this dequeue;
	   used to disable the mindelay complaint in dequeue */
	int nwc_hit;

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct timer_list timer;	/* send delay timer */
#ifdef HTB_RATECM
	struct timer_list rttim;	/* rate computer timer */
	int recmp_bucket;	/* which hash bucket to recompute next */
#endif

	/* non-shaped skbs; let them go directly through */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
	h ^= h >> 8;		/* stolen from cbq_hash */
	h ^= h >> 4;
	return h & 0xf;
}
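
/*
 * Worked example: classid 1:10 is the handle 0x00010010;
 * 0x00010010 ^ (0x00010010 >> 8) == 0x00010110, xoring that with itself
 * shifted right by 4 gives 0x00011101, so the class lands in bucket
 * 0x00011101 & 0xf == 1.
 */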

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	if (TC_H_MAJ(handle) != sch->handle)
		return NULL;

	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
		if (cl->classid == handle)
			return cl;
	}
	return NULL;
}

/**
 * htb_classify - classify a packet into a class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly through. In all other cases a leaf class is
 * returned. We allow direct class selection by classid in skb->priority.
 * Then we examine filters in the qdisc and in inner nodes (if a higher
 * filter points to the inner node). If we end up with classid MAJOR:0 we
 * enqueue the skb into the special internal fifo (direct). These packets
 * then go directly through. If we still have no valid leaf we try to use
 * the MAJOR:default leaf. If that is unsuccessful too, we finish and
 * return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow selecting a class by setting skb->priority to a valid
	   classid; note that nfmark can be used too by attaching a filter
	   fw with no rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
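			/* fall through */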
		case TC_ACT_SHOT:
			return NULL;
		}
#elif defined(CONFIG_NET_CLS_POLICE)
		if (result == TC_POLICE_SHOT)
			return HTB_DIRECT;
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got an inner class; apply the inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use the default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is a safe bet */
	return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->classid > c->classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the
 * class will change its mode after "delay" microseconds; the absolute
 * jiffies time of the change is stored in cl->pq_key. Make sure that
 * the class is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
	if (cl->pq_key == q->jiffies)
		cl->pq_key++;

	/* update the nearest event cache */
	if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (time_after_eq(cl->pq_key, c->pq_key))
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past the last key, *n is set to NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}
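
/*
 * Note on the ffz(~mask) idiom used above and below: ffz finds the first
 * zero bit, so ffz(~mask) yields the index of the lowest set bit of mask.
 * E.g. for mask == 0x6 it returns prio 1 first, and prio 2 once that bit
 * has been cleared.
 */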

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates the active class's feed chain
 *
 * The class is connected to its ancestors and/or the appropriate rows
 * for the priorities it participates in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use, so
				   reset the bit in mask as the parent is
				   already OK */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent the old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. The class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing a child which is pointed
				   to from the parent feed - forget the
				   pointer but remember the classid */
				p->un.inner.last_ptr_id[prio] = cl->classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
}
#else
#define htb_lowater(cl)	(0)
#define htb_hiwater(cl)	(0)
#endif
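
/*
 * Illustrative effect of the hysteresis watermarks: while a class is in
 * HTB_CAN_SEND it does not leave that mode the moment cl->tokens hits 0;
 * it keeps the mode until the tokens fall below -cl->buffer. Likewise a
 * class only becomes HTB_CANT_SEND once ctokens drop below -cl->cbuffer.
 * This trades a little shaping accuracy for fewer mode transitions (the
 * comment below quotes a speed gain of about 1/6).
 */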

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to the time difference
 * from now to the time when cl will change its state.
 * Also note that the class mode doesn't change simply at
 * cl->{c,}tokens == 0; rather there is hysteresis in the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
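
/*
 * Illustrative walk-through (hysteresis disabled, i.e. both watermarks
 * at 0; numbers assumed): if cl->ctokens + *diff == -5000 the class
 * cannot send even by borrowing, so the mode is HTB_CANT_SEND and *diff
 * becomes 5000 - the time the ceil bucket still needs to refill. If the
 * ceil bucket is fine but cl->tokens + *diff == -2000, the mode is
 * HTB_MAY_BORROW and *diff becomes 2000.
 */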

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed list linkage, change
 * the mode and add the class to the wait event queue if appropriate.
 * The new mode should be different from the old one and cl->pq_key has
 * to be valid if changing to a mode other than HTB_CAN_SEND (see
 * htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * The routine learns the (new) priority of the leaf and activates the
 * feed chain for that prio. It can safely be called on an already
 * active leaf. It also adds the leaf into the drop list.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that the leaf is active. In other words, it can't be called
 * with a non-active leaf. It also removes the class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
		cl->bstats.packets++;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets++;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}

/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int ret = NET_XMIT_SUCCESS;
	struct htb_class *cl = htb_classify(skb, sch, &ret);
	struct sk_buff *tskb;

	if (cl == HTB_DIRECT || !cl) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen && cl) {
			__skb_queue_head(&q->direct_queue, skb);
		} else {
			__skb_queue_head(&q->direct_queue, skb);
			tskb = __skb_dequeue_tail(&q->direct_queue);
			kfree_skb(tskb);
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else
		htb_activate(q, cl);

	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

static void htb_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	sch->flags &= ~TCQ_F_THROTTLED;
	wmb();
	netif_schedule(sch->dev);
}

#ifdef HTB_RATECM
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
static void htb_rate_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	/* lock queue so that we can muck with it */
	spin_lock_bh(&sch->dev->queue_lock);

	q->rttim.expires = jiffies + HZ;
	add_timer(&q->rttim);

	/* scan and recompute one bucket at a time */
	if (++q->recmp_bucket >= HTB_HSIZE)
		q->recmp_bucket = 0;

	hlist_for_each_entry(cl, p, q->hash + q->recmp_bucket, hlist) {
		RT_GEN(cl->sum_bytes, cl->rate_bytes);
		RT_GEN(cl->sum_packets, cl->rate_packets);
	}
	spin_unlock_bh(&sch->dev->queue_lock);
}
#endif
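
/*
 * Illustrative arithmetic for RT_GEN above (an exponentially weighted
 * moving average): the timer fires every second and rescans one of the
 * HTB_HSIZE buckets, so each class is visited about once per HTB_HSIZE
 * seconds; each visit executes R += D - R/HTB_EWMAC and clears D. With
 * HTB_EWMAC == 2, a steady D of 1000 bytes per visit converges to
 * R == 2000, which is why the averaging window at the top of the file
 * is given as HTB_EWMAC * HTB_HSIZE seconds.
 */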

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * The routine assumes that a packet "bytes" long was dequeued from leaf
 * cl borrowing from "level". It accounts bytes to the ceil leaky bucket
 * for the leaf and all ancestors, and to the rate bucket for ancestors
 * at levels "level" and higher. It also handles a possible change of
 * mode resulting from the update. Note that the mode can also increase
 * here (MAY_BORROW to CAN_SEND) because we can use a more precise clock
 * than the event queue here. In such a case we remove the class from
 * the event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, int bytes)
{
	long toks, diff;
	enum htb_cmode old_mode;

#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
	if (toks > cl->B) toks = cl->B; \
	toks -= L2T(cl, cl->R, bytes); \
	if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
	cl->T = toks
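	/* Illustrative walk-through of HTB_ACCNT (numbers assumed): with
	   diff == 4000, cl->tokens == -1000 and cl->buffer == 10000 the
	   bucket first refills to 3000 tokens (clamped from above by
	   cl->buffer), then the cost of the dequeued packet, L2T(), is
	   subtracted, and finally the result is kept above -cl->mbuffer
	   so the debt cannot grow without bound. */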

	while (cl) {
		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}
#ifdef HTB_RATECM
		/* update rate counters */
		cl->sum_bytes += bytes;
		cl->sum_packets++;
#endif

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets++;
		}
		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans the event queue for pending events and applies them. Returns the
 * number of jiffies to the next pending event (0 if there is no event in
 * the pq).
 * Note: Applied are events that have cl->pq_key <= q->jiffies.
 */
static long htb_do_events(struct htb_sched *q, int level)
{
	int i;

	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (time_after(cl->pq_key, q->jiffies)) {
			return cl->pq_key - q->jiffies;
		}
		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events !\n");
	return HZ / 10;
}

/* Returns class->node+prio from the id-tree where the class's id is >= id.
   NULL if no such class exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n;
			n = n->rb_left;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf that the current feed pointers point to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 * pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_TRAP(tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but the id is valid - try to
			   recover the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now, so remove this hint as
				   it can become stale quickly */
		if (!*sp->pptr) {	/* we are at the right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr);
				if (!*sp->pptr)
					return NULL;
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	BUG_TRAP(0);
	return NULL;
}

/* dequeues a packet at given priority and level; call only if
   you are sure that there is an active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look up the initial class in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl)
			return NULL;

		/* class can be empty - it is unlikely but can be true if the
		   leaf qdisc drops packets in its enqueue routine or if
		   someone used the graft operation on the leaf since the
		   last dequeue; simply deactivate and skip such a class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;
		if (!cl->warned) {
			printk(KERN_WARNING
			       "htb: class %X isn't work conserving ?!\n",
			       cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
				  ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
					  ptr[0]) + prio);
		}
		/* this used to be after charge_class but this ordering
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb->len);
	}
	return skb;
}
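
/*
 * Illustrative DRR arithmetic for the deficit handling above (numbers
 * assumed): a leaf with quantum 1500 and deficit[level] == 300 that
 * yields a 900-byte skb goes to deficit -600, so the quantum is added
 * back (deficit 900) and the round-robin pointer advances. Had the skb
 * been 200 bytes, the deficit would stay at 100 and the same leaf would
 * be served again on the next dequeue.
 */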

static void htb_delay_by(struct Qdisc *sch, long delay)
{
	struct htb_sched *q = qdisc_priv(sch);
	if (delay <= 0)
		delay = 1;
	if (unlikely(delay > 5 * HZ)) {
		if (net_ratelimit())
			printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
		delay = 5 * HZ;
	}
	/* why not use jiffies here? because expires can be in the past */
	mod_timer(&q->timer, q->jiffies + delay);
	sch->flags |= TCQ_F_THROTTLED;
	sch->qstats.overlimits++;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	long min_delay;

	q->jiffies = jiffies;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	PSCHED_GET_TIME(q->now);

	min_delay = LONG_MAX;
	q->nwc_hit = 0;
	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		long delay;
		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
			delay = htb_do_events(q, level);
			q->near_ev_cache[level] =
			    q->jiffies + (delay ? delay : HZ);
		} else
			delay = q->near_ev_cache[level] - q->jiffies;

		if (delay && min_delay > delay)
			min_delay = delay;
		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
fin:
	return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer(&q->timer);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_HTB_INIT];
	struct tc_htb_glob *gopt;
	int i;
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
	    tb[TCA_HTB_INIT - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&q->root);
	for (i = 0; i < HTB_HSIZE; i++)
		INIT_HLIST_HEAD(q->hash + i);
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	init_timer(&q->timer);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = sch->dev->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;
	q->timer.function = htb_timer;
	q->timer.data = (unsigned long)sch;

#ifdef HTB_RATECM
	init_timer(&q->rttim);
	q->rttim.function = htb_rate_timer;
	q->rttim.data = (unsigned long)sch;
	q->rttim.expires = jiffies + HZ;
	add_timer(&q->rttim);
#endif
	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}
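
/*
 * Typical userspace counterpart of this init path (illustrative tc
 * command; device name assumed):
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 10 r2q 10
 *
 * "default 10" arrives here as gopt->defcls and "r2q 10" as
 * gopt->rate2quantum.
 */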

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_htb_glob gopt;
	spin_lock_bh(&sch->dev->queue_lock);
	gopt.direct_pkts = q->direct_pkts;

	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;
	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	rta->rta_len = skb->tail - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;
rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	skb_trim(skb, skb->tail - skb->data);
	return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_htb_opt opt;

	spin_lock_bh(&sch->dev->queue_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->un.leaf.quantum;
	opt.prio = cl->un.leaf.prio;
	opt.level = cl->level;
	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;
rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	skb_trim(skb, b - skb->data);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

#ifdef HTB_RATECM
	cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
	cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
#endif

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl && !cl->level) {
		if (new == NULL &&
		    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					     cl->classid)) == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
			qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}

static void htb_destroy_filters(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	htb_destroy_filters(&cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
						  struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete) */
	hlist_del_init(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}

/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	del_timer_sync(&q->timer);
#ifdef HTB_RATECM
	del_timer_sync(&q->rttim);
#endif
	/* This line used to be after the htb_destroy_class call below,
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to
	   call unbind_filter on it (without an Oops). */
	htb_destroy_filters(&q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
						  struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;

	// TODO: why don't we allow deleting a subtree? references? does the
	// tc subsystem guarantee us that in htb_destroy it holds no class
	// refs, so that we can remove children safely there?
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	sch_tree_lock(sch);

	/* delete from hash and active; the remainder happens in destroy_class */
	hlist_del_init(&cl->hlist);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct rtattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS - 1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
	    tb[TCA_HTB_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_HLIST_NODE(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create the leaf qdisc early because it uses
		   kmalloc(GFP_KERNEL), which can't be used inside
		   sch_tree_lock -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60);	/* 1min */
		PSCHED_GET_TIME(cl->t_c);
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling,
			      parent ? &parent->children : &q->root);
	} else
		sch_tree_lock(sch);

	/* there used to be a nasty bug here: we have to check that the node
	   is really a leaf before changing cl->un.leaf! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}
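
/*
 * Illustrative quantum arithmetic for the leaf branch above (rate
 * assumed): with the tc default r2q of 10, a leaf with rate 100kbit
 * (12500 bytes/s) gets quantum 12500 / 10 == 1250 bytes per DRR round,
 * which is inside the sane 1000..200000 window, so no warning is
 * printed. A "quantum" supplied explicitly by tc overrides the
 * computed value.
 */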

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get
	   the class for other reasons, so we have to allow it.
	   ----
	   19.6.2002 As Werner explained it is ok - bind filter is just
	   another way to "lock" the class - unlike "get" this lock can
	   be broken by the class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.requeue	=	htb_requeue,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.change		=	NULL /* htb_change */,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");