xref: /linux/net/sched/sch_api.c (revision 96c63fa7393d0a346acfe5a91e0c7d4c7782641b)
1 /*
2  * net/sched/sch_api.c	Packet scheduler API.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  *
11  * Fixes:
12  *
13  * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15  * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16  */
17 
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/skbuff.h>
24 #include <linux/init.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kmod.h>
28 #include <linux/list.h>
29 #include <linux/hrtimer.h>
30 #include <linux/lockdep.h>
31 #include <linux/slab.h>
32 
33 #include <net/net_namespace.h>
34 #include <net/sock.h>
35 #include <net/netlink.h>
36 #include <net/pkt_sched.h>
37 
38 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 			struct nlmsghdr *n, u32 clid,
40 			struct Qdisc *old, struct Qdisc *new);
41 static int tclass_notify(struct net *net, struct sk_buff *oskb,
42 			 struct nlmsghdr *n, struct Qdisc *q,
43 			 unsigned long cl, int event);
44 
45 /*
46 
47    Short review.
48    -------------
49 
50    This file consists of two interrelated parts:
51 
52    1. the queueing discipline manager frontend.
53    2. the traffic class manager frontend.
54 
55    Generally, a queueing discipline ("qdisc") is a black box,
56    which is able to enqueue packets and to dequeue them (when
57    the device is ready to send something) in an order and at times
58    determined by the algorithm hidden inside it.
59 
60    qdiscs are divided into two categories:
61    - "queues", which have no internal structure visible from outside.
62    - "schedulers", which split all the packets into "traffic classes",
63      using "packet classifiers" (see cls_api.c)
64 
65    In turn, classes may have child qdiscs (as a rule, queues)
66    attached to them, and so on.
67 
68    The goal of the routines in this file is to translate
69    the information supplied by the user in the form of handles
70    into a form more intelligible to the kernel, to perform the
71    sanity checks and the parts of the work common to all qdiscs,
72    and to provide rtnetlink notifications.
73 
74    All the really intelligent work is done inside the qdisc modules.
75 
76 
77 
78    Every discipline has two major routines: enqueue and dequeue.
79 
80    ---dequeue
81 
82    dequeue usually returns an skb to send. It is allowed to return NULL,
83    but that does not mean the queue is empty; it just means that the
84    discipline does not want to send anything at this time.
85    The queue is really empty if q->q.qlen == 0.
86    For complicated disciplines with multiple queues, q->q is not the
87    real packet queue, but q->q.qlen must nevertheless be valid.
88 
89    ---enqueue
90 
91    enqueue returns 0 if the packet was enqueued successfully.
92    If a packet (this one or another one) was dropped, it returns
93    a non-zero error code.
94    NET_XMIT_DROP 	- this packet was dropped
95      Expected action: do not back off, but wait until the queue clears.
96    NET_XMIT_CN	 	- this packet was probably enqueued, but another one was dropped.
97      Expected action: back off or ignore
98    NET_XMIT_POLICED	- dropped by a policer.
99      Expected action: back off or report an error to real-time apps.
100 
101    Auxiliary routines:
102 
103    ---peek
104 
105    like dequeue but without removing a packet from the queue
106 
107    ---reset
108 
109    returns the qdisc to its initial state: purges all buffers, clears all
110    timers and counters (except statistics), etc.
111 
112    ---init
113 
114    initializes a newly created qdisc.
115 
116    ---destroy
117 
118    destroys resources allocated by init and during the lifetime of the qdisc.
119 
120    ---change
121 
122    changes qdisc parameters.
123  */
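
/*
 * Illustrative sketch (editorial annotation, not part of this file): the
 * smallest possible "queue"-category qdisc honouring the contract above,
 * keeping its packets directly in sch->q so that q.qlen stays valid.  All
 * example_* names are hypothetical; the hook signatures are assumed to
 * match this kernel version's struct Qdisc_ops.
 */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	__skb_queue_tail(&sch->q, skb);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;	/* 0: enqueued successfully */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = __skb_dequeue(&sch->q);

	/* a real qdisc would also update bstats, e.g. via qdisc_dequeue_head() */
	if (skb)
		qdisc_qstats_backlog_dec(sch, skb);
	return skb;	/* NULL here really means empty: sch->q.qlen == 0 */
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,	/* dequeue without removing */
	.owner		= THIS_MODULE,
};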
124 
125 /* Protects the list of registered TC modules. It is a pure SMP lock. */
126 static DEFINE_RWLOCK(qdisc_mod_lock);
127 
128 
129 /************************************************
130  *	Queueing disciplines manipulation.	*
131  ************************************************/
132 
133 
134 /* The list of all installed queueing disciplines. */
135 
136 static struct Qdisc_ops *qdisc_base;
137 
138 /* Register/unregister queueing discipline */
139 
140 int register_qdisc(struct Qdisc_ops *qops)
141 {
142 	struct Qdisc_ops *q, **qp;
143 	int rc = -EEXIST;
144 
145 	write_lock(&qdisc_mod_lock);
146 	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
147 		if (!strcmp(qops->id, q->id))
148 			goto out;
149 
150 	if (qops->enqueue == NULL)
151 		qops->enqueue = noop_qdisc_ops.enqueue;
152 	if (qops->peek == NULL) {
153 		if (qops->dequeue == NULL)
154 			qops->peek = noop_qdisc_ops.peek;
155 		else
156 			goto out_einval;
157 	}
158 	if (qops->dequeue == NULL)
159 		qops->dequeue = noop_qdisc_ops.dequeue;
160 
161 	if (qops->cl_ops) {
162 		const struct Qdisc_class_ops *cops = qops->cl_ops;
163 
164 		if (!(cops->get && cops->put && cops->walk && cops->leaf))
165 			goto out_einval;
166 
167 		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
168 			goto out_einval;
169 	}
170 
171 	qops->next = NULL;
172 	*qp = qops;
173 	rc = 0;
174 out:
175 	write_unlock(&qdisc_mod_lock);
176 	return rc;
177 
178 out_einval:
179 	rc = -EINVAL;
180 	goto out;
181 }
182 EXPORT_SYMBOL(register_qdisc);
183 
184 int unregister_qdisc(struct Qdisc_ops *qops)
185 {
186 	struct Qdisc_ops *q, **qp;
187 	int err = -ENOENT;
188 
189 	write_lock(&qdisc_mod_lock);
190 	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
191 		if (q == qops)
192 			break;
193 	if (q) {
194 		*qp = q->next;
195 		q->next = NULL;
196 		err = 0;
197 	}
198 	write_unlock(&qdisc_mod_lock);
199 	return err;
200 }
201 EXPORT_SYMBOL(unregister_qdisc);
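
/*
 * Illustrative sketch (editorial): a qdisc module pairs the two calls above
 * in its init/exit hooks; example_qdisc_ops is the hypothetical ops table
 * from the earlier sketch.
 */
static int __init example_module_init(void)
{
	/* fails with -EEXIST if another module already claimed "example" */
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");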
202 
203 /* Get default qdisc if not otherwise specified */
204 void qdisc_get_default(char *name, size_t len)
205 {
206 	read_lock(&qdisc_mod_lock);
207 	strlcpy(name, default_qdisc_ops->id, len);
208 	read_unlock(&qdisc_mod_lock);
209 }
210 
211 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
212 {
213 	struct Qdisc_ops *q = NULL;
214 
215 	for (q = qdisc_base; q; q = q->next) {
216 		if (!strcmp(name, q->id)) {
217 			if (!try_module_get(q->owner))
218 				q = NULL;
219 			break;
220 		}
221 	}
222 
223 	return q;
224 }
225 
226 /* Set new default qdisc to use */
227 int qdisc_set_default(const char *name)
228 {
229 	const struct Qdisc_ops *ops;
230 
231 	if (!capable(CAP_NET_ADMIN))
232 		return -EPERM;
233 
234 	write_lock(&qdisc_mod_lock);
235 	ops = qdisc_lookup_default(name);
236 	if (!ops) {
237 		/* Not found, drop lock and try to load module */
238 		write_unlock(&qdisc_mod_lock);
239 		request_module("sch_%s", name);
240 		write_lock(&qdisc_mod_lock);
241 
242 		ops = qdisc_lookup_default(name);
243 	}
244 
245 	if (ops) {
246 		/* Set new default */
247 		module_put(default_qdisc_ops->owner);
248 		default_qdisc_ops = ops;
249 	}
250 	write_unlock(&qdisc_mod_lock);
251 
252 	return ops ? 0 : -ENOENT;
253 }
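
/*
 * Editorial note: qdisc_get_default()/qdisc_set_default() back the
 * net.core.default_qdisc sysctl, so e.g. "sysctl -w net.core.default_qdisc=fq"
 * ends up here and may modprobe sch_fq through the request_module() above.
 * An in-kernel caller would simply do, for instance:
 *
 *	err = qdisc_set_default("fq");   (0 on success, -ENOENT if unknown)
 */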
254 
255 /* We know the handle. Find the qdisc among all qdiscs attached to the
256  * device (the root qdisc, all its children, children of children, etc.)
257  * Note: caller either uses rtnl or rcu_read_lock()
258  */
259 
260 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
261 {
262 	struct Qdisc *q;
263 
264 	if (!(root->flags & TCQ_F_BUILTIN) &&
265 	    root->handle == handle)
266 		return root;
267 
268 	list_for_each_entry_rcu(q, &root->list, list) {
269 		if (q->handle == handle)
270 			return q;
271 	}
272 	return NULL;
273 }
274 
275 void qdisc_list_add(struct Qdisc *q)
276 {
277 	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
278 		struct Qdisc *root = qdisc_dev(q)->qdisc;
279 
280 		WARN_ON_ONCE(root == &noop_qdisc);
281 		ASSERT_RTNL();
282 		list_add_tail_rcu(&q->list, &root->list);
283 	}
284 }
285 EXPORT_SYMBOL(qdisc_list_add);
286 
287 void qdisc_list_del(struct Qdisc *q)
288 {
289 	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
290 		ASSERT_RTNL();
291 		list_del_rcu(&q->list);
292 	}
293 }
294 EXPORT_SYMBOL(qdisc_list_del);
295 
296 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
297 {
298 	struct Qdisc *q;
299 
300 	q = qdisc_match_from_root(dev->qdisc, handle);
301 	if (q)
302 		goto out;
303 
304 	if (dev_ingress_queue(dev))
305 		q = qdisc_match_from_root(
306 			dev_ingress_queue(dev)->qdisc_sleeping,
307 			handle);
308 out:
309 	return q;
310 }
311 
312 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
313 {
314 	unsigned long cl;
315 	struct Qdisc *leaf;
316 	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
317 
318 	if (cops == NULL)
319 		return NULL;
320 	cl = cops->get(p, classid);
321 
322 	if (cl == 0)
323 		return NULL;
324 	leaf = cops->leaf(p, cl);
325 	cops->put(p, cl);
326 	return leaf;
327 }
328 
329 /* Find queueing discipline by name */
330 
331 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
332 {
333 	struct Qdisc_ops *q = NULL;
334 
335 	if (kind) {
336 		read_lock(&qdisc_mod_lock);
337 		for (q = qdisc_base; q; q = q->next) {
338 			if (nla_strcmp(kind, q->id) == 0) {
339 				if (!try_module_get(q->owner))
340 					q = NULL;
341 				break;
342 			}
343 		}
344 		read_unlock(&qdisc_mod_lock);
345 	}
346 	return q;
347 }
348 
349 /* The linklayer setting was not transferred from iproute2 in older
350  * versions, and the rate table lookup system has been dropped from
351  * the kernel. To stay backward compatible with older iproute2 tc
352  * utils, we detect the linklayer setting by detecting whether the
353  * rate table was modified.
354  *
355  * For linklayer ATM table entries, the rate table will be aligned to
356  * 48 bytes, thus some table entries will contain the same value.  The
357  * mpu (min packet unit) is also encoded into the old rate table, thus
358  * starting from the mpu, we find the low and high table entries for
359  * mapping this cell.  If these entries contain the same value, then
360  * the rate table has been modified for linklayer ATM.
361  *
362  * This is done by rounding the mpu to the nearest 48-byte cell/entry,
363  * then rounding up to the next cell, computing the table entry one
364  * below, and comparing the two.
365  */
366 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
367 {
368 	int low       = roundup(r->mpu, 48);
369 	int high      = roundup(low+1, 48);
370 	int cell_low  = low >> r->cell_log;
371 	int cell_high = (high >> r->cell_log) - 1;
372 
373 	/* rtab is too inaccurate at rates > 100Mbit/s */
374 	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
375 		pr_debug("TC linklayer: Giving up ATM detection\n");
376 		return TC_LINKLAYER_ETHERNET;
377 	}
378 
379 	if ((cell_high > cell_low) && (cell_high < 256)
380 	    && (rtab[cell_low] == rtab[cell_high])) {
381 		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
382 			 cell_low, cell_high, rtab[cell_high]);
383 		return TC_LINKLAYER_ATM;
384 	}
385 	return TC_LINKLAYER_ETHERNET;
386 }
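
/*
 * Worked example for the detection above (editorial): assume mpu = 0 and
 * cell_log = 3 (8-byte rate-table cells).  Then low = roundup(0, 48) = 0,
 * high = roundup(1, 48) = 48, cell_low = 0 >> 3 = 0 and
 * cell_high = (48 >> 3) - 1 = 5.  Packet sizes 1..48 all fit in a single
 * 48-byte ATM payload, so an ATM-aligned table has rtab[0] == rtab[5],
 * while a plain Ethernet table grows with packet size and the two differ.
 */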
387 
388 static struct qdisc_rate_table *qdisc_rtab_list;
389 
390 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
391 {
392 	struct qdisc_rate_table *rtab;
393 
394 	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
395 	    nla_len(tab) != TC_RTAB_SIZE)
396 		return NULL;
397 
398 	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
399 		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
400 		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
401 			rtab->refcnt++;
402 			return rtab;
403 		}
404 	}
405 
406 	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
407 	if (rtab) {
408 		rtab->rate = *r;
409 		rtab->refcnt = 1;
410 		memcpy(rtab->data, nla_data(tab), 1024);
411 		if (r->linklayer == TC_LINKLAYER_UNAWARE)
412 			r->linklayer = __detect_linklayer(r, rtab->data);
413 		rtab->next = qdisc_rtab_list;
414 		qdisc_rtab_list = rtab;
415 	}
416 	return rtab;
417 }
418 EXPORT_SYMBOL(qdisc_get_rtab);
419 
420 void qdisc_put_rtab(struct qdisc_rate_table *tab)
421 {
422 	struct qdisc_rate_table *rtab, **rtabp;
423 
424 	if (!tab || --tab->refcnt)
425 		return;
426 
427 	for (rtabp = &qdisc_rtab_list;
428 	     (rtab = *rtabp) != NULL;
429 	     rtabp = &rtab->next) {
430 		if (rtab == tab) {
431 			*rtabp = rtab->next;
432 			kfree(rtab);
433 			return;
434 		}
435 	}
436 }
437 EXPORT_SYMBOL(qdisc_put_rtab);
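
/*
 * Illustrative sketch (editorial): how a rate-based qdisc pairs the two
 * calls above.  example_rtab_update() and struct example_rate_data are
 * hypothetical stand-ins for a real qdisc's change path and private data.
 */
struct example_rate_data {
	struct qdisc_rate_table *rtab;
};

static int example_rtab_update(struct Qdisc *sch, struct tc_ratespec *rate,
			       struct nlattr *rtab_attr)
{
	struct example_rate_data *q = qdisc_priv(sch);
	struct qdisc_rate_table *rtab;

	rtab = qdisc_get_rtab(rate, rtab_attr);
	if (!rtab)
		return -EINVAL;		/* bad size, or rate/cell_log of 0 */

	qdisc_put_rtab(q->rtab);	/* drop the old table, if any */
	q->rtab = rtab;			/* ->destroy() must put this one */
	return 0;
}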
438 
439 static LIST_HEAD(qdisc_stab_list);
440 static DEFINE_SPINLOCK(qdisc_stab_lock);
441 
442 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
443 	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
444 	[TCA_STAB_DATA] = { .type = NLA_BINARY },
445 };
446 
447 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
448 {
449 	struct nlattr *tb[TCA_STAB_MAX + 1];
450 	struct qdisc_size_table *stab;
451 	struct tc_sizespec *s;
452 	unsigned int tsize = 0;
453 	u16 *tab = NULL;
454 	int err;
455 
456 	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
457 	if (err < 0)
458 		return ERR_PTR(err);
459 	if (!tb[TCA_STAB_BASE])
460 		return ERR_PTR(-EINVAL);
461 
462 	s = nla_data(tb[TCA_STAB_BASE]);
463 
464 	if (s->tsize > 0) {
465 		if (!tb[TCA_STAB_DATA])
466 			return ERR_PTR(-EINVAL);
467 		tab = nla_data(tb[TCA_STAB_DATA]);
468 		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
469 	}
470 
471 	if (tsize != s->tsize || (!tab && tsize > 0))
472 		return ERR_PTR(-EINVAL);
473 
474 	spin_lock(&qdisc_stab_lock);
475 
476 	list_for_each_entry(stab, &qdisc_stab_list, list) {
477 		if (memcmp(&stab->szopts, s, sizeof(*s)))
478 			continue;
479 		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
480 			continue;
481 		stab->refcnt++;
482 		spin_unlock(&qdisc_stab_lock);
483 		return stab;
484 	}
485 
486 	spin_unlock(&qdisc_stab_lock);
487 
488 	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
489 	if (!stab)
490 		return ERR_PTR(-ENOMEM);
491 
492 	stab->refcnt = 1;
493 	stab->szopts = *s;
494 	if (tsize > 0)
495 		memcpy(stab->data, tab, tsize * sizeof(u16));
496 
497 	spin_lock(&qdisc_stab_lock);
498 	list_add_tail(&stab->list, &qdisc_stab_list);
499 	spin_unlock(&qdisc_stab_lock);
500 
501 	return stab;
502 }
503 
504 static void stab_kfree_rcu(struct rcu_head *head)
505 {
506 	kfree(container_of(head, struct qdisc_size_table, rcu));
507 }
508 
509 void qdisc_put_stab(struct qdisc_size_table *tab)
510 {
511 	if (!tab)
512 		return;
513 
514 	spin_lock(&qdisc_stab_lock);
515 
516 	if (--tab->refcnt == 0) {
517 		list_del(&tab->list);
518 		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
519 	}
520 
521 	spin_unlock(&qdisc_stab_lock);
522 }
523 EXPORT_SYMBOL(qdisc_put_stab);
524 
525 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
526 {
527 	struct nlattr *nest;
528 
529 	nest = nla_nest_start(skb, TCA_STAB);
530 	if (nest == NULL)
531 		goto nla_put_failure;
532 	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
533 		goto nla_put_failure;
534 	nla_nest_end(skb, nest);
535 
536 	return skb->len;
537 
538 nla_put_failure:
539 	return -1;
540 }
541 
542 void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
543 {
544 	int pkt_len, slot;
545 
546 	pkt_len = skb->len + stab->szopts.overhead;
547 	if (unlikely(!stab->szopts.tsize))
548 		goto out;
549 
550 	slot = pkt_len + stab->szopts.cell_align;
551 	if (unlikely(slot < 0))
552 		slot = 0;
553 
554 	slot >>= stab->szopts.cell_log;
555 	if (likely(slot < stab->szopts.tsize))
556 		pkt_len = stab->data[slot];
557 	else
558 		pkt_len = stab->data[stab->szopts.tsize - 1] *
559 				(slot / stab->szopts.tsize) +
560 				stab->data[slot % stab->szopts.tsize];
561 
562 	pkt_len <<= stab->szopts.size_log;
563 out:
564 	if (unlikely(pkt_len < 1))
565 		pkt_len = 1;
566 	qdisc_skb_cb(skb)->pkt_len = pkt_len;
567 }
568 EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
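
/*
 * Worked example for the slot arithmetic above (editorial): with
 * overhead = 24, cell_align = 0, cell_log = 6, size_log = 0 and
 * tsize = 512, a 1000-byte skb yields pkt_len = 1024 and
 * slot = 1024 >> 6 = 16, so the billed length becomes stab->data[16].
 * Packets past the table wrap through its last entry:
 * data[tsize - 1] * (slot / tsize) + data[slot % tsize].
 */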
569 
570 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
571 {
572 	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
573 		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
574 			txt, qdisc->ops->id, qdisc->handle >> 16);
575 		qdisc->flags |= TCQ_F_WARN_NONWC;
576 	}
577 }
578 EXPORT_SYMBOL(qdisc_warn_nonwc);
579 
580 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
581 {
582 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
583 						 timer);
584 
585 	rcu_read_lock();
586 	qdisc_unthrottled(wd->qdisc);
587 	__netif_schedule(qdisc_root(wd->qdisc));
588 	rcu_read_unlock();
589 
590 	return HRTIMER_NORESTART;
591 }
592 
593 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
594 {
595 	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
596 	wd->timer.function = qdisc_watchdog;
597 	wd->qdisc = qdisc;
598 }
599 EXPORT_SYMBOL(qdisc_watchdog_init);
600 
601 void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
602 {
603 	if (test_bit(__QDISC_STATE_DEACTIVATED,
604 		     &qdisc_root_sleeping(wd->qdisc)->state))
605 		return;
606 
607 	if (throttle)
608 		qdisc_throttled(wd->qdisc);
609 
610 	if (wd->last_expires == expires)
611 		return;
612 
613 	wd->last_expires = expires;
614 	hrtimer_start(&wd->timer,
615 		      ns_to_ktime(expires),
616 		      HRTIMER_MODE_ABS_PINNED);
617 }
618 EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
619 
620 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
621 {
622 	hrtimer_cancel(&wd->timer);
623 	qdisc_unthrottled(wd->qdisc);
624 }
625 EXPORT_SYMBOL(qdisc_watchdog_cancel);
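
/*
 * Illustrative sketch (editorial): the usual shaper pattern around this
 * watchdog.  struct example_shaper_data and next_send_ns are hypothetical;
 * the watchdog member is assumed to be set up in ->init() via
 * qdisc_watchdog_init().
 */
struct example_shaper_data {
	struct qdisc_watchdog	watchdog;
	u64			next_send_ns;
};

static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_shaper_data *q = qdisc_priv(sch);

	if (!sch->q.qlen)
		return NULL;

	if (ktime_get_ns() < q->next_send_ns) {
		/* Head packet is not due yet: arm the hrtimer so the
		 * device is rescheduled at the right moment, and report
		 * "nothing to send for now" (the queue is not empty).
		 */
		qdisc_watchdog_schedule_ns(&q->watchdog, q->next_send_ns, true);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}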
626 
627 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
628 {
629 	unsigned int size = n * sizeof(struct hlist_head), i;
630 	struct hlist_head *h;
631 
632 	if (size <= PAGE_SIZE)
633 		h = kmalloc(size, GFP_KERNEL);
634 	else
635 		h = (struct hlist_head *)
636 			__get_free_pages(GFP_KERNEL, get_order(size));
637 
638 	if (h != NULL) {
639 		for (i = 0; i < n; i++)
640 			INIT_HLIST_HEAD(&h[i]);
641 	}
642 	return h;
643 }
644 
645 static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
646 {
647 	unsigned int size = n * sizeof(struct hlist_head);
648 
649 	if (size <= PAGE_SIZE)
650 		kfree(h);
651 	else
652 		free_pages((unsigned long)h, get_order(size));
653 }
654 
655 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
656 {
657 	struct Qdisc_class_common *cl;
658 	struct hlist_node *next;
659 	struct hlist_head *nhash, *ohash;
660 	unsigned int nsize, nmask, osize;
661 	unsigned int i, h;
662 
663 	/* Rehash when load factor exceeds 0.75 */
664 	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
665 		return;
666 	nsize = clhash->hashsize * 2;
667 	nmask = nsize - 1;
668 	nhash = qdisc_class_hash_alloc(nsize);
669 	if (nhash == NULL)
670 		return;
671 
672 	ohash = clhash->hash;
673 	osize = clhash->hashsize;
674 
675 	sch_tree_lock(sch);
676 	for (i = 0; i < osize; i++) {
677 		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
678 			h = qdisc_class_hash(cl->classid, nmask);
679 			hlist_add_head(&cl->hnode, &nhash[h]);
680 		}
681 	}
682 	clhash->hash     = nhash;
683 	clhash->hashsize = nsize;
684 	clhash->hashmask = nmask;
685 	sch_tree_unlock(sch);
686 
687 	qdisc_class_hash_free(ohash, osize);
688 }
689 EXPORT_SYMBOL(qdisc_class_hash_grow);
690 
691 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
692 {
693 	unsigned int size = 4;
694 
695 	clhash->hash = qdisc_class_hash_alloc(size);
696 	if (clhash->hash == NULL)
697 		return -ENOMEM;
698 	clhash->hashsize  = size;
699 	clhash->hashmask  = size - 1;
700 	clhash->hashelems = 0;
701 	return 0;
702 }
703 EXPORT_SYMBOL(qdisc_class_hash_init);
704 
705 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
706 {
707 	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
708 }
709 EXPORT_SYMBOL(qdisc_class_hash_destroy);
710 
711 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
712 			     struct Qdisc_class_common *cl)
713 {
714 	unsigned int h;
715 
716 	INIT_HLIST_NODE(&cl->hnode);
717 	h = qdisc_class_hash(cl->classid, clhash->hashmask);
718 	hlist_add_head(&cl->hnode, &clhash->hash[h]);
719 	clhash->hashelems++;
720 }
721 EXPORT_SYMBOL(qdisc_class_hash_insert);
722 
723 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
724 			     struct Qdisc_class_common *cl)
725 {
726 	hlist_del(&cl->hnode);
727 	clhash->hashelems--;
728 }
729 EXPORT_SYMBOL(qdisc_class_hash_remove);
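
/*
 * Illustrative sketch (editorial): a classful qdisc embeds
 * struct Qdisc_class_common at the start of its class type and drives the
 * helpers above as below.  The example_* names are hypothetical; clhash is
 * assumed to be set up in ->init() with qdisc_class_hash_init().
 */
struct example_class {
	struct Qdisc_class_common common;	/* classid + hash linkage */
	/* per-class state follows */
};

struct example_classful_data {
	struct Qdisc_class_hash clhash;
};

static void example_add_class(struct Qdisc *sch, struct example_class *cl,
			      u32 classid)
{
	struct example_classful_data *q = qdisc_priv(sch);

	cl->common.classid = classid;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	/* May rehash; takes the tree lock itself, so call it unlocked. */
	qdisc_class_hash_grow(sch, &q->clhash);
}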
730 
731 /* Allocate a unique handle from the space managed by the kernel.
732  * Possible range is [8000-FFFF]:0000 (0x8000 values)
733  */
734 static u32 qdisc_alloc_handle(struct net_device *dev)
735 {
736 	int i = 0x8000;
737 	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
738 
739 	do {
740 		autohandle += TC_H_MAKE(0x10000U, 0);
741 		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
742 			autohandle = TC_H_MAKE(0x80000000U, 0);
743 		if (!qdisc_lookup(dev, autohandle))
744 			return autohandle;
745 		cond_resched();
746 	} while	(--i > 0);
747 
748 	return 0;
749 }
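
/*
 * Worked example (editorial): a handle packs major:minor into a u32 with
 * the major in the upper 16 bits, so tc's "1:10" is
 * TC_H_MAKE(0x10000, 0x10) == 0x00010010.  The pool above hands out the
 * majors 0x8000..0xffff; the first auto-allocated handle is therefore
 * 0x80010000, printed by tc as "8001:".
 */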
750 
751 void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
752 			       unsigned int len)
753 {
754 	const struct Qdisc_class_ops *cops;
755 	unsigned long cl;
756 	u32 parentid;
757 	int drops;
758 
759 	if (n == 0 && len == 0)
760 		return;
761 	drops = max_t(int, n, 0);
762 	rcu_read_lock();
763 	while ((parentid = sch->parent)) {
764 		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
765 			break;
766 
767 		if (sch->flags & TCQ_F_NOPARENT)
768 			break;
769 		/* TODO: perform the search on a per txq basis */
770 		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
771 		if (sch == NULL) {
772 			WARN_ON_ONCE(parentid != TC_H_ROOT);
773 			break;
774 		}
775 		cops = sch->ops->cl_ops;
776 		if (cops->qlen_notify) {
777 			cl = cops->get(sch, parentid);
778 			cops->qlen_notify(sch, cl);
779 			cops->put(sch, cl);
780 		}
781 		sch->q.qlen -= n;
782 		sch->qstats.backlog -= len;
783 		__qdisc_qstats_drop(sch, drops);
784 	}
785 	rcu_read_unlock();
786 }
787 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
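
/*
 * Illustrative sketch (editorial): a qdisc that drops packets outside its
 * ->enqueue() path (here while shrinking its limit, e.g. from ->change())
 * must report those drops upwards, since the ancestors' qlen/backlog still
 * account for them.  example_shrink_queue() is hypothetical.
 */
static void example_shrink_queue(struct Qdisc *sch, unsigned int new_limit)
{
	unsigned int dropped = 0, dropped_len = 0;

	while (sch->q.qlen > new_limit) {
		struct sk_buff *skb = __skb_dequeue(&sch->q);

		dropped++;
		dropped_len += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
	}
	qdisc_tree_reduce_backlog(sch, dropped, dropped_len);
}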
788 
789 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
790 			       struct nlmsghdr *n, u32 clid,
791 			       struct Qdisc *old, struct Qdisc *new)
792 {
793 	if (new || old)
794 		qdisc_notify(net, skb, n, clid, old, new);
795 
796 	if (old)
797 		qdisc_destroy(old);
798 }
799 
800 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
801  * to device "dev".
802  *
803  * When appropriate, send a netlink notification using "skb"
804  * and "n".
805  *
806  * On success, destroy the old qdisc.
807  */
808 
809 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
810 		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
811 		       struct Qdisc *new, struct Qdisc *old)
812 {
813 	struct Qdisc *q = old;
814 	struct net *net = dev_net(dev);
815 	int err = 0;
816 
817 	if (parent == NULL) {
818 		unsigned int i, num_q, ingress;
819 
820 		ingress = 0;
821 		num_q = dev->num_tx_queues;
822 		if ((q && q->flags & TCQ_F_INGRESS) ||
823 		    (new && new->flags & TCQ_F_INGRESS)) {
824 			num_q = 1;
825 			ingress = 1;
826 			if (!dev_ingress_queue(dev))
827 				return -ENOENT;
828 		}
829 
830 		if (dev->flags & IFF_UP)
831 			dev_deactivate(dev);
832 
833 		if (new && new->ops->attach)
834 			goto skip;
835 
836 		for (i = 0; i < num_q; i++) {
837 			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
838 
839 			if (!ingress)
840 				dev_queue = netdev_get_tx_queue(dev, i);
841 
842 			old = dev_graft_qdisc(dev_queue, new);
843 			if (new && i > 0)
844 				atomic_inc(&new->refcnt);
845 
846 			if (!ingress)
847 				qdisc_destroy(old);
848 		}
849 
850 skip:
851 		if (!ingress) {
852 			notify_and_destroy(net, skb, n, classid,
853 					   dev->qdisc, new);
854 			if (new && !new->ops->attach)
855 				atomic_inc(&new->refcnt);
856 			dev->qdisc = new ? : &noop_qdisc;
857 
858 			if (new && new->ops->attach)
859 				new->ops->attach(new);
860 		} else {
861 			notify_and_destroy(net, skb, n, classid, old, new);
862 		}
863 
864 		if (dev->flags & IFF_UP)
865 			dev_activate(dev);
866 	} else {
867 		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
868 
869 		err = -EOPNOTSUPP;
870 		if (cops && cops->graft) {
871 			unsigned long cl = cops->get(parent, classid);
872 			if (cl) {
873 				err = cops->graft(parent, cl, new, &old);
874 				cops->put(parent, cl);
875 			} else
876 				err = -ENOENT;
877 		}
878 		if (!err)
879 			notify_and_destroy(net, skb, n, classid, old, new);
880 	}
881 	return err;
882 }
883 
884 /* lockdep annotation is needed for ingress; egress gets it only for name */
885 static struct lock_class_key qdisc_tx_lock;
886 static struct lock_class_key qdisc_rx_lock;
887 
888 /*
889    Allocate and initialize a new qdisc.
890 
891    Parameters are passed via opt.
892  */
893 
894 static struct Qdisc *
895 qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
896 	     struct Qdisc *p, u32 parent, u32 handle,
897 	     struct nlattr **tca, int *errp)
898 {
899 	int err;
900 	struct nlattr *kind = tca[TCA_KIND];
901 	struct Qdisc *sch;
902 	struct Qdisc_ops *ops;
903 	struct qdisc_size_table *stab;
904 
905 	ops = qdisc_lookup_ops(kind);
906 #ifdef CONFIG_MODULES
907 	if (ops == NULL && kind != NULL) {
908 		char name[IFNAMSIZ];
909 		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
910 			/* We dropped the RTNL semaphore in order to
911 			 * perform the module load.  So, even if we
912 			 * succeeded in loading the module we have to
913 			 * tell the caller to replay the request.  We
914 			 * indicate this using -EAGAIN.
915 			 * We replay the request because the device may
916 			 * go away in the mean time.
917 			 * go away in the meantime.
918 			rtnl_unlock();
919 			request_module("sch_%s", name);
920 			rtnl_lock();
921 			ops = qdisc_lookup_ops(kind);
922 			if (ops != NULL) {
923 				/* We will try qdisc_lookup_ops again,
924 				 * so don't keep a reference.
925 				 */
926 				module_put(ops->owner);
927 				err = -EAGAIN;
928 				goto err_out;
929 			}
930 		}
931 	}
932 #endif
933 
934 	err = -ENOENT;
935 	if (ops == NULL)
936 		goto err_out;
937 
938 	sch = qdisc_alloc(dev_queue, ops);
939 	if (IS_ERR(sch)) {
940 		err = PTR_ERR(sch);
941 		goto err_out2;
942 	}
943 
944 	sch->parent = parent;
945 
946 	if (handle == TC_H_INGRESS) {
947 		sch->flags |= TCQ_F_INGRESS;
948 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
949 		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
950 	} else {
951 		if (handle == 0) {
952 			handle = qdisc_alloc_handle(dev);
953 			err = -ENOMEM;
954 			if (handle == 0)
955 				goto err_out3;
956 		}
957 		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
958 		if (!netif_is_multiqueue(dev))
959 			sch->flags |= TCQ_F_ONETXQUEUE;
960 	}
961 
962 	sch->handle = handle;
963 
964 	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
965 		if (qdisc_is_percpu_stats(sch)) {
966 			sch->cpu_bstats =
967 				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
968 			if (!sch->cpu_bstats)
969 				goto err_out4;
970 
971 			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
972 			if (!sch->cpu_qstats)
973 				goto err_out4;
974 		}
975 
976 		if (tca[TCA_STAB]) {
977 			stab = qdisc_get_stab(tca[TCA_STAB]);
978 			if (IS_ERR(stab)) {
979 				err = PTR_ERR(stab);
980 				goto err_out4;
981 			}
982 			rcu_assign_pointer(sch->stab, stab);
983 		}
984 		if (tca[TCA_RATE]) {
985 			seqcount_t *running;
986 
987 			err = -EOPNOTSUPP;
988 			if (sch->flags & TCQ_F_MQROOT)
989 				goto err_out4;
990 
991 			if ((sch->parent != TC_H_ROOT) &&
992 			    !(sch->flags & TCQ_F_INGRESS) &&
993 			    (!p || !(p->flags & TCQ_F_MQROOT)))
994 				running = qdisc_root_sleeping_running(sch);
995 			else
996 				running = &sch->running;
997 
998 			err = gen_new_estimator(&sch->bstats,
999 						sch->cpu_bstats,
1000 						&sch->rate_est,
1001 						NULL,
1002 						running,
1003 						tca[TCA_RATE]);
1004 			if (err)
1005 				goto err_out4;
1006 		}
1007 
1008 		qdisc_list_add(sch);
1009 
1010 		return sch;
1011 	}
1012 err_out3:
1013 	dev_put(dev);
1014 	kfree((char *) sch - sch->padded);
1015 err_out2:
1016 	module_put(ops->owner);
1017 err_out:
1018 	*errp = err;
1019 	return NULL;
1020 
1021 err_out4:
1022 	free_percpu(sch->cpu_bstats);
1023 	free_percpu(sch->cpu_qstats);
1024 	/*
1025 	 * Any broken qdiscs that would require an ops->reset() here?
1026 	 * The qdisc was never in action so it shouldn't be necessary.
1027 	 */
1028 	qdisc_put_stab(rtnl_dereference(sch->stab));
1029 	if (ops->destroy)
1030 		ops->destroy(sch);
1031 	goto err_out3;
1032 }
1033 
1034 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
1035 {
1036 	struct qdisc_size_table *ostab, *stab = NULL;
1037 	int err = 0;
1038 
1039 	if (tca[TCA_OPTIONS]) {
1040 		if (sch->ops->change == NULL)
1041 			return -EINVAL;
1042 		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
1043 		if (err)
1044 			return err;
1045 	}
1046 
1047 	if (tca[TCA_STAB]) {
1048 		stab = qdisc_get_stab(tca[TCA_STAB]);
1049 		if (IS_ERR(stab))
1050 			return PTR_ERR(stab);
1051 	}
1052 
1053 	ostab = rtnl_dereference(sch->stab);
1054 	rcu_assign_pointer(sch->stab, stab);
1055 	qdisc_put_stab(ostab);
1056 
1057 	if (tca[TCA_RATE]) {
1058 		/* NB: ignores errors from replace_estimator
1059 		   because change can't be undone. */
1060 		if (sch->flags & TCQ_F_MQROOT)
1061 			goto out;
1062 		gen_replace_estimator(&sch->bstats,
1063 				      sch->cpu_bstats,
1064 				      &sch->rate_est,
1065 				      NULL,
1066 				      qdisc_root_sleeping_running(sch),
1067 				      tca[TCA_RATE]);
1068 	}
1069 out:
1070 	return 0;
1071 }
1072 
1073 struct check_loop_arg {
1074 	struct qdisc_walker	w;
1075 	struct Qdisc		*p;
1076 	int			depth;
1077 };
1078 
1079 static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
1080 
1081 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1082 {
1083 	struct check_loop_arg	arg;
1084 
1085 	if (q->ops->cl_ops == NULL)
1086 		return 0;
1087 
1088 	arg.w.stop = arg.w.skip = arg.w.count = 0;
1089 	arg.w.fn = check_loop_fn;
1090 	arg.depth = depth;
1091 	arg.p = p;
1092 	q->ops->cl_ops->walk(q, &arg.w);
1093 	return arg.w.stop ? -ELOOP : 0;
1094 }
1095 
1096 static int
1097 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1098 {
1099 	struct Qdisc *leaf;
1100 	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1101 	struct check_loop_arg *arg = (struct check_loop_arg *)w;
1102 
1103 	leaf = cops->leaf(q, cl);
1104 	if (leaf) {
1105 		if (leaf == arg->p || arg->depth > 7)
1106 			return -ELOOP;
1107 		return check_loop(leaf, arg->p, arg->depth + 1);
1108 	}
1109 	return 0;
1110 }
1111 
1112 /*
1113  * Delete/get qdisc.
1114  */
1115 
1116 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1117 {
1118 	struct net *net = sock_net(skb->sk);
1119 	struct tcmsg *tcm = nlmsg_data(n);
1120 	struct nlattr *tca[TCA_MAX + 1];
1121 	struct net_device *dev;
1122 	u32 clid;
1123 	struct Qdisc *q = NULL;
1124 	struct Qdisc *p = NULL;
1125 	int err;
1126 
1127 	if ((n->nlmsg_type != RTM_GETQDISC) &&
1128 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1129 		return -EPERM;
1130 
1131 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1132 	if (err < 0)
1133 		return err;
1134 
1135 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1136 	if (!dev)
1137 		return -ENODEV;
1138 
1139 	clid = tcm->tcm_parent;
1140 	if (clid) {
1141 		if (clid != TC_H_ROOT) {
1142 			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1143 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1144 				if (!p)
1145 					return -ENOENT;
1146 				q = qdisc_leaf(p, clid);
1147 			} else if (dev_ingress_queue(dev)) {
1148 				q = dev_ingress_queue(dev)->qdisc_sleeping;
1149 			}
1150 		} else {
1151 			q = dev->qdisc;
1152 		}
1153 		if (!q)
1154 			return -ENOENT;
1155 
1156 		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
1157 			return -EINVAL;
1158 	} else {
1159 		q = qdisc_lookup(dev, tcm->tcm_handle);
1160 		if (!q)
1161 			return -ENOENT;
1162 	}
1163 
1164 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1165 		return -EINVAL;
1166 
1167 	if (n->nlmsg_type == RTM_DELQDISC) {
1168 		if (!clid)
1169 			return -EINVAL;
1170 		if (q->handle == 0)
1171 			return -ENOENT;
1172 		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
1173 		if (err != 0)
1174 			return err;
1175 	} else {
1176 		qdisc_notify(net, skb, n, clid, NULL, q);
1177 	}
1178 	return 0;
1179 }
1180 
1181 /*
1182  * Create/change qdisc.
1183  */
1184 
1185 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1186 {
1187 	struct net *net = sock_net(skb->sk);
1188 	struct tcmsg *tcm;
1189 	struct nlattr *tca[TCA_MAX + 1];
1190 	struct net_device *dev;
1191 	u32 clid;
1192 	struct Qdisc *q, *p;
1193 	int err;
1194 
1195 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1196 		return -EPERM;
1197 
1198 replay:
1199 	/* Reinit, just in case something touches this. */
1200 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1201 	if (err < 0)
1202 		return err;
1203 
1204 	tcm = nlmsg_data(n);
1205 	clid = tcm->tcm_parent;
1206 	q = p = NULL;
1207 
1208 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1209 	if (!dev)
1210 		return -ENODEV;
1211 
1212 
1213 	if (clid) {
1214 		if (clid != TC_H_ROOT) {
1215 			if (clid != TC_H_INGRESS) {
1216 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1217 				if (!p)
1218 					return -ENOENT;
1219 				q = qdisc_leaf(p, clid);
1220 			} else if (dev_ingress_queue_create(dev)) {
1221 				q = dev_ingress_queue(dev)->qdisc_sleeping;
1222 			}
1223 		} else {
1224 			q = dev->qdisc;
1225 		}
1226 
1227 		/* It may be the default qdisc; ignore it */
1228 		if (q && q->handle == 0)
1229 			q = NULL;
1230 
1231 		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1232 			if (tcm->tcm_handle) {
1233 				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
1234 					return -EEXIST;
1235 				if (TC_H_MIN(tcm->tcm_handle))
1236 					return -EINVAL;
1237 				q = qdisc_lookup(dev, tcm->tcm_handle);
1238 				if (!q)
1239 					goto create_n_graft;
1240 				if (n->nlmsg_flags & NLM_F_EXCL)
1241 					return -EEXIST;
1242 				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1243 					return -EINVAL;
1244 				if (q == p ||
1245 				    (p && check_loop(q, p, 0)))
1246 					return -ELOOP;
1247 				atomic_inc(&q->refcnt);
1248 				goto graft;
1249 			} else {
1250 				if (!q)
1251 					goto create_n_graft;
1252 
1253 				/* This magic test requires explanation.
1254 				 *
1255 				 *   We know that some child q is already
1256 				 *   attached to this parent and we have a choice:
1257 				 *   either to change it or to create/graft a new one.
1258 				 *
1259 				 *   1. We are allowed to create/graft only
1260 				 *   if both CREATE and REPLACE flags are set.
1261 				 *
1262 				 *   2. If EXCL is set, the requestor meant that
1263 				 *   a qdisc with handle tcm_handle is not expected
1264 				 *   to exist, so we choose create/graft too.
1265 				 *
1266 				 *   3. The last case is when no flags are set.
1267 				 *   Alas, it is a sort of hole in the API; we
1268 				 *   cannot decide what to do unambiguously.
1269 				 *   For now we select create/graft if the
1270 				 *   user gave a KIND that does not match the existing one.
1271 				 */
1272 				if ((n->nlmsg_flags & NLM_F_CREATE) &&
1273 				    (n->nlmsg_flags & NLM_F_REPLACE) &&
1274 				    ((n->nlmsg_flags & NLM_F_EXCL) ||
1275 				     (tca[TCA_KIND] &&
1276 				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
1277 					goto create_n_graft;
1278 			}
1279 		}
1280 	} else {
1281 		if (!tcm->tcm_handle)
1282 			return -EINVAL;
1283 		q = qdisc_lookup(dev, tcm->tcm_handle);
1284 	}
1285 
1286 	/* Change qdisc parameters */
1287 	if (q == NULL)
1288 		return -ENOENT;
1289 	if (n->nlmsg_flags & NLM_F_EXCL)
1290 		return -EEXIST;
1291 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1292 		return -EINVAL;
1293 	err = qdisc_change(q, tca);
1294 	if (err == 0)
1295 		qdisc_notify(net, skb, n, clid, NULL, q);
1296 	return err;
1297 
1298 create_n_graft:
1299 	if (!(n->nlmsg_flags & NLM_F_CREATE))
1300 		return -ENOENT;
1301 	if (clid == TC_H_INGRESS) {
1302 		if (dev_ingress_queue(dev))
1303 			q = qdisc_create(dev, dev_ingress_queue(dev), p,
1304 					 tcm->tcm_parent, tcm->tcm_parent,
1305 					 tca, &err);
1306 		else
1307 			err = -ENOENT;
1308 	} else {
1309 		struct netdev_queue *dev_queue;
1310 
1311 		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1312 			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1313 		else if (p)
1314 			dev_queue = p->dev_queue;
1315 		else
1316 			dev_queue = netdev_get_tx_queue(dev, 0);
1317 
1318 		q = qdisc_create(dev, dev_queue, p,
1319 				 tcm->tcm_parent, tcm->tcm_handle,
1320 				 tca, &err);
1321 	}
1322 	if (q == NULL) {
1323 		if (err == -EAGAIN)
1324 			goto replay;
1325 		return err;
1326 	}
1327 
1328 graft:
1329 	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
1330 	if (err) {
1331 		if (q)
1332 			qdisc_destroy(q);
1333 		return err;
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1340 			 u32 portid, u32 seq, u16 flags, int event)
1341 {
1342 	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
1343 	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
1344 	struct tcmsg *tcm;
1345 	struct nlmsghdr  *nlh;
1346 	unsigned char *b = skb_tail_pointer(skb);
1347 	struct gnet_dump d;
1348 	struct qdisc_size_table *stab;
1349 	__u32 qlen;
1350 
1351 	cond_resched();
1352 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1353 	if (!nlh)
1354 		goto out_nlmsg_trim;
1355 	tcm = nlmsg_data(nlh);
1356 	tcm->tcm_family = AF_UNSPEC;
1357 	tcm->tcm__pad1 = 0;
1358 	tcm->tcm__pad2 = 0;
1359 	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1360 	tcm->tcm_parent = clid;
1361 	tcm->tcm_handle = q->handle;
1362 	tcm->tcm_info = atomic_read(&q->refcnt);
1363 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1364 		goto nla_put_failure;
1365 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
1366 		goto nla_put_failure;
1367 	qlen = q->q.qlen;
1368 
1369 	stab = rtnl_dereference(q->stab);
1370 	if (stab && qdisc_dump_stab(skb, stab) < 0)
1371 		goto nla_put_failure;
1372 
1373 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1374 					 NULL, &d, TCA_PAD) < 0)
1375 		goto nla_put_failure;
1376 
1377 	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
1378 		goto nla_put_failure;
1379 
1380 	if (qdisc_is_percpu_stats(q)) {
1381 		cpu_bstats = q->cpu_bstats;
1382 		cpu_qstats = q->cpu_qstats;
1383 	}
1384 
1385 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
1386 				  &d, cpu_bstats, &q->bstats) < 0 ||
1387 	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
1388 	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
1389 		goto nla_put_failure;
1390 
1391 	if (gnet_stats_finish_copy(&d) < 0)
1392 		goto nla_put_failure;
1393 
1394 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1395 	return skb->len;
1396 
1397 out_nlmsg_trim:
1398 nla_put_failure:
1399 	nlmsg_trim(skb, b);
1400 	return -1;
1401 }
1402 
1403 static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1404 {
1405 	return (q->flags & TCQ_F_BUILTIN) ? true : false;
1406 }
1407 
1408 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1409 			struct nlmsghdr *n, u32 clid,
1410 			struct Qdisc *old, struct Qdisc *new)
1411 {
1412 	struct sk_buff *skb;
1413 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1414 
1415 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1416 	if (!skb)
1417 		return -ENOBUFS;
1418 
1419 	if (old && !tc_qdisc_dump_ignore(old)) {
1420 		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1421 				  0, RTM_DELQDISC) < 0)
1422 			goto err_out;
1423 	}
1424 	if (new && !tc_qdisc_dump_ignore(new)) {
1425 		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1426 				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1427 			goto err_out;
1428 	}
1429 
1430 	if (skb->len)
1431 		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1432 				      n->nlmsg_flags & NLM_F_ECHO);
1433 
1434 err_out:
1435 	kfree_skb(skb);
1436 	return -EINVAL;
1437 }
1438 
1439 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1440 			      struct netlink_callback *cb,
1441 			      int *q_idx_p, int s_q_idx)
1442 {
1443 	int ret = 0, q_idx = *q_idx_p;
1444 	struct Qdisc *q;
1445 
1446 	if (!root)
1447 		return 0;
1448 
1449 	q = root;
1450 	if (q_idx < s_q_idx) {
1451 		q_idx++;
1452 	} else {
1453 		if (!tc_qdisc_dump_ignore(q) &&
1454 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1455 				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1456 			goto done;
1457 		q_idx++;
1458 	}
1459 	list_for_each_entry(q, &root->list, list) {
1460 		if (q_idx < s_q_idx) {
1461 			q_idx++;
1462 			continue;
1463 		}
1464 		if (!tc_qdisc_dump_ignore(q) &&
1465 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1466 				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1467 			goto done;
1468 		q_idx++;
1469 	}
1470 
1471 out:
1472 	*q_idx_p = q_idx;
1473 	return ret;
1474 done:
1475 	ret = -1;
1476 	goto out;
1477 }
1478 
1479 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1480 {
1481 	struct net *net = sock_net(skb->sk);
1482 	int idx, q_idx;
1483 	int s_idx, s_q_idx;
1484 	struct net_device *dev;
1485 
1486 	s_idx = cb->args[0];
1487 	s_q_idx = q_idx = cb->args[1];
1488 
1489 	idx = 0;
1490 	ASSERT_RTNL();
1491 	for_each_netdev(net, dev) {
1492 		struct netdev_queue *dev_queue;
1493 
1494 		if (idx < s_idx)
1495 			goto cont;
1496 		if (idx > s_idx)
1497 			s_q_idx = 0;
1498 		q_idx = 0;
1499 
1500 		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
1501 			goto done;
1502 
1503 		dev_queue = dev_ingress_queue(dev);
1504 		if (dev_queue &&
1505 		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1506 				       &q_idx, s_q_idx) < 0)
1507 			goto done;
1508 
1509 cont:
1510 		idx++;
1511 	}
1512 
1513 done:
1514 	cb->args[0] = idx;
1515 	cb->args[1] = q_idx;
1516 
1517 	return skb->len;
1518 }
1519 
1520 
1521 
1522 /************************************************
1523  *	Traffic classes manipulation.		*
1524  ************************************************/
1525 
1526 
1527 
1528 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
1529 {
1530 	struct net *net = sock_net(skb->sk);
1531 	struct tcmsg *tcm = nlmsg_data(n);
1532 	struct nlattr *tca[TCA_MAX + 1];
1533 	struct net_device *dev;
1534 	struct Qdisc *q = NULL;
1535 	const struct Qdisc_class_ops *cops;
1536 	unsigned long cl = 0;
1537 	unsigned long new_cl;
1538 	u32 portid;
1539 	u32 clid;
1540 	u32 qid;
1541 	int err;
1542 
1543 	if ((n->nlmsg_type != RTM_GETTCLASS) &&
1544 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1545 		return -EPERM;
1546 
1547 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1548 	if (err < 0)
1549 		return err;
1550 
1551 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1552 	if (!dev)
1553 		return -ENODEV;
1554 
1555 	/*
1556 	   parent == TC_H_UNSPEC - unspecified parent.
1557 	   parent == TC_H_ROOT   - class is root, which has no parent.
1558 	   parent == X:0	 - parent is root class.
1559 	   parent == X:Y	 - parent is a node in hierarchy.
1560 	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.
1561 
1562 	   handle == 0:0	 - generate handle from kernel pool.
1563 	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
1564 	   handle == X:Y	 - class is X:Y itself.
1565 	   handle == X:0	 - root class.
1566 	 */
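
	/*
	 * Worked example (editorial): "tc class add dev eth0 parent 1:
	 * classid 1:10 ..." arrives with tcm_parent == 0x00010000 and
	 * tcm_handle == 0x00010010; both majors agree, so qid = 0x10000.
	 * When parent or handle comes as 0:Y, the missing major is filled
	 * in from the other field, or from dev->qdisc for the root.
	 */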
1567 
1568 	/* Step 1. Determine qdisc handle X:0 */
1569 
1570 	portid = tcm->tcm_parent;
1571 	clid = tcm->tcm_handle;
1572 	qid = TC_H_MAJ(clid);
1573 
1574 	if (portid != TC_H_ROOT) {
1575 		u32 qid1 = TC_H_MAJ(portid);
1576 
1577 		if (qid && qid1) {
1578 			/* If both majors are known, they must be identical. */
1579 			if (qid != qid1)
1580 				return -EINVAL;
1581 		} else if (qid1) {
1582 			qid = qid1;
1583 		} else if (qid == 0)
1584 			qid = dev->qdisc->handle;
1585 
1586 		/* Now qid is a genuine qdisc handle consistent
1587 		 * with both parent and child.
1588 		 *
1589 		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
1590 		 */
1591 		if (portid)
1592 			portid = TC_H_MAKE(qid, portid);
1593 	} else {
1594 		if (qid == 0)
1595 			qid = dev->qdisc->handle;
1596 	}
1597 
1598 	/* OK. Locate qdisc */
1599 	q = qdisc_lookup(dev, qid);
1600 	if (!q)
1601 		return -ENOENT;
1602 
1603 	/* And check that it supports classes */
1604 	cops = q->ops->cl_ops;
1605 	if (cops == NULL)
1606 		return -EINVAL;
1607 
1608 	/* Now try to get class */
1609 	if (clid == 0) {
1610 		if (portid == TC_H_ROOT)
1611 			clid = qid;
1612 	} else
1613 		clid = TC_H_MAKE(qid, clid);
1614 
1615 	if (clid)
1616 		cl = cops->get(q, clid);
1617 
1618 	if (cl == 0) {
1619 		err = -ENOENT;
1620 		if (n->nlmsg_type != RTM_NEWTCLASS ||
1621 		    !(n->nlmsg_flags & NLM_F_CREATE))
1622 			goto out;
1623 	} else {
1624 		switch (n->nlmsg_type) {
1625 		case RTM_NEWTCLASS:
1626 			err = -EEXIST;
1627 			if (n->nlmsg_flags & NLM_F_EXCL)
1628 				goto out;
1629 			break;
1630 		case RTM_DELTCLASS:
1631 			err = -EOPNOTSUPP;
1632 			if (cops->delete)
1633 				err = cops->delete(q, cl);
1634 			if (err == 0)
1635 				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
1636 			goto out;
1637 		case RTM_GETTCLASS:
1638 			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1639 			goto out;
1640 		default:
1641 			err = -EINVAL;
1642 			goto out;
1643 		}
1644 	}
1645 
1646 	new_cl = cl;
1647 	err = -EOPNOTSUPP;
1648 	if (cops->change)
1649 		err = cops->change(q, clid, portid, tca, &new_cl);
1650 	if (err == 0)
1651 		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1652 
1653 out:
1654 	if (cl)
1655 		cops->put(q, cl);
1656 
1657 	return err;
1658 }
1659 
1660 
1661 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1662 			  unsigned long cl,
1663 			  u32 portid, u32 seq, u16 flags, int event)
1664 {
1665 	struct tcmsg *tcm;
1666 	struct nlmsghdr  *nlh;
1667 	unsigned char *b = skb_tail_pointer(skb);
1668 	struct gnet_dump d;
1669 	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1670 
1671 	cond_resched();
1672 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1673 	if (!nlh)
1674 		goto out_nlmsg_trim;
1675 	tcm = nlmsg_data(nlh);
1676 	tcm->tcm_family = AF_UNSPEC;
1677 	tcm->tcm__pad1 = 0;
1678 	tcm->tcm__pad2 = 0;
1679 	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1680 	tcm->tcm_parent = q->handle;
1681 	tcm->tcm_handle = q->handle;
1682 	tcm->tcm_info = 0;
1683 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1684 		goto nla_put_failure;
1685 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1686 		goto nla_put_failure;
1687 
1688 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1689 					 NULL, &d, TCA_PAD) < 0)
1690 		goto nla_put_failure;
1691 
1692 	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1693 		goto nla_put_failure;
1694 
1695 	if (gnet_stats_finish_copy(&d) < 0)
1696 		goto nla_put_failure;
1697 
1698 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1699 	return skb->len;
1700 
1701 out_nlmsg_trim:
1702 nla_put_failure:
1703 	nlmsg_trim(skb, b);
1704 	return -1;
1705 }
1706 
1707 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1708 			 struct nlmsghdr *n, struct Qdisc *q,
1709 			 unsigned long cl, int event)
1710 {
1711 	struct sk_buff *skb;
1712 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1713 
1714 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1715 	if (!skb)
1716 		return -ENOBUFS;
1717 
1718 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1719 		kfree_skb(skb);
1720 		return -EINVAL;
1721 	}
1722 
1723 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1724 			      n->nlmsg_flags & NLM_F_ECHO);
1725 }
1726 
1727 struct qdisc_dump_args {
1728 	struct qdisc_walker	w;
1729 	struct sk_buff		*skb;
1730 	struct netlink_callback	*cb;
1731 };
1732 
1733 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1734 {
1735 	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1736 
1737 	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
1738 			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1739 }
1740 
1741 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1742 				struct tcmsg *tcm, struct netlink_callback *cb,
1743 				int *t_p, int s_t)
1744 {
1745 	struct qdisc_dump_args arg;
1746 
1747 	if (tc_qdisc_dump_ignore(q) ||
1748 	    *t_p < s_t || !q->ops->cl_ops ||
1749 	    (tcm->tcm_parent &&
1750 	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1751 		(*t_p)++;
1752 		return 0;
1753 	}
1754 	if (*t_p > s_t)
1755 		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1756 	arg.w.fn = qdisc_class_dump;
1757 	arg.skb = skb;
1758 	arg.cb = cb;
1759 	arg.w.stop  = 0;
1760 	arg.w.skip = cb->args[1];
1761 	arg.w.count = 0;
1762 	q->ops->cl_ops->walk(q, &arg.w);
1763 	cb->args[1] = arg.w.count;
1764 	if (arg.w.stop)
1765 		return -1;
1766 	(*t_p)++;
1767 	return 0;
1768 }
1769 
1770 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1771 			       struct tcmsg *tcm, struct netlink_callback *cb,
1772 			       int *t_p, int s_t)
1773 {
1774 	struct Qdisc *q;
1775 
1776 	if (!root)
1777 		return 0;
1778 
1779 	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1780 		return -1;
1781 
1782 	list_for_each_entry(q, &root->list, list) {
1783 		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1784 			return -1;
1785 	}
1786 
1787 	return 0;
1788 }
1789 
1790 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1791 {
1792 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
1793 	struct net *net = sock_net(skb->sk);
1794 	struct netdev_queue *dev_queue;
1795 	struct net_device *dev;
1796 	int t, s_t;
1797 
1798 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
1799 		return 0;
1800 	dev = dev_get_by_index(net, tcm->tcm_ifindex);
1801 	if (!dev)
1802 		return 0;
1803 
1804 	s_t = cb->args[0];
1805 	t = 0;
1806 
1807 	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1808 		goto done;
1809 
1810 	dev_queue = dev_ingress_queue(dev);
1811 	if (dev_queue &&
1812 	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
1813 				&t, s_t) < 0)
1814 		goto done;
1815 
1816 done:
1817 	cb->args[0] = t;
1818 
1819 	dev_put(dev);
1820 	return skb->len;
1821 }
1822 
1823 /* Main classifier routine: scans the classifier chain attached
1824  * to this qdisc, (optionally) tests for the protocol and asks
1825  * specific classifiers.
1826  */
1827 int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1828 		struct tcf_result *res, bool compat_mode)
1829 {
1830 	__be16 protocol = tc_skb_protocol(skb);
1831 #ifdef CONFIG_NET_CLS_ACT
1832 	const struct tcf_proto *old_tp = tp;
1833 	int limit = 0;
1834 
1835 reclassify:
1836 #endif
1837 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1838 		int err;
1839 
1840 		if (tp->protocol != protocol &&
1841 		    tp->protocol != htons(ETH_P_ALL))
1842 			continue;
1843 
1844 		err = tp->classify(skb, tp, res);
1845 #ifdef CONFIG_NET_CLS_ACT
1846 		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
1847 			goto reset;
1848 #endif
1849 		if (err >= 0)
1850 			return err;
1851 	}
1852 
1853 	return TC_ACT_UNSPEC; /* signal: continue lookup */
1854 #ifdef CONFIG_NET_CLS_ACT
1855 reset:
1856 	if (unlikely(limit++ >= MAX_REC_LOOP)) {
1857 		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
1858 				       tp->q->ops->id, tp->prio & 0xffff,
1859 				       ntohs(tp->protocol));
1860 		return TC_ACT_SHOT;
1861 	}
1862 
1863 	tp = old_tp;
1864 	protocol = tc_skb_protocol(skb);
1865 	goto reclassify;
1866 #endif
1867 }
1868 EXPORT_SYMBOL(tc_classify);
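
/*
 * Illustrative sketch (editorial): how a classful qdisc's enqueue path
 * typically consumes tc_classify().  struct example_cls_data and its
 * filter_list are hypothetical; the TC_ACT_* handling mirrors what
 * in-tree classful qdiscs do under CONFIG_NET_CLS_ACT.
 */
struct example_cls_data {
	struct tcf_proto __rcu *filter_list;
};

static struct example_class *example_classify(struct sk_buff *skb,
					      struct Qdisc *sch, int *qerr)
{
	struct example_cls_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, rcu_dereference_bh(q->filter_list),
			     &res, false);
	if (result < 0)
		return NULL;	/* TC_ACT_UNSPEC: no filter matched */
#ifdef CONFIG_NET_CLS_ACT
	switch (result) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	return (struct example_class *)res.class;
}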
1869 
1870 bool tcf_destroy(struct tcf_proto *tp, bool force)
1871 {
1872 	if (tp->ops->destroy(tp, force)) {
1873 		module_put(tp->ops->owner);
1874 		kfree_rcu(tp, rcu);
1875 		return true;
1876 	}
1877 
1878 	return false;
1879 }
1880 
1881 void tcf_destroy_chain(struct tcf_proto __rcu **fl)
1882 {
1883 	struct tcf_proto *tp;
1884 
1885 	while ((tp = rtnl_dereference(*fl)) != NULL) {
1886 		RCU_INIT_POINTER(*fl, tp->next);
1887 		tcf_destroy(tp, true);
1888 	}
1889 }
1890 EXPORT_SYMBOL(tcf_destroy_chain);
1891 
1892 #ifdef CONFIG_PROC_FS
1893 static int psched_show(struct seq_file *seq, void *v)
1894 {
1895 	seq_printf(seq, "%08x %08x %08x %08x\n",
1896 		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1897 		   1000000,
1898 		   (u32)NSEC_PER_SEC / hrtimer_resolution);
1899 
1900 	return 0;
1901 }
1902 
1903 static int psched_open(struct inode *inode, struct file *file)
1904 {
1905 	return single_open(file, psched_show, NULL);
1906 }
1907 
1908 static const struct file_operations psched_fops = {
1909 	.owner = THIS_MODULE,
1910 	.open = psched_open,
1911 	.read  = seq_read,
1912 	.llseek = seq_lseek,
1913 	.release = single_release,
1914 };
1915 
1916 static int __net_init psched_net_init(struct net *net)
1917 {
1918 	struct proc_dir_entry *e;
1919 
1920 	e = proc_create("psched", 0, net->proc_net, &psched_fops);
1921 	if (e == NULL)
1922 		return -ENOMEM;
1923 
1924 	return 0;
1925 }
1926 
1927 static void __net_exit psched_net_exit(struct net *net)
1928 {
1929 	remove_proc_entry("psched", net->proc_net);
1930 }
1931 #else
1932 static int __net_init psched_net_init(struct net *net)
1933 {
1934 	return 0;
1935 }
1936 
1937 static void __net_exit psched_net_exit(struct net *net)
1938 {
1939 }
1940 #endif
1941 
1942 static struct pernet_operations psched_net_ops = {
1943 	.init = psched_net_init,
1944 	.exit = psched_net_exit,
1945 };
1946 
1947 static int __init pktsched_init(void)
1948 {
1949 	int err;
1950 
1951 	err = register_pernet_subsys(&psched_net_ops);
1952 	if (err) {
1953 		pr_err("pktsched_init: "
1954 		       "cannot initialize per netns operations\n");
1955 		return err;
1956 	}
1957 
1958 	register_qdisc(&pfifo_fast_ops);
1959 	register_qdisc(&pfifo_qdisc_ops);
1960 	register_qdisc(&bfifo_qdisc_ops);
1961 	register_qdisc(&pfifo_head_drop_qdisc_ops);
1962 	register_qdisc(&mq_qdisc_ops);
1963 	register_qdisc(&noqueue_qdisc_ops);
1964 
1965 	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
1966 	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
1967 	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
1968 	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
1969 	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
1970 	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
1971 
1972 	return 0;
1973 }
1974 
1975 subsys_initcall(pktsched_init);
1976