xref: /linux/net/sched/sch_api.c (revision ea8d7647f9ddf1f81e2027ed305299797299aa03)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/netdev_lock.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

#include <trace/events/qdisc.h>

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box
   that is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in an order and at times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the parts of the work common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty, only that the
   discipline does not want to send anything at this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not
   the real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP 	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN	 	- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore

   Auxiliary routines:

   ---peek

   like dequeue, but without removing the packet from the queue

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
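
/* Illustration (not part of the original file): the enqueue/dequeue
 * contract described above maps onto the Qdisc_ops callbacks. A minimal
 * work-conserving FIFO pair, closely modeled on sch_fifo.c, might look
 * like this -- the "example_" names are hypothetical:
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
 *			return qdisc_enqueue_tail(skb, sch); // NET_XMIT_SUCCESS (0)
 *		return qdisc_drop(skb, sch, to_free);        // NET_XMIT_DROP
 *	}
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		return qdisc_dequeue_head(sch); // may be NULL; q.qlen stays valid
 *	}
 */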

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->find && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
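
/* Usage sketch (hypothetical module, not part of this file): a scheduler
 * module registers its ops once at load time, mirroring what e.g.
 * sch_fq.c does:
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= sizeof(struct example_sched_data),
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_dequeued,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_qdisc(&example_qdisc_ops);
 *	}
 *	module_init(example_module_init);
 *
 * Note the invariants enforced above: providing dequeue without peek is
 * rejected with -EINVAL, and classful ops must supply find/walk/leaf.
 */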

void unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);

	WARN(err, "unregister qdisc(%s) failed\n", qops->id);
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strscpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module(NET_SCH_ALIAS_PREFIX "%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif
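
/* For reference: besides the kernel config above, qdisc_set_default() is
 * reached from the net.core.default_qdisc sysctl, e.g.
 *
 *	# sysctl -w net.core.default_qdisc=fq_codel
 *
 * after which newly attached default qdiscs use fq_codel instead of the
 * compiled-in default.
 */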

/* We know the handle. Find the qdisc among all qdiscs attached to the device
 * (the root qdisc, all its children, children of children etc.)
 * Note: the caller holds either the rtnl lock or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
				   lockdep_rtnl_is_held()) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
			handle);
out:
	return q;
}

struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
	struct netdev_queue *nq;
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
	if (q)
		goto out;

	nq = dev_ingress_queue_rcu(dev);
	if (nq)
		q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
					  handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->find(p, classid);

	if (cl == 0)
		return NULL;
	return cops->leaf(p, cl);
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* Older iproute2 versions did not transfer the linklayer setting, and
 * the rate table lookup system has been dropped from the kernel. To stay
 * backward compatible with older iproute2 tc utilities, we detect the
 * linklayer setting by checking whether the rate table was modified.
 *
 * For linklayer ATM table entries, the rate table is aligned to 48
 * bytes, so some table entries will contain the same value. The mpu
 * (min packet unit) is also encoded into the old rate table, so starting
 * from the mpu we find the low and high table entries that map this
 * cell. If these entries contain the same value, then the rate table has
 * been modified for linklayer ATM.
 *
 * This is done by rounding mpu up to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
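
/* Worked example (illustrative numbers, not from the original file): with
 * mpu = 96 and cell_log = 3, low = roundup(96, 48) = 96 and
 * high = roundup(97, 48) = 144, giving cell_low = 96 >> 3 = 12 and
 * cell_high = (144 >> 3) - 1 = 17. If rtab[12] == rtab[17], the table was
 * flattened to 48-byte ATM cells and TC_LINKLAYER_ATM is reported;
 * otherwise Ethernet is assumed.
 */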

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 ||
	    r->cell_log == 0 || r->cell_log >= 32 ||
	    nla_len(tab) != TC_RTAB_SIZE) {
		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
		return NULL;
	}

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	} else {
		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
					       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
					  extack);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE]) {
		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
		return ERR_PTR(-EINVAL);
	}

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA]) {
			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
			return ERR_PTR(-EINVAL);
		}
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0)) {
		NL_SET_ERR_MSG(extack, "Invalid size of size table");
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 &&
		    memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
			continue;
		stab->refcnt++;
		return stab;
	}

	if (s->size_log > STAB_SIZE_LOG_MAX ||
	    s->cell_log > STAB_SIZE_LOG_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
		return ERR_PTR(-EINVAL);
	}

	stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, flex_array_size(stab, data, tsize));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		kfree_rcu(tab, rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
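
/* Worked example (illustrative numbers, not from the original file): with
 * overhead = 24, cell_align = 0, cell_log = 6, size_log = 0 and
 * tsize = 512, a 1000-byte skb gives pkt_len = 1024 and
 * slot = 1024 >> 6 = 16, so qdisc_skb_cb(skb)->pkt_len becomes
 * stab->data[16]. Oversized packets (slot >= tsize) are extrapolated
 * from the last table entry, as coded above.
 */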

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid)
{
	hrtimer_setup(&wd->timer, qdisc_watchdog, clockid, HRTIMER_MODE_ABS_PINNED);
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
				      u64 delta_ns)
{
	bool deactivated;

	rcu_read_lock();
	deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
			       &qdisc_root_sleeping(wd->qdisc)->state);
	rcu_read_unlock();
	if (deactivated)
		return;

	if (hrtimer_is_queued(&wd->timer)) {
		u64 softexpires;

		softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer));
		/* If timer is already set in [expires, expires + delta_ns],
		 * do not reprogram it.
		 */
		if (softexpires - expires <= delta_ns)
			return;
	}

	hrtimer_start_range_ns(&wd->timer,
			       ns_to_ktime(expires),
			       delta_ns,
			       HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
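
/* Usage sketch (not part of this file): a rate-limiting qdisc whose
 * dequeue() decides the next packet may not go out before @next_ns
 * typically returns NULL and arms the watchdog via the pkt_sched.h
 * wrapper:
 *
 *	qdisc_watchdog_schedule_ns(&q->watchdog, next_ns);
 *
 * which calls qdisc_watchdog_schedule_range_ns() with delta_ns == 0.
 * When the hrtimer fires, qdisc_watchdog() above reschedules the root
 * qdisc so dequeue runs again.
 */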

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	struct hlist_head *h;
	unsigned int i;

	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (!clhash->hash)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
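
/* Usage sketch (hypothetical classful qdisc, not part of this file): the
 * helpers above are meant to be paired -- init the hash in ->init(),
 * insert each new class under the tree lock, then let the table grow
 * outside of it, mirroring classful schedulers such as sch_htb.c:
 *
 *	err = qdisc_class_hash_init(&q->clhash);
 *	...
 *	sch_tree_lock(sch);
 *	qdisc_class_hash_insert(&q->clhash, &cl->common);
 *	sch_tree_unlock(sch);
 *	qdisc_class_hash_grow(sch, &q->clhash);
 */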

/* Allocate a unique handle from the space managed by the kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while (--i > 0);

	return 0;
}

void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	bool notify;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (parentid == TC_H_ROOT)
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* Notify parent qdisc only if child qdisc becomes empty.
		 *
		 * If the child was empty even before the update then the
		 * backlog counter is screwed and we skip the notification
		 * because the parent class is already passive.
		 *
		 * If the original child was offloaded then it is allowed
		 * to be seen as empty, so the parent is notified anyway.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
						       !qdisc_is_offloaded);
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
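
/* Usage sketch (not part of this file): a qdisc that drops or steals
 * packets outside of its own enqueue path must propagate the change
 * upwards, e.g. after dropping one packet of qdisc_pkt_len(skb) bytes:
 *
 *	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
 *
 * so that every ancestor's q.qlen and qstats.backlog stay consistent and
 * parent classes can be deactivated when the child becomes empty.
 */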

int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
			      void *type_data)
{
	struct net_device *dev = qdisc_dev(sch);
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;
	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
	if (err == -EOPNOTSUPP)
		return 0;

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;

	return err;
}
EXPORT_SYMBOL(qdisc_offload_dump_helper);

void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack)
{
	bool any_qdisc_is_offloaded;
	int err;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);

	/* Don't report error if the graft is part of destroy operation. */
	if (!err || !new || new == &noop_qdisc)
		return;

	/* Don't report error if the parent, the old child and the new
	 * one are not offloaded.
	 */
	any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;

	if (any_qdisc_is_offloaded)
		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);

void qdisc_offload_query_caps(struct net_device *dev,
			      enum tc_setup_type type,
			      void *caps, size_t caps_len)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_query_caps_base base = {
		.type = type,
		.caps = caps,
	};

	memset(caps, 0, caps_len);

	if (ops->ndo_setup_tc)
		ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
}
EXPORT_SYMBOL(qdisc_offload_query_caps);
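
/* Usage sketch (not part of this file): offload-aware schedulers query
 * device capabilities before programming hardware; taprio, for example,
 * does roughly:
 *
 *	struct tc_taprio_caps caps;
 *
 *	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
 *				 &caps, sizeof(caps));
 *
 * Drivers without ndo_setup_tc simply leave @caps zeroed.
 */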

static void qdisc_offload_graft_root(struct net_device *dev,
				     struct Qdisc *new, struct Qdisc *old,
				     struct netlink_ext_ack *extack)
{
	struct tc_root_qopt_offload graft_offload = {
		.command	= TC_ROOT_GRAFT,
		.handle		= new ? new->handle : 0,
		.ingress	= (new && new->flags & TCQ_F_INGRESS) ||
				  (old && old->flags & TCQ_F_INGRESS),
	};

	qdisc_offload_graft_helper(dev, NULL, new, old,
				   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event,
			 struct netlink_ext_ack *extack)
{
	struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	u32 block_index;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->ingress_block_get) {
		block_index = q->ops->ingress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->egress_block_get) {
		block_index = q->ops->egress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
		goto nla_put_failure;
	qlen = qdisc_qlen_sum(q);

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto out_nlmsg_trim;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}

static int qdisc_get_notify(struct net *net, struct sk_buff *oskb,
			    struct nlmsghdr *n, u32 clid, struct Qdisc *q,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (!tc_qdisc_dump_ignore(q, false)) {
		if (tc_fill_qdisc(skb, q, clid, portid, n->nlmsg_seq, 0,
				  RTM_NEWQDISC, extack) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new,
			struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC, extack) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new,
			       struct netlink_ext_ack *extack)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new, extack);

	if (old)
		qdisc_put(old);
}

static void qdisc_clear_nolock(struct Qdisc *sch)
{
	sch->flags &= ~TCQ_F_NOLOCK;
	if (!(sch->flags & TCQ_F_CPUSTATS))
		return;

	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	sch->cpu_bstats = NULL;
	sch->cpu_qstats = NULL;
	sch->flags &= ~TCQ_F_CPUSTATS;
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate, send a netlink notification using "skb"
 * and "n".
 *
 * On success, destroy the old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old,
		       struct netlink_ext_ack *extack)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);

	if (parent == NULL) {
		unsigned int i, num_q, ingress;
		struct netdev_queue *dev_queue;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			ingress = 1;
			dev_queue = dev_ingress_queue(dev);
			if (!dev_queue) {
				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
				return -ENOENT;
			}

			q = rtnl_dereference(dev_queue->qdisc_sleeping);

			/* This is the counterpart of that qdisc_refcount_inc_nz() call in
			 * __tcf_qdisc_find() for filter requests.
			 */
			if (!qdisc_refcount_dec_if_one(q)) {
				NL_SET_ERR_MSG(extack,
					       "Current ingress or clsact Qdisc has ongoing filter requests");
				return -EBUSY;
			}
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		qdisc_offload_graft_root(dev, new, old, extack);

		if (new && new->ops->attach && !ingress)
			goto skip;

		if (!ingress) {
			for (i = 0; i < num_q; i++) {
				dev_queue = netdev_get_tx_queue(dev, i);
				old = dev_graft_qdisc(dev_queue, new);

				if (new && i > 0)
					qdisc_refcount_inc(new);
				qdisc_put(old);
			}
		} else {
			old = dev_graft_qdisc(dev_queue, NULL);

			/* {ingress,clsact}_destroy() @old before grafting @new to avoid
			 * unprotected concurrent accesses to net_device::miniq_{in,e}gress
			 * pointer(s) in mini_qdisc_pair_swap().
			 */
			qdisc_notify(net, skb, n, classid, old, new, extack);
			qdisc_destroy(old);

			dev_graft_qdisc(dev_queue, new);
		}

skip:
		if (!ingress) {
			old = rtnl_dereference(dev->qdisc);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);

			notify_and_destroy(net, skb, n, classid, old, new, extack);

			if (new && new->ops->attach)
				new->ops->attach(new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
		unsigned long cl;
		int err;

		/* Only support running class lockless if parent is lockless */
		if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
			qdisc_clear_nolock(new);

		if (!cops || !cops->graft)
			return -EOPNOTSUPP;

		cl = cops->find(parent, classid);
		if (!cl) {
			NL_SET_ERR_MSG(extack, "Specified class not found");
			return -ENOENT;
		}

		if (new && new->ops == &noqueue_qdisc_ops) {
			NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
			return -EINVAL;
		}

		if (new &&
		    !(parent->flags & TCQ_F_MQROOT) &&
		    rcu_access_pointer(new->stab)) {
			NL_SET_ERR_MSG(extack, "STAB not supported on a non root");
			return -EINVAL;
		}
		err = cops->graft(parent, cl, new, &old, extack);
		if (err)
			return err;
		notify_and_destroy(net, skb, n, classid, old, new, extack);
	}
	return 0;
}

static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
				   struct netlink_ext_ack *extack)
{
	u32 block_index;

	if (tca[TCA_INGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->ingress_block_set) {
			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->ingress_block_set(sch, block_index);
	}
	if (tca[TCA_EGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->egress_block_set) {
			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->egress_block_set(sch, block_index);
	}
	return 0;
}
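
/* Example (iproute2 syntax, illustrative): shared blocks are requested
 * from userspace with the *_block parameters, which arrive here as
 * TCA_INGRESS_BLOCK / TCA_EGRESS_BLOCK:
 *
 *	# tc qdisc add dev eth0 ingress_block 22 clsact
 *
 * Only qdiscs implementing ingress_block_set/egress_block_set (the
 * ingress and clsact qdiscs) accept them; anything else fails with
 * -EOPNOTSUPP above.
 */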

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  u32 parent, u32 handle,
				  struct nlattr **tca, int *errp,
				  struct netlink_ext_ack *extack)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];

		if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the meantime.
			 */
			netdev_unlock_ops(dev);
			rtnl_unlock();
			request_module(NET_SCH_ALIAS_PREFIX "%s", name);
			rtnl_lock();
			netdev_lock_ops(dev);
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try qdisc_lookup_ops() again,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (!ops) {
		NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
		goto err_out;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		if (!(sch->flags & TCQ_F_INGRESS)) {
			NL_SET_ERR_MSG(extack,
				       "Specified parent ID is reserved for ingress and clsact Qdiscs");
			err = -EINVAL;
			goto err_out3;
		}
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			if (handle == 0) {
				NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
				err = -ENOSPC;
				goto err_out3;
			}
		}
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exists to stay backward compatible with a userspace
	 * loophole that allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init), and then forgetting to reinit tx_queue_len
	 * before attaching a qdisc again.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		WRITE_ONCE(dev->tx_queue_len, DEFAULT_TX_QUEUE_LEN);
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	err = qdisc_block_indexes_set(sch, tca, extack);
	if (err)
		goto err_out3;

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab)) {
			err = PTR_ERR(stab);
			goto err_out3;
		}
		rcu_assign_pointer(sch->stab, stab);
	}

	if (ops->init) {
		err = ops->init(sch, tca[TCA_OPTIONS], extack);
		if (err != 0)
			goto err_out4;
	}

	if (tca[TCA_RATE]) {
		err = -EOPNOTSUPP;
		if (sch->flags & TCQ_F_MQROOT) {
			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
			goto err_out4;
		}

		err = gen_new_estimator(&sch->bstats,
					sch->cpu_bstats,
					&sch->rate_est,
					NULL,
					true,
					tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
			goto err_out4;
		}
	}

	qdisc_hash_add(sch, false);
	trace_qdisc_create(ops, dev, parent);

	return sch;

err_out4:
	/* Even if ops->init() failed, we call ops->destroy()
	 * like qdisc_create_dflt().
	 */
	if (ops->destroy)
		ops->destroy(sch);
	qdisc_put_stab(rtnl_dereference(sch->stab));
err_out3:
	lockdep_unregister_key(&sch->root_lock_key);
	netdev_put(dev, &sch->dev_tracker);
	qdisc_free(sch);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
			struct netlink_ext_ack *extack)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (!sch->ops->change) {
			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
			return -EINVAL;
		}
		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
			return -EOPNOTSUPP;
		}
		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		 * because the change cannot be undone.
		 */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      true,
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
	[TCA_KIND]		= { .type = NLA_STRING },
	[TCA_RATE]		= { .type = NLA_BINARY,
				    .len = sizeof(struct tc_estimator) },
	[TCA_STAB]		= { .type = NLA_NESTED },
	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
	[TCA_CHAIN]		= { .type = NLA_U32 },
	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },
};

/*
 * Delete/get qdisc.
 */

static int __tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack,
			  struct net_device *dev,
			  struct nlattr *tca[TCA_MAX + 1],
			  struct tcmsg *tcm)
{
	struct net *net = sock_net(skb->sk);
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	u32 clid;
	int err;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
			}
		} else {
			q = rtnl_dereference(dev->qdisc);
		}
		if (!q) {
			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
			return -ENOENT;
		}

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Invalid handle");
			return -EINVAL;
		}
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q) {
			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
			return -ENOENT;
		}
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc");
		return -EINVAL;
	}

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid) {
			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
			return -EINVAL;
		}
		if (q->handle == 0) {
			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
			return -ENOENT;
		}
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
		if (err != 0)
			return err;
	} else {
		qdisc_get_notify(net, skb, n, clid, q, NULL);
	}
	return 0;
}

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	netdev_lock_ops(dev);
	err = __tc_get_qdisc(skb, n, extack, dev, tca, tcm);
	netdev_unlock_ops(dev);

	return err;
}

static bool req_create_or_replace(struct nlmsghdr *n)
{
	return (n->nlmsg_flags & NLM_F_CREATE &&
		n->nlmsg_flags & NLM_F_REPLACE);
}

static bool req_create_exclusive(struct nlmsghdr *n)
{
	return (n->nlmsg_flags & NLM_F_CREATE &&
		n->nlmsg_flags & NLM_F_EXCL);
}

static bool req_change(struct nlmsghdr *n)
{
	return (!(n->nlmsg_flags & NLM_F_CREATE) &&
		!(n->nlmsg_flags & NLM_F_REPLACE) &&
		!(n->nlmsg_flags & NLM_F_EXCL));
}
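
/* For reference, iproute2 maps its qdisc commands onto these flags
 * roughly as follows: "tc qdisc add" sends NLM_F_CREATE | NLM_F_EXCL,
 * "tc qdisc replace" sends NLM_F_CREATE | NLM_F_REPLACE, and
 * "tc qdisc change" sends none of the three -- the req_change() case.
 */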

static int __tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			     struct netlink_ext_ack *extack,
			     struct net_device *dev,
			     struct nlattr *tca[TCA_MAX + 1],
			     struct tcmsg *tcm,
			     bool *replay)
{
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	u32 clid;
	int err;

	clid = tcm->tcm_parent;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
			}
		} else {
			q = rtnl_dereference(dev->qdisc);
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
					return -EEXIST;
				}
				if (TC_H_MIN(tcm->tcm_handle)) {
					NL_SET_ERR_MSG(extack, "Invalid minor handle");
					return -EINVAL;
				}
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (q->parent != tcm->tcm_parent) {
					NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent");
					return -EINVAL;
				}
				if (n->nlmsg_flags & NLM_F_EXCL) {
					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
					return -EEXIST;
				}
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
					NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc");
					return -EINVAL;
				}
				if (q->flags & TCQ_F_INGRESS) {
					NL_SET_ERR_MSG(extack,
						       "Cannot regraft ingress or clsact Qdiscs");
					return -EINVAL;
				}
				if (q == p ||
				    (p && check_loop(q, p, 0))) {
					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
					return -ELOOP;
				}
				if (clid == TC_H_INGRESS) {
					NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
					return -EINVAL;
				}
				qdisc_refcount_inc(q);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and have a choice:
				 *   1) change it or 2) create/graft a new one.
				 *   If the requested qdisc kind is different
				 *   from the existing one, then we choose graft.
				 *   If they are the same then this is a "change"
				 *   operation - just let it fall through..
				 *
				 *   1. We are allowed to create/graft only
				 *   if the request explicitly states
				 *   "please create if it doesn't exist".
				 *
				 *   2. If the request is for an exclusive create
				 *   then the qdisc tcm_handle is not expected
				 *   to exist, so we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   This happens when, for example, the tc
				 *   utility issues a "change" command.
				 *   Alas, it is a sort of hole in the API; we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft.
				 */
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
					if (req_create_or_replace(n) ||
					    req_create_exclusive(n))
						goto create_n_graft;
					else if (req_change(n))
						goto create_n_graft2;
				}
			}
		}
	} else {
		if (!tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
			return -EINVAL;
		}
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (!q) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		return -ENOENT;
	}
	if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
		return -EEXIST;
	}
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc");
		return -EINVAL;
	}
	err = qdisc_change(q, tca, extack);
	if (err == 0)
		qdisc_notify(sock_net(skb->sk), skb, n, clid, NULL, q, extack);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
		return -ENOENT;
	}
create_n_graft2:
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev)) {
			q = qdisc_create(dev, dev_ingress_queue(dev),
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err, extack);
		} else {
			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
			err = -ENOENT;
		}
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err, extack);
	}
	if (q == NULL) {
		if (err == -EAGAIN) {
			*replay = true;
			return 0;
		}
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
	if (err) {
		if (q)
			qdisc_put(q);
		return err;
	}

	return 0;
}

/*
 * Create/change qdisc.
 */
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct tcmsg *tcm;
	bool replay;
	int err;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	replay = false;
	netdev_lock_ops(dev);
	err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm, &replay);
	netdev_unlock_ops(dev);
	if (replay)
		goto replay;

	return err;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC, NULL) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the global
	 * qdisc hashtable, we don't want to hit it again
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC, NULL) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		netdev_lock_ops(dev);
		if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
				       skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0) {
			netdev_unlock_ops(dev);
			goto done;
		}

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
				       skb, cb, &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0) {
			netdev_unlock_ops(dev);
			goto done;
		}
		netdev_unlock_ops(dev);

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
1958 
1959 
1960 
1961 /************************************************
1962  *	Traffic classes manipulation.		*
1963  ************************************************/
1964 
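/*
 * Build one RTM_*TCLASS message: tcmsg header and TCA_KIND, then the
 * class's own ->dump() attributes, then statistics in both TCA_STATS2
 * and legacy TCA_STATS form via the _compat helper.  On overflow the
 * partial message is trimmed and -1 returned.
 */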
1965 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1966 			  unsigned long cl, u32 portid, u32 seq, u16 flags,
1967 			  int event, struct netlink_ext_ack *extack)
1968 {
1969 	struct tcmsg *tcm;
1970 	struct nlmsghdr  *nlh;
1971 	unsigned char *b = skb_tail_pointer(skb);
1972 	struct gnet_dump d;
1973 	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1974 
1975 	cond_resched();
1976 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1977 	if (!nlh)
1978 		goto out_nlmsg_trim;
1979 	tcm = nlmsg_data(nlh);
1980 	tcm->tcm_family = AF_UNSPEC;
1981 	tcm->tcm__pad1 = 0;
1982 	tcm->tcm__pad2 = 0;
1983 	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1984 	tcm->tcm_parent = q->handle;
1985 	tcm->tcm_handle = q->handle;
1986 	tcm->tcm_info = 0;
1987 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1988 		goto nla_put_failure;
1989 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1990 		goto nla_put_failure;
1991 
1992 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1993 					 NULL, &d, TCA_PAD) < 0)
1994 		goto nla_put_failure;
1995 
1996 	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1997 		goto nla_put_failure;
1998 
1999 	if (gnet_stats_finish_copy(&d) < 0)
2000 		goto nla_put_failure;
2001 
2002 	if (extack && extack->_msg &&
2003 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2004 		goto out_nlmsg_trim;
2005 
2006 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2007 
2008 	return skb->len;
2009 
2010 out_nlmsg_trim:
2011 nla_put_failure:
2012 	nlmsg_trim(skb, b);
2013 	return -1;
2014 }
2015 
2016 static int tclass_notify(struct net *net, struct sk_buff *oskb,
2017 			 struct nlmsghdr *n, struct Qdisc *q,
2018 			 unsigned long cl, int event, struct netlink_ext_ack *extack)
2019 {
2020 	struct sk_buff *skb;
2021 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2022 
2023 	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2024 		return 0;
2025 
2026 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2027 	if (!skb)
2028 		return -ENOBUFS;
2029 
2030 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
2031 		kfree_skb(skb);
2032 		return -EINVAL;
2033 	}
2034 
2035 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2036 			      n->nlmsg_flags & NLM_F_ECHO);
2037 }
2038 
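/*
 * Like tclass_notify(), but without the rtnl_notify_needed() shortcut:
 * a GET request must always build its reply, whether or not anyone
 * listens on the multicast group.
 */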
2039 static int tclass_get_notify(struct net *net, struct sk_buff *oskb,
2040 			     struct nlmsghdr *n, struct Qdisc *q,
2041 			     unsigned long cl, struct netlink_ext_ack *extack)
2042 {
2043 	struct sk_buff *skb;
2044 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2045 
2046 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2047 	if (!skb)
2048 		return -ENOBUFS;
2049 
2050 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, RTM_NEWTCLASS,
2051 			   extack) < 0) {
2052 		kfree_skb(skb);
2053 		return -EINVAL;
2054 	}
2055 
2056 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2057 			      n->nlmsg_flags & NLM_F_ECHO);
2058 }
2059 
2060 static int tclass_del_notify(struct net *net,
2061 			     const struct Qdisc_class_ops *cops,
2062 			     struct sk_buff *oskb, struct nlmsghdr *n,
2063 			     struct Qdisc *q, unsigned long cl,
2064 			     struct netlink_ext_ack *extack)
2065 {
2066 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2067 	struct sk_buff *skb;
2068 	int err = 0;
2069 
2070 	if (!cops->delete)
2071 		return -EOPNOTSUPP;
2072 
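	/*
	 * Build the notification before calling ->delete: once the class is
	 * gone, its attributes and statistics can no longer be dumped.  The
	 * skb is sent only after the delete has succeeded.
	 */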
2073 	if (rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
2074 		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2075 		if (!skb)
2076 			return -ENOBUFS;
2077 
2078 		if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
2079 				   RTM_DELTCLASS, extack) < 0) {
2080 			kfree_skb(skb);
2081 			return -EINVAL;
2082 		}
2083 	} else {
2084 		skb = NULL;
2085 	}
2086 
2087 	err = cops->delete(q, cl, extack);
2088 	if (err) {
2089 		kfree_skb(skb);
2090 		return err;
2091 	}
2092 
2093 	err = rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
2094 				   n->nlmsg_flags & NLM_F_ECHO);
2095 	return err;
2096 }
2097 
2098 #ifdef CONFIG_NET_CLS
2099 
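/*
 * Reverse binding: filters are installed against a numeric classid, but
 * each filter's ->bind_class() caches the class it resolves to.  When a
 * class is created or deleted underneath them, the walkers below revisit
 * every filter on every chain of the qdisc's blocks and rebind it under
 * the qdisc tree lock.
 */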
2100 struct tcf_bind_args {
2101 	struct tcf_walker w;
2102 	unsigned long base;
2103 	unsigned long cl;
2104 	u32 classid;
2105 };
2106 
2107 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2108 {
2109 	struct tcf_bind_args *a = (void *)arg;
2110 
2111 	if (n && tp->ops->bind_class) {
2112 		struct Qdisc *q = tcf_block_q(tp->chain->block);
2113 
2114 		sch_tree_lock(q);
2115 		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
2116 		sch_tree_unlock(q);
2117 	}
2118 	return 0;
2119 }
2120 
2121 struct tc_bind_class_args {
2122 	struct qdisc_walker w;
2123 	unsigned long new_cl;
2124 	u32 portid;
2125 	u32 clid;
2126 };
2127 
2128 static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
2129 				struct qdisc_walker *w)
2130 {
2131 	struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
2132 	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
2133 	struct tcf_block *block;
2134 	struct tcf_chain *chain;
2135 
2136 	block = cops->tcf_block(q, cl, NULL);
2137 	if (!block)
2138 		return 0;
2139 	for (chain = tcf_get_next_chain(block, NULL);
2140 	     chain;
2141 	     chain = tcf_get_next_chain(block, chain)) {
2142 		struct tcf_proto *tp;
2143 
2144 		for (tp = tcf_get_next_proto(chain, NULL);
2145 		     tp; tp = tcf_get_next_proto(chain, tp)) {
2146 			struct tcf_bind_args arg = {};
2147 
2148 			arg.w.fn = tcf_node_bind;
2149 			arg.classid = a->clid;
2150 			arg.base = cl;
2151 			arg.cl = a->new_cl;
2152 			tp->ops->walk(tp, &arg.w, true);
2153 		}
2154 	}
2155 
2156 	return 0;
2157 }
2158 
2159 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
2160 			   unsigned long new_cl)
2161 {
2162 	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
2163 	struct tc_bind_class_args args = {};
2164 
2165 	if (!cops->tcf_block)
2166 		return;
2167 	args.portid = portid;
2168 	args.clid = clid;
2169 	args.new_cl = new_cl;
2170 	args.w.fn = tc_bind_class_walker;
2171 	cops->walk(q, &args.w);
2172 }
2173 
2174 #else
2175 
2176 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
2177 			   unsigned long new_cl)
2178 {
2179 }
2180 
2181 #endif
2182 
2183 static int __tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
2184 			   struct netlink_ext_ack *extack,
2185 			   struct net_device *dev,
2186 			   struct nlattr *tca[TCA_MAX + 1],
2187 			   struct tcmsg *tcm)
2188 {
2189 	struct net *net = sock_net(skb->sk);
2190 	const struct Qdisc_class_ops *cops;
2191 	struct Qdisc *q = NULL;
2192 	unsigned long cl = 0;
2193 	unsigned long new_cl;
2194 	u32 portid;
2195 	u32 clid;
2196 	u32 qid;
2197 	int err;
2198 
2199 	/*
2200 	   parent == TC_H_UNSPEC - unspecified parent.
2201 	   parent == TC_H_ROOT   - class is root, which has no parent.
2202 	   parent == X:0	 - parent is root class.
2203 	   parent == X:Y	 - parent is a node in hierarchy.
2204 	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.
2205 
2206 	   handle == 0:0	 - generate handle from kernel pool.
2207 	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
2208 	   handle == X:Y	 - class is X:Y, fully specified.
2209 	   handle == X:0	 - root class.
2210 	 */
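	/*
	 * Worked example (editor's illustration): parent == 1:0 (0x00010000)
	 * and handle == 0:5 give qid = 0x00010000 from the parent's major in
	 * step 1, and the class later resolves to
	 * clid = TC_H_MAKE(0x00010000, 5), i.e. 1:5.
	 */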
2211 
2212 	/* Step 1. Determine qdisc handle X:0 */
2213 
2214 	portid = tcm->tcm_parent;
2215 	clid = tcm->tcm_handle;
2216 	qid = TC_H_MAJ(clid);
2217 
2218 	if (portid != TC_H_ROOT) {
2219 		u32 qid1 = TC_H_MAJ(portid);
2220 
2221 		if (qid && qid1) {
2222 			/* If both majors are known, they must be identical. */
2223 			if (qid != qid1)
2224 				return -EINVAL;
2225 		} else if (qid1) {
2226 			qid = qid1;
2227 		} else if (qid == 0)
2228 			qid = rtnl_dereference(dev->qdisc)->handle;
2229 
2230 		/* Now qid is genuine qdisc handle consistent
2231 		 * both with parent and child.
2232 		 *
2233 		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
2234 		 */
2235 		if (portid)
2236 			portid = TC_H_MAKE(qid, portid);
2237 	} else {
2238 		if (qid == 0)
2239 			qid = rtnl_dereference(dev->qdisc)->handle;
2240 	}
2241 
2242 	/* OK. Locate qdisc */
2243 	q = qdisc_lookup(dev, qid);
2244 	if (!q)
2245 		return -ENOENT;
2246 
2247 	/* And check that it supports classes */
2248 	cops = q->ops->cl_ops;
2249 	if (!cops)
2250 		return -EINVAL;
2251 
2252 	/* Now try to get class */
2253 	if (clid == 0) {
2254 		if (portid == TC_H_ROOT)
2255 			clid = qid;
2256 	} else
2257 		clid = TC_H_MAKE(qid, clid);
2258 
2259 	if (clid)
2260 		cl = cops->find(q, clid);
2261 
2262 	if (cl == 0) {
2263 		err = -ENOENT;
2264 		if (n->nlmsg_type != RTM_NEWTCLASS ||
2265 		    !(n->nlmsg_flags & NLM_F_CREATE))
2266 			goto out;
2267 	} else {
2268 		switch (n->nlmsg_type) {
2269 		case RTM_NEWTCLASS:
2270 			err = -EEXIST;
2271 			if (n->nlmsg_flags & NLM_F_EXCL)
2272 				goto out;
2273 			break;
2274 		case RTM_DELTCLASS:
2275 			err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
2276 			/* Unbind the deleted class from its filters by rebinding them to class 0 */
2277 			tc_bind_tclass(q, portid, clid, 0);
2278 			goto out;
2279 		case RTM_GETTCLASS:
2280 			err = tclass_get_notify(net, skb, n, q, cl, extack);
2281 			goto out;
2282 		default:
2283 			err = -EINVAL;
2284 			goto out;
2285 		}
2286 	}
2287 
2288 	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
2289 		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
2290 		return -EOPNOTSUPP;
2291 	}
2292 
2293 	/* Prevent creation of traffic classes with classid TC_H_ROOT */
2294 	if (clid == TC_H_ROOT) {
2295 		NL_SET_ERR_MSG(extack, "Cannot create traffic class with classid TC_H_ROOT");
2296 		return -EINVAL;
2297 	}
2298 
2299 	new_cl = cl;
2300 	err = -EOPNOTSUPP;
2301 	if (cops->change)
2302 		err = cops->change(q, clid, portid, tca, &new_cl, extack);
2303 	if (err == 0) {
2304 		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
2305 		/* We just created a new class; rebind matching filters to it. */
2306 		if (cl != new_cl)
2307 			tc_bind_tclass(q, portid, clid, new_cl);
2308 	}
2309 out:
2310 	return err;
2311 }
2312 
2313 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
2314 			 struct netlink_ext_ack *extack)
2315 {
2316 	struct net *net = sock_net(skb->sk);
2317 	struct tcmsg *tcm = nlmsg_data(n);
2318 	struct nlattr *tca[TCA_MAX + 1];
2319 	struct net_device *dev;
2320 	int err;
2321 
2322 	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
2323 				     rtm_tca_policy, extack);
2324 	if (err < 0)
2325 		return err;
2326 
2327 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2328 	if (!dev)
2329 		return -ENODEV;
2330 
2331 	netdev_lock_ops(dev);
2332 	err = __tc_ctl_tclass(skb, n, extack, dev, tca, tcm);
2333 	netdev_unlock_ops(dev);
2334 
2335 	return err;
2336 }
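/*
 * For orientation (editor's illustration): "tc class add dev eth0
 * parent 1: classid 1:10 ..." arrives here as RTM_NEWTCLASS with
 * NLM_F_CREATE | NLM_F_EXCL, tcm_parent == 0x00010000 and
 * tcm_handle == 0x0001000a; "tc class del" arrives as RTM_DELTCLASS.
 */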
2337 
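/*
 * The embedded walker must remain the first member: qdisc_class_dump()
 * casts the qdisc_walker pointer straight back to the containing struct.
 */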
2338 struct qdisc_dump_args {
2339 	struct qdisc_walker	w;
2340 	struct sk_buff		*skb;
2341 	struct netlink_callback	*cb;
2342 };
2343 
2344 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
2345 			    struct qdisc_walker *arg)
2346 {
2347 	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
2348 
2349 	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
2350 			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2351 			      RTM_NEWTCLASS, NULL);
2352 }
2353 
2354 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
2355 				struct tcmsg *tcm, struct netlink_callback *cb,
2356 				int *t_p, int s_t)
2357 {
2358 	struct qdisc_dump_args arg;
2359 
2360 	if (tc_qdisc_dump_ignore(q, false) ||
2361 	    *t_p < s_t || !q->ops->cl_ops ||
2362 	    (tcm->tcm_parent &&
2363 	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2364 		(*t_p)++;
2365 		return 0;
2366 	}
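	/* We have moved past the qdisc in which the previous dump stopped, so
	 * the per-class resume state in cb->args[1..] belonged to that qdisc
	 * and must be cleared.
	 */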
2367 	if (*t_p > s_t)
2368 		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2369 	arg.w.fn = qdisc_class_dump;
2370 	arg.skb = skb;
2371 	arg.cb = cb;
2372 	arg.w.stop  = 0;
2373 	arg.w.skip = cb->args[1];
2374 	arg.w.count = 0;
2375 	q->ops->cl_ops->walk(q, &arg.w);
2376 	cb->args[1] = arg.w.count;
2377 	if (arg.w.stop)
2378 		return -1;
2379 	(*t_p)++;
2380 	return 0;
2381 }
2382 
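/*
 * Dump the classes of @root and, when @recur is true, of every qdisc
 * hashed on its device.  A non-zero tcm_parent in the request restricts
 * the walk to the single qdisc with that major handle.
 */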
2383 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2384 			       struct tcmsg *tcm, struct netlink_callback *cb,
2385 			       int *t_p, int s_t, bool recur)
2386 {
2387 	struct Qdisc *q;
2388 	int b;
2389 
2390 	if (!root)
2391 		return 0;
2392 
2393 	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2394 		return -1;
2395 
2396 	if (!qdisc_dev(root) || !recur)
2397 		return 0;
2398 
2399 	if (tcm->tcm_parent) {
2400 		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2401 		if (q && q != root &&
2402 		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2403 			return -1;
2404 		return 0;
2405 	}
2406 	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2407 		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2408 			return -1;
2409 	}
2410 
2411 	return 0;
2412 }
2413 
2414 static int __tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb,
2415 			    struct tcmsg *tcm, struct net_device *dev)
2416 {
2417 	struct netdev_queue *dev_queue;
2418 	int t, s_t;
2419 
2420 	s_t = cb->args[0];
2421 	t = 0;
2422 
2423 	if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
2424 				skb, tcm, cb, &t, s_t, true) < 0)
2425 		goto done;
2426 
2427 	dev_queue = dev_ingress_queue(dev);
2428 	if (dev_queue &&
2429 	    tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
2430 				skb, tcm, cb, &t, s_t, false) < 0)
2431 		goto done;
2432 
2433 done:
2434 	cb->args[0] = t;
2435 
2436 	return skb->len;
2437 }
2438 
2439 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2440 {
2441 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2442 	struct net *net = sock_net(skb->sk);
2443 	struct net_device *dev;
2444 	int err;
2445 
2446 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2447 		return 0;
2448 
2449 	dev = dev_get_by_index(net, tcm->tcm_ifindex);
2450 	if (!dev)
2451 		return 0;
2452 
2453 	netdev_lock_ops(dev);
2454 	err = __tc_dump_tclass(skb, cb, tcm, dev);
2455 	netdev_unlock_ops(dev);
2456 
2457 	dev_put(dev);
2458 
2459 	return err;
2460 }
2461 
2462 #ifdef CONFIG_PROC_FS
2463 static int psched_show(struct seq_file *seq, void *v)
2464 {
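	/*
	 * Four hex words read by userspace tc to derive its clock conversion
	 * factors: ns per microsecond, ns per psched tick, a constant 1 MHz
	 * legacy clock resolution, and the hrtimer resolution expressed as a
	 * frequency in Hz.
	 */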
2465 	seq_printf(seq, "%08x %08x %08x %08x\n",
2466 		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2467 		   1000000,
2468 		   (u32)NSEC_PER_SEC / hrtimer_resolution);
2469 
2470 	return 0;
2471 }
2472 
2473 static int __net_init psched_net_init(struct net *net)
2474 {
2475 	struct proc_dir_entry *e;
2476 
2477 	e = proc_create_single("psched", 0, net->proc_net, psched_show);
2478 	if (!e)
2479 		return -ENOMEM;
2480 
2481 	return 0;
2482 }
2483 
2484 static void __net_exit psched_net_exit(struct net *net)
2485 {
2486 	remove_proc_entry("psched", net->proc_net);
2487 }
2488 #else
2489 static int __net_init psched_net_init(struct net *net)
2490 {
2491 	return 0;
2492 }
2493 
2494 static void __net_exit psched_net_exit(struct net *net)
2495 {
2496 }
2497 #endif
2498 
2499 static struct pernet_operations psched_net_ops = {
2500 	.init = psched_net_init,
2501 	.exit = psched_net_exit,
2502 };
2503 
2504 #if IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)
2505 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
2506 #endif
2507 
2508 static const struct rtnl_msg_handler psched_rtnl_msg_handlers[] __initconst = {
2509 	{.msgtype = RTM_NEWQDISC, .doit = tc_modify_qdisc},
2510 	{.msgtype = RTM_DELQDISC, .doit = tc_get_qdisc},
2511 	{.msgtype = RTM_GETQDISC, .doit = tc_get_qdisc,
2512 	 .dumpit = tc_dump_qdisc},
2513 	{.msgtype = RTM_NEWTCLASS, .doit = tc_ctl_tclass},
2514 	{.msgtype = RTM_DELTCLASS, .doit = tc_ctl_tclass},
2515 	{.msgtype = RTM_GETTCLASS, .doit = tc_ctl_tclass,
2516 	 .dumpit = tc_dump_tclass},
2517 };
2518 
2519 static int __init pktsched_init(void)
2520 {
2521 	int err;
2522 
2523 	err = register_pernet_subsys(&psched_net_ops);
2524 	if (err) {
2525 		pr_err("pktsched_init: cannot initialize per netns operations\n");
2527 		return err;
2528 	}
2529 
2530 	register_qdisc(&pfifo_fast_ops);
2531 	register_qdisc(&pfifo_qdisc_ops);
2532 	register_qdisc(&bfifo_qdisc_ops);
2533 	register_qdisc(&pfifo_head_drop_qdisc_ops);
2534 	register_qdisc(&mq_qdisc_ops);
2535 	register_qdisc(&noqueue_qdisc_ops);
2536 
2537 	rtnl_register_many(psched_rtnl_msg_handlers);
2538 
2539 	tc_wrapper_init();
2540 
2541 	return 0;
2542 }
2543 
2544 subsys_initcall(pktsched_init);
2545