1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * net/sched/sch_api.c Packet scheduler API. 4 * 5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 6 * 7 * Fixes: 8 * 9 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired. 10 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support 11 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support 12 */ 13 14 #include <linux/module.h> 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/string.h> 18 #include <linux/errno.h> 19 #include <linux/skbuff.h> 20 #include <linux/init.h> 21 #include <linux/proc_fs.h> 22 #include <linux/seq_file.h> 23 #include <linux/kmod.h> 24 #include <linux/list.h> 25 #include <linux/hrtimer.h> 26 #include <linux/slab.h> 27 #include <linux/hashtable.h> 28 29 #include <net/netdev_lock.h> 30 #include <net/net_namespace.h> 31 #include <net/sock.h> 32 #include <net/netlink.h> 33 #include <net/pkt_sched.h> 34 #include <net/pkt_cls.h> 35 #include <net/tc_wrapper.h> 36 37 #include <trace/events/qdisc.h> 38 39 /* 40 41 Short review. 42 ------------- 43 44 This file consists of two interrelated parts: 45 46 1. queueing disciplines manager frontend. 47 2. traffic classes manager frontend. 48 49 Generally, queueing discipline ("qdisc") is a black box, 50 which is able to enqueue packets and to dequeue them (when 51 device is ready to send something) in order and at times 52 determined by algorithm hidden in it. 53 54 qdisc's are divided to two categories: 55 - "queues", which have no internal structure visible from outside. 56 - "schedulers", which split all the packets to "traffic classes", 57 using "packet classifiers" (look at cls_api.c) 58 59 In turn, classes may have child qdiscs (as rule, queues) 60 attached to them etc. etc. etc. 61 62 The goal of the routines in this file is to translate 63 information supplied by user in the form of handles 64 to more intelligible for kernel form, to make some sanity 65 checks and part of work, which is common to all qdiscs 66 and to provide rtnetlink notifications. 67 68 All real intelligent work is done inside qdisc modules. 69 70 71 72 Every discipline has two major routines: enqueue and dequeue. 73 74 ---dequeue 75 76 dequeue usually returns a skb to send. It is allowed to return NULL, 77 but it does not mean that queue is empty, it just means that 78 discipline does not want to send anything this time. 79 Queue is really empty if q->q.qlen == 0. 80 For complicated disciplines with multiple queues q->q is not 81 real packet queue, but however q->q.qlen must be valid. 82 83 ---enqueue 84 85 enqueue returns 0, if packet was enqueued successfully. 86 If packet (this one or another one) was dropped, it returns 87 not zero error code. 88 NET_XMIT_DROP - this packet dropped 89 Expected action: do not backoff, but wait until queue will clear. 90 NET_XMIT_CN - probably this packet enqueued, but another one dropped. 91 Expected action: backoff or ignore 92 93 Auxiliary routines: 94 95 ---peek 96 97 like dequeue but without removing a packet from the queue 98 99 ---reset 100 101 returns qdisc to initial state: purge all buffers, clear all 102 timers, counters (except for statistics) etc. 103 104 ---init 105 106 initializes newly created qdisc. 107 108 ---destroy 109 110 destroys resources allocated by init and during lifetime of qdisc. 111 112 ---change 113 114 changes qdisc parameters. 115 */ 116 117 /* Protects list of registered TC modules. It is pure SMP lock. */ 118 static DEFINE_RWLOCK(qdisc_mod_lock); 119 120 121 /************************************************ 122 * Queueing disciplines manipulation. * 123 ************************************************/ 124 125 126 /* The list of all installed queueing disciplines. */ 127 128 static struct Qdisc_ops *qdisc_base; 129 130 /* Register/unregister queueing discipline */ 131 132 int register_qdisc(struct Qdisc_ops *qops) 133 { 134 struct Qdisc_ops *q, **qp; 135 int rc = -EEXIST; 136 137 write_lock(&qdisc_mod_lock); 138 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) 139 if (!strcmp(qops->id, q->id)) 140 goto out; 141 142 if (qops->enqueue == NULL) 143 qops->enqueue = noop_qdisc_ops.enqueue; 144 if (qops->peek == NULL) { 145 if (qops->dequeue == NULL) 146 qops->peek = noop_qdisc_ops.peek; 147 else 148 goto out_einval; 149 } 150 if (qops->dequeue == NULL) 151 qops->dequeue = noop_qdisc_ops.dequeue; 152 153 if (qops->cl_ops) { 154 const struct Qdisc_class_ops *cops = qops->cl_ops; 155 156 if (!(cops->find && cops->walk && cops->leaf)) 157 goto out_einval; 158 159 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf)) 160 goto out_einval; 161 } 162 163 qops->next = NULL; 164 *qp = qops; 165 rc = 0; 166 out: 167 write_unlock(&qdisc_mod_lock); 168 return rc; 169 170 out_einval: 171 rc = -EINVAL; 172 goto out; 173 } 174 EXPORT_SYMBOL(register_qdisc); 175 176 void unregister_qdisc(struct Qdisc_ops *qops) 177 { 178 struct Qdisc_ops *q, **qp; 179 int err = -ENOENT; 180 181 write_lock(&qdisc_mod_lock); 182 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) 183 if (q == qops) 184 break; 185 if (q) { 186 *qp = q->next; 187 q->next = NULL; 188 err = 0; 189 } 190 write_unlock(&qdisc_mod_lock); 191 192 WARN(err, "unregister qdisc(%s) failed\n", qops->id); 193 } 194 EXPORT_SYMBOL(unregister_qdisc); 195 196 /* Get default qdisc if not otherwise specified */ 197 void qdisc_get_default(char *name, size_t len) 198 { 199 read_lock(&qdisc_mod_lock); 200 strscpy(name, default_qdisc_ops->id, len); 201 read_unlock(&qdisc_mod_lock); 202 } 203 204 static struct Qdisc_ops *qdisc_lookup_default(const char *name) 205 { 206 struct Qdisc_ops *q = NULL; 207 208 for (q = qdisc_base; q; q = q->next) { 209 if (!strcmp(name, q->id)) { 210 if (!try_module_get(q->owner)) 211 q = NULL; 212 break; 213 } 214 } 215 216 return q; 217 } 218 219 /* Set new default qdisc to use */ 220 int qdisc_set_default(const char *name) 221 { 222 const struct Qdisc_ops *ops; 223 224 if (!capable(CAP_NET_ADMIN)) 225 return -EPERM; 226 227 write_lock(&qdisc_mod_lock); 228 ops = qdisc_lookup_default(name); 229 if (!ops) { 230 /* Not found, drop lock and try to load module */ 231 write_unlock(&qdisc_mod_lock); 232 request_module(NET_SCH_ALIAS_PREFIX "%s", name); 233 write_lock(&qdisc_mod_lock); 234 235 ops = qdisc_lookup_default(name); 236 } 237 238 if (ops) { 239 /* Set new default */ 240 module_put(default_qdisc_ops->owner); 241 default_qdisc_ops = ops; 242 } 243 write_unlock(&qdisc_mod_lock); 244 245 return ops ? 0 : -ENOENT; 246 } 247 248 #ifdef CONFIG_NET_SCH_DEFAULT 249 /* Set default value from kernel config */ 250 static int __init sch_default_qdisc(void) 251 { 252 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH); 253 } 254 late_initcall(sch_default_qdisc); 255 #endif 256 257 /* We know handle. Find qdisc among all qdisc's attached to device 258 * (root qdisc, all its children, children of children etc.) 259 * Note: caller either uses rtnl or rcu_read_lock() 260 */ 261 262 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) 263 { 264 struct Qdisc *q; 265 266 if (!qdisc_dev(root)) 267 return (root->handle == handle ? root : NULL); 268 269 if (!(root->flags & TCQ_F_BUILTIN) && 270 root->handle == handle) 271 return root; 272 273 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle, 274 lockdep_rtnl_is_held()) { 275 if (q->handle == handle) 276 return q; 277 } 278 return NULL; 279 } 280 281 void qdisc_hash_add(struct Qdisc *q, bool invisible) 282 { 283 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { 284 ASSERT_RTNL(); 285 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle); 286 if (invisible) 287 q->flags |= TCQ_F_INVISIBLE; 288 } 289 } 290 EXPORT_SYMBOL(qdisc_hash_add); 291 292 void qdisc_hash_del(struct Qdisc *q) 293 { 294 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { 295 ASSERT_RTNL(); 296 hash_del_rcu(&q->hash); 297 } 298 } 299 EXPORT_SYMBOL(qdisc_hash_del); 300 301 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) 302 { 303 struct Qdisc *q; 304 305 if (!handle) 306 return NULL; 307 q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle); 308 if (q) 309 goto out; 310 311 if (dev_ingress_queue(dev)) 312 q = qdisc_match_from_root( 313 rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping), 314 handle); 315 out: 316 return q; 317 } 318 319 struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle) 320 { 321 struct netdev_queue *nq; 322 struct Qdisc *q; 323 324 if (!handle) 325 return NULL; 326 q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle); 327 if (q) 328 goto out; 329 330 nq = dev_ingress_queue_rcu(dev); 331 if (nq) 332 q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping), 333 handle); 334 out: 335 return q; 336 } 337 338 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) 339 { 340 unsigned long cl; 341 const struct Qdisc_class_ops *cops = p->ops->cl_ops; 342 343 if (cops == NULL) 344 return NULL; 345 cl = cops->find(p, classid); 346 347 if (cl == 0) 348 return NULL; 349 return cops->leaf(p, cl); 350 } 351 352 /* Find queueing discipline by name */ 353 354 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind) 355 { 356 struct Qdisc_ops *q = NULL; 357 358 if (kind) { 359 read_lock(&qdisc_mod_lock); 360 for (q = qdisc_base; q; q = q->next) { 361 if (nla_strcmp(kind, q->id) == 0) { 362 if (!try_module_get(q->owner)) 363 q = NULL; 364 break; 365 } 366 } 367 read_unlock(&qdisc_mod_lock); 368 } 369 return q; 370 } 371 372 /* The linklayer setting were not transferred from iproute2, in older 373 * versions, and the rate tables lookup systems have been dropped in 374 * the kernel. To keep backward compatible with older iproute2 tc 375 * utils, we detect the linklayer setting by detecting if the rate 376 * table were modified. 377 * 378 * For linklayer ATM table entries, the rate table will be aligned to 379 * 48 bytes, thus some table entries will contain the same value. The 380 * mpu (min packet unit) is also encoded into the old rate table, thus 381 * starting from the mpu, we find low and high table entries for 382 * mapping this cell. If these entries contain the same value, when 383 * the rate tables have been modified for linklayer ATM. 384 * 385 * This is done by rounding mpu to the nearest 48 bytes cell/entry, 386 * and then roundup to the next cell, calc the table entry one below, 387 * and compare. 388 */ 389 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab) 390 { 391 int low = roundup(r->mpu, 48); 392 int high = roundup(low+1, 48); 393 int cell_low = low >> r->cell_log; 394 int cell_high = (high >> r->cell_log) - 1; 395 396 /* rtab is too inaccurate at rates > 100Mbit/s */ 397 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) { 398 pr_debug("TC linklayer: Giving up ATM detection\n"); 399 return TC_LINKLAYER_ETHERNET; 400 } 401 402 if ((cell_high > cell_low) && (cell_high < 256) 403 && (rtab[cell_low] == rtab[cell_high])) { 404 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n", 405 cell_low, cell_high, rtab[cell_high]); 406 return TC_LINKLAYER_ATM; 407 } 408 return TC_LINKLAYER_ETHERNET; 409 } 410 411 static struct qdisc_rate_table *qdisc_rtab_list; 412 413 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, 414 struct nlattr *tab, 415 struct netlink_ext_ack *extack) 416 { 417 struct qdisc_rate_table *rtab; 418 419 if (tab == NULL || r->rate == 0 || 420 r->cell_log == 0 || r->cell_log >= 32 || 421 nla_len(tab) != TC_RTAB_SIZE) { 422 NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching"); 423 return NULL; 424 } 425 426 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) { 427 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) && 428 !memcmp(&rtab->data, nla_data(tab), 1024)) { 429 rtab->refcnt++; 430 return rtab; 431 } 432 } 433 434 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL); 435 if (rtab) { 436 rtab->rate = *r; 437 rtab->refcnt = 1; 438 memcpy(rtab->data, nla_data(tab), 1024); 439 if (r->linklayer == TC_LINKLAYER_UNAWARE) 440 r->linklayer = __detect_linklayer(r, rtab->data); 441 rtab->next = qdisc_rtab_list; 442 qdisc_rtab_list = rtab; 443 } else { 444 NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table"); 445 } 446 return rtab; 447 } 448 EXPORT_SYMBOL(qdisc_get_rtab); 449 450 void qdisc_put_rtab(struct qdisc_rate_table *tab) 451 { 452 struct qdisc_rate_table *rtab, **rtabp; 453 454 if (!tab || --tab->refcnt) 455 return; 456 457 for (rtabp = &qdisc_rtab_list; 458 (rtab = *rtabp) != NULL; 459 rtabp = &rtab->next) { 460 if (rtab == tab) { 461 *rtabp = rtab->next; 462 kfree(rtab); 463 return; 464 } 465 } 466 } 467 EXPORT_SYMBOL(qdisc_put_rtab); 468 469 static LIST_HEAD(qdisc_stab_list); 470 471 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = { 472 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) }, 473 [TCA_STAB_DATA] = { .type = NLA_BINARY }, 474 }; 475 476 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt, 477 struct netlink_ext_ack *extack) 478 { 479 struct nlattr *tb[TCA_STAB_MAX + 1]; 480 struct qdisc_size_table *stab; 481 struct tc_sizespec *s; 482 unsigned int tsize = 0; 483 u16 *tab = NULL; 484 int err; 485 486 err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy, 487 extack); 488 if (err < 0) 489 return ERR_PTR(err); 490 if (!tb[TCA_STAB_BASE]) { 491 NL_SET_ERR_MSG(extack, "Size table base attribute is missing"); 492 return ERR_PTR(-EINVAL); 493 } 494 495 s = nla_data(tb[TCA_STAB_BASE]); 496 497 if (s->tsize > 0) { 498 if (!tb[TCA_STAB_DATA]) { 499 NL_SET_ERR_MSG(extack, "Size table data attribute is missing"); 500 return ERR_PTR(-EINVAL); 501 } 502 tab = nla_data(tb[TCA_STAB_DATA]); 503 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16); 504 } 505 506 if (tsize != s->tsize || (!tab && tsize > 0)) { 507 NL_SET_ERR_MSG(extack, "Invalid size of size table"); 508 return ERR_PTR(-EINVAL); 509 } 510 511 list_for_each_entry(stab, &qdisc_stab_list, list) { 512 if (memcmp(&stab->szopts, s, sizeof(*s))) 513 continue; 514 if (tsize > 0 && 515 memcmp(stab->data, tab, flex_array_size(stab, data, tsize))) 516 continue; 517 stab->refcnt++; 518 return stab; 519 } 520 521 if (s->size_log > STAB_SIZE_LOG_MAX || 522 s->cell_log > STAB_SIZE_LOG_MAX) { 523 NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table"); 524 return ERR_PTR(-EINVAL); 525 } 526 527 stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL); 528 if (!stab) 529 return ERR_PTR(-ENOMEM); 530 531 stab->refcnt = 1; 532 stab->szopts = *s; 533 if (tsize > 0) 534 memcpy(stab->data, tab, flex_array_size(stab, data, tsize)); 535 536 list_add_tail(&stab->list, &qdisc_stab_list); 537 538 return stab; 539 } 540 541 void qdisc_put_stab(struct qdisc_size_table *tab) 542 { 543 if (!tab) 544 return; 545 546 if (--tab->refcnt == 0) { 547 list_del(&tab->list); 548 kfree_rcu(tab, rcu); 549 } 550 } 551 EXPORT_SYMBOL(qdisc_put_stab); 552 553 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab) 554 { 555 struct nlattr *nest; 556 557 nest = nla_nest_start_noflag(skb, TCA_STAB); 558 if (nest == NULL) 559 goto nla_put_failure; 560 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts)) 561 goto nla_put_failure; 562 nla_nest_end(skb, nest); 563 564 return skb->len; 565 566 nla_put_failure: 567 return -1; 568 } 569 570 void __qdisc_calculate_pkt_len(struct sk_buff *skb, 571 const struct qdisc_size_table *stab) 572 { 573 int pkt_len, slot; 574 575 pkt_len = skb->len + stab->szopts.overhead; 576 if (unlikely(!stab->szopts.tsize)) 577 goto out; 578 579 slot = pkt_len + stab->szopts.cell_align; 580 if (unlikely(slot < 0)) 581 slot = 0; 582 583 slot >>= stab->szopts.cell_log; 584 if (likely(slot < stab->szopts.tsize)) 585 pkt_len = stab->data[slot]; 586 else 587 pkt_len = stab->data[stab->szopts.tsize - 1] * 588 (slot / stab->szopts.tsize) + 589 stab->data[slot % stab->szopts.tsize]; 590 591 pkt_len <<= stab->szopts.size_log; 592 out: 593 if (unlikely(pkt_len < 1)) 594 pkt_len = 1; 595 qdisc_skb_cb(skb)->pkt_len = pkt_len; 596 } 597 598 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc) 599 { 600 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { 601 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", 602 txt, qdisc->ops->id, qdisc->handle >> 16); 603 qdisc->flags |= TCQ_F_WARN_NONWC; 604 } 605 } 606 EXPORT_SYMBOL(qdisc_warn_nonwc); 607 608 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) 609 { 610 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, 611 timer); 612 613 rcu_read_lock(); 614 __netif_schedule(qdisc_root(wd->qdisc)); 615 rcu_read_unlock(); 616 617 return HRTIMER_NORESTART; 618 } 619 620 void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc, 621 clockid_t clockid) 622 { 623 hrtimer_setup(&wd->timer, qdisc_watchdog, clockid, HRTIMER_MODE_ABS_PINNED); 624 wd->qdisc = qdisc; 625 } 626 EXPORT_SYMBOL(qdisc_watchdog_init_clockid); 627 628 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) 629 { 630 qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC); 631 } 632 EXPORT_SYMBOL(qdisc_watchdog_init); 633 634 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires, 635 u64 delta_ns) 636 { 637 bool deactivated; 638 639 rcu_read_lock(); 640 deactivated = test_bit(__QDISC_STATE_DEACTIVATED, 641 &qdisc_root_sleeping(wd->qdisc)->state); 642 rcu_read_unlock(); 643 if (deactivated) 644 return; 645 646 if (hrtimer_is_queued(&wd->timer)) { 647 u64 softexpires; 648 649 softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer)); 650 /* If timer is already set in [expires, expires + delta_ns], 651 * do not reprogram it. 652 */ 653 if (softexpires - expires <= delta_ns) 654 return; 655 } 656 657 hrtimer_start_range_ns(&wd->timer, 658 ns_to_ktime(expires), 659 delta_ns, 660 HRTIMER_MODE_ABS_PINNED); 661 } 662 EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns); 663 664 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) 665 { 666 hrtimer_cancel(&wd->timer); 667 } 668 EXPORT_SYMBOL(qdisc_watchdog_cancel); 669 670 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n) 671 { 672 struct hlist_head *h; 673 unsigned int i; 674 675 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL); 676 677 if (h != NULL) { 678 for (i = 0; i < n; i++) 679 INIT_HLIST_HEAD(&h[i]); 680 } 681 return h; 682 } 683 684 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) 685 { 686 struct Qdisc_class_common *cl; 687 struct hlist_node *next; 688 struct hlist_head *nhash, *ohash; 689 unsigned int nsize, nmask, osize; 690 unsigned int i, h; 691 692 /* Rehash when load factor exceeds 0.75 */ 693 if (clhash->hashelems * 4 <= clhash->hashsize * 3) 694 return; 695 nsize = clhash->hashsize * 2; 696 nmask = nsize - 1; 697 nhash = qdisc_class_hash_alloc(nsize); 698 if (nhash == NULL) 699 return; 700 701 ohash = clhash->hash; 702 osize = clhash->hashsize; 703 704 sch_tree_lock(sch); 705 for (i = 0; i < osize; i++) { 706 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) { 707 h = qdisc_class_hash(cl->classid, nmask); 708 hlist_add_head(&cl->hnode, &nhash[h]); 709 } 710 } 711 clhash->hash = nhash; 712 clhash->hashsize = nsize; 713 clhash->hashmask = nmask; 714 sch_tree_unlock(sch); 715 716 kvfree(ohash); 717 } 718 EXPORT_SYMBOL(qdisc_class_hash_grow); 719 720 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash) 721 { 722 unsigned int size = 4; 723 724 clhash->hash = qdisc_class_hash_alloc(size); 725 if (!clhash->hash) 726 return -ENOMEM; 727 clhash->hashsize = size; 728 clhash->hashmask = size - 1; 729 clhash->hashelems = 0; 730 return 0; 731 } 732 EXPORT_SYMBOL(qdisc_class_hash_init); 733 734 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash) 735 { 736 kvfree(clhash->hash); 737 } 738 EXPORT_SYMBOL(qdisc_class_hash_destroy); 739 740 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash, 741 struct Qdisc_class_common *cl) 742 { 743 unsigned int h; 744 745 INIT_HLIST_NODE(&cl->hnode); 746 h = qdisc_class_hash(cl->classid, clhash->hashmask); 747 hlist_add_head(&cl->hnode, &clhash->hash[h]); 748 clhash->hashelems++; 749 } 750 EXPORT_SYMBOL(qdisc_class_hash_insert); 751 752 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash, 753 struct Qdisc_class_common *cl) 754 { 755 hlist_del(&cl->hnode); 756 clhash->hashelems--; 757 } 758 EXPORT_SYMBOL(qdisc_class_hash_remove); 759 760 /* Allocate an unique handle from space managed by kernel 761 * Possible range is [8000-FFFF]:0000 (0x8000 values) 762 */ 763 static u32 qdisc_alloc_handle(struct net_device *dev) 764 { 765 int i = 0x8000; 766 static u32 autohandle = TC_H_MAKE(0x80000000U, 0); 767 768 do { 769 autohandle += TC_H_MAKE(0x10000U, 0); 770 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0)) 771 autohandle = TC_H_MAKE(0x80000000U, 0); 772 if (!qdisc_lookup(dev, autohandle)) 773 return autohandle; 774 cond_resched(); 775 } while (--i > 0); 776 777 return 0; 778 } 779 780 void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len) 781 { 782 bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED; 783 const struct Qdisc_class_ops *cops; 784 unsigned long cl; 785 u32 parentid; 786 bool notify; 787 int drops; 788 789 if (n == 0 && len == 0) 790 return; 791 drops = max_t(int, n, 0); 792 rcu_read_lock(); 793 while ((parentid = sch->parent)) { 794 if (parentid == TC_H_ROOT) 795 break; 796 797 if (sch->flags & TCQ_F_NOPARENT) 798 break; 799 /* Notify parent qdisc only if child qdisc becomes empty. 800 * 801 * If child was empty even before update then backlog 802 * counter is screwed and we skip notification because 803 * parent class is already passive. 804 * 805 * If the original child was offloaded then it is allowed 806 * to be seem as empty, so the parent is notified anyway. 807 */ 808 notify = !sch->q.qlen && !WARN_ON_ONCE(!n && 809 !qdisc_is_offloaded); 810 /* TODO: perform the search on a per txq basis */ 811 sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid)); 812 if (sch == NULL) { 813 WARN_ON_ONCE(parentid != TC_H_ROOT); 814 break; 815 } 816 cops = sch->ops->cl_ops; 817 if (notify && cops->qlen_notify) { 818 cl = cops->find(sch, parentid); 819 cops->qlen_notify(sch, cl); 820 } 821 sch->q.qlen -= n; 822 sch->qstats.backlog -= len; 823 __qdisc_qstats_drop(sch, drops); 824 } 825 rcu_read_unlock(); 826 } 827 EXPORT_SYMBOL(qdisc_tree_reduce_backlog); 828 829 int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type, 830 void *type_data) 831 { 832 struct net_device *dev = qdisc_dev(sch); 833 int err; 834 835 sch->flags &= ~TCQ_F_OFFLOADED; 836 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) 837 return 0; 838 839 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data); 840 if (err == -EOPNOTSUPP) 841 return 0; 842 843 if (!err) 844 sch->flags |= TCQ_F_OFFLOADED; 845 846 return err; 847 } 848 EXPORT_SYMBOL(qdisc_offload_dump_helper); 849 850 void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch, 851 struct Qdisc *new, struct Qdisc *old, 852 enum tc_setup_type type, void *type_data, 853 struct netlink_ext_ack *extack) 854 { 855 bool any_qdisc_is_offloaded; 856 int err; 857 858 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) 859 return; 860 861 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data); 862 863 /* Don't report error if the graft is part of destroy operation. */ 864 if (!err || !new || new == &noop_qdisc) 865 return; 866 867 /* Don't report error if the parent, the old child and the new 868 * one are not offloaded. 869 */ 870 any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED; 871 any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED; 872 any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED; 873 874 if (any_qdisc_is_offloaded) 875 NL_SET_ERR_MSG(extack, "Offloading graft operation failed."); 876 } 877 EXPORT_SYMBOL(qdisc_offload_graft_helper); 878 879 void qdisc_offload_query_caps(struct net_device *dev, 880 enum tc_setup_type type, 881 void *caps, size_t caps_len) 882 { 883 const struct net_device_ops *ops = dev->netdev_ops; 884 struct tc_query_caps_base base = { 885 .type = type, 886 .caps = caps, 887 }; 888 889 memset(caps, 0, caps_len); 890 891 if (ops->ndo_setup_tc) 892 ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base); 893 } 894 EXPORT_SYMBOL(qdisc_offload_query_caps); 895 896 static void qdisc_offload_graft_root(struct net_device *dev, 897 struct Qdisc *new, struct Qdisc *old, 898 struct netlink_ext_ack *extack) 899 { 900 struct tc_root_qopt_offload graft_offload = { 901 .command = TC_ROOT_GRAFT, 902 .handle = new ? new->handle : 0, 903 .ingress = (new && new->flags & TCQ_F_INGRESS) || 904 (old && old->flags & TCQ_F_INGRESS), 905 }; 906 907 qdisc_offload_graft_helper(dev, NULL, new, old, 908 TC_SETUP_ROOT_QDISC, &graft_offload, extack); 909 } 910 911 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, 912 u32 portid, u32 seq, u16 flags, int event, 913 struct netlink_ext_ack *extack) 914 { 915 struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL; 916 struct gnet_stats_queue __percpu *cpu_qstats = NULL; 917 struct tcmsg *tcm; 918 struct nlmsghdr *nlh; 919 unsigned char *b = skb_tail_pointer(skb); 920 struct gnet_dump d; 921 struct qdisc_size_table *stab; 922 u32 block_index; 923 __u32 qlen; 924 925 cond_resched(); 926 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 927 if (!nlh) 928 goto out_nlmsg_trim; 929 tcm = nlmsg_data(nlh); 930 tcm->tcm_family = AF_UNSPEC; 931 tcm->tcm__pad1 = 0; 932 tcm->tcm__pad2 = 0; 933 tcm->tcm_ifindex = qdisc_dev(q)->ifindex; 934 tcm->tcm_parent = clid; 935 tcm->tcm_handle = q->handle; 936 tcm->tcm_info = refcount_read(&q->refcnt); 937 if (nla_put_string(skb, TCA_KIND, q->ops->id)) 938 goto nla_put_failure; 939 if (q->ops->ingress_block_get) { 940 block_index = q->ops->ingress_block_get(q); 941 if (block_index && 942 nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index)) 943 goto nla_put_failure; 944 } 945 if (q->ops->egress_block_get) { 946 block_index = q->ops->egress_block_get(q); 947 if (block_index && 948 nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index)) 949 goto nla_put_failure; 950 } 951 if (q->ops->dump && q->ops->dump(q, skb) < 0) 952 goto nla_put_failure; 953 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED))) 954 goto nla_put_failure; 955 qlen = qdisc_qlen_sum(q); 956 957 stab = rtnl_dereference(q->stab); 958 if (stab && qdisc_dump_stab(skb, stab) < 0) 959 goto nla_put_failure; 960 961 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, 962 NULL, &d, TCA_PAD) < 0) 963 goto nla_put_failure; 964 965 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) 966 goto nla_put_failure; 967 968 if (qdisc_is_percpu_stats(q)) { 969 cpu_bstats = q->cpu_bstats; 970 cpu_qstats = q->cpu_qstats; 971 } 972 973 if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 || 974 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 || 975 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0) 976 goto nla_put_failure; 977 978 if (gnet_stats_finish_copy(&d) < 0) 979 goto nla_put_failure; 980 981 if (extack && extack->_msg && 982 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) 983 goto out_nlmsg_trim; 984 985 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 986 987 return skb->len; 988 989 out_nlmsg_trim: 990 nla_put_failure: 991 nlmsg_trim(skb, b); 992 return -1; 993 } 994 995 static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible) 996 { 997 if (q->flags & TCQ_F_BUILTIN) 998 return true; 999 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible) 1000 return true; 1001 1002 return false; 1003 } 1004 1005 static int qdisc_get_notify(struct net *net, struct sk_buff *oskb, 1006 struct nlmsghdr *n, u32 clid, struct Qdisc *q, 1007 struct netlink_ext_ack *extack) 1008 { 1009 struct sk_buff *skb; 1010 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 1011 1012 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1013 if (!skb) 1014 return -ENOBUFS; 1015 1016 if (!tc_qdisc_dump_ignore(q, false)) { 1017 if (tc_fill_qdisc(skb, q, clid, portid, n->nlmsg_seq, 0, 1018 RTM_NEWQDISC, extack) < 0) 1019 goto err_out; 1020 } 1021 1022 if (skb->len) 1023 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, 1024 n->nlmsg_flags & NLM_F_ECHO); 1025 1026 err_out: 1027 kfree_skb(skb); 1028 return -EINVAL; 1029 } 1030 1031 static int qdisc_notify(struct net *net, struct sk_buff *oskb, 1032 struct nlmsghdr *n, u32 clid, 1033 struct Qdisc *old, struct Qdisc *new, 1034 struct netlink_ext_ack *extack) 1035 { 1036 struct sk_buff *skb; 1037 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 1038 1039 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) 1040 return 0; 1041 1042 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1043 if (!skb) 1044 return -ENOBUFS; 1045 1046 if (old && !tc_qdisc_dump_ignore(old, false)) { 1047 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq, 1048 0, RTM_DELQDISC, extack) < 0) 1049 goto err_out; 1050 } 1051 if (new && !tc_qdisc_dump_ignore(new, false)) { 1052 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq, 1053 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0) 1054 goto err_out; 1055 } 1056 1057 if (skb->len) 1058 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, 1059 n->nlmsg_flags & NLM_F_ECHO); 1060 1061 err_out: 1062 kfree_skb(skb); 1063 return -EINVAL; 1064 } 1065 1066 static void notify_and_destroy(struct net *net, struct sk_buff *skb, 1067 struct nlmsghdr *n, u32 clid, 1068 struct Qdisc *old, struct Qdisc *new, 1069 struct netlink_ext_ack *extack) 1070 { 1071 if (new || old) 1072 qdisc_notify(net, skb, n, clid, old, new, extack); 1073 1074 if (old) 1075 qdisc_put(old); 1076 } 1077 1078 static void qdisc_clear_nolock(struct Qdisc *sch) 1079 { 1080 sch->flags &= ~TCQ_F_NOLOCK; 1081 if (!(sch->flags & TCQ_F_CPUSTATS)) 1082 return; 1083 1084 free_percpu(sch->cpu_bstats); 1085 free_percpu(sch->cpu_qstats); 1086 sch->cpu_bstats = NULL; 1087 sch->cpu_qstats = NULL; 1088 sch->flags &= ~TCQ_F_CPUSTATS; 1089 } 1090 1091 /* Graft qdisc "new" to class "classid" of qdisc "parent" or 1092 * to device "dev". 1093 * 1094 * When appropriate send a netlink notification using 'skb' 1095 * and "n". 1096 * 1097 * On success, destroy old qdisc. 1098 */ 1099 1100 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, 1101 struct sk_buff *skb, struct nlmsghdr *n, u32 classid, 1102 struct Qdisc *new, struct Qdisc *old, 1103 struct netlink_ext_ack *extack) 1104 { 1105 struct Qdisc *q = old; 1106 struct net *net = dev_net(dev); 1107 1108 if (parent == NULL) { 1109 unsigned int i, num_q, ingress; 1110 struct netdev_queue *dev_queue; 1111 1112 ingress = 0; 1113 num_q = dev->num_tx_queues; 1114 if ((q && q->flags & TCQ_F_INGRESS) || 1115 (new && new->flags & TCQ_F_INGRESS)) { 1116 ingress = 1; 1117 dev_queue = dev_ingress_queue(dev); 1118 if (!dev_queue) { 1119 NL_SET_ERR_MSG(extack, "Device does not have an ingress queue"); 1120 return -ENOENT; 1121 } 1122 1123 q = rtnl_dereference(dev_queue->qdisc_sleeping); 1124 1125 /* This is the counterpart of that qdisc_refcount_inc_nz() call in 1126 * __tcf_qdisc_find() for filter requests. 1127 */ 1128 if (!qdisc_refcount_dec_if_one(q)) { 1129 NL_SET_ERR_MSG(extack, 1130 "Current ingress or clsact Qdisc has ongoing filter requests"); 1131 return -EBUSY; 1132 } 1133 } 1134 1135 if (dev->flags & IFF_UP) 1136 dev_deactivate(dev); 1137 1138 qdisc_offload_graft_root(dev, new, old, extack); 1139 1140 if (new && new->ops->attach && !ingress) 1141 goto skip; 1142 1143 if (!ingress) { 1144 for (i = 0; i < num_q; i++) { 1145 dev_queue = netdev_get_tx_queue(dev, i); 1146 old = dev_graft_qdisc(dev_queue, new); 1147 1148 if (new && i > 0) 1149 qdisc_refcount_inc(new); 1150 qdisc_put(old); 1151 } 1152 } else { 1153 old = dev_graft_qdisc(dev_queue, NULL); 1154 1155 /* {ingress,clsact}_destroy() @old before grafting @new to avoid 1156 * unprotected concurrent accesses to net_device::miniq_{in,e}gress 1157 * pointer(s) in mini_qdisc_pair_swap(). 1158 */ 1159 qdisc_notify(net, skb, n, classid, old, new, extack); 1160 qdisc_destroy(old); 1161 1162 dev_graft_qdisc(dev_queue, new); 1163 } 1164 1165 skip: 1166 if (!ingress) { 1167 old = rtnl_dereference(dev->qdisc); 1168 if (new && !new->ops->attach) 1169 qdisc_refcount_inc(new); 1170 rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc); 1171 1172 notify_and_destroy(net, skb, n, classid, old, new, extack); 1173 1174 if (new && new->ops->attach) 1175 new->ops->attach(new); 1176 } 1177 1178 if (dev->flags & IFF_UP) 1179 dev_activate(dev); 1180 } else { 1181 const struct Qdisc_class_ops *cops = parent->ops->cl_ops; 1182 unsigned long cl; 1183 int err; 1184 1185 /* Only support running class lockless if parent is lockless */ 1186 if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK)) 1187 qdisc_clear_nolock(new); 1188 1189 if (!cops || !cops->graft) 1190 return -EOPNOTSUPP; 1191 1192 cl = cops->find(parent, classid); 1193 if (!cl) { 1194 NL_SET_ERR_MSG(extack, "Specified class not found"); 1195 return -ENOENT; 1196 } 1197 1198 if (new && new->ops == &noqueue_qdisc_ops) { 1199 NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class"); 1200 return -EINVAL; 1201 } 1202 1203 if (new && 1204 !(parent->flags & TCQ_F_MQROOT) && 1205 rcu_access_pointer(new->stab)) { 1206 NL_SET_ERR_MSG(extack, "STAB not supported on a non root"); 1207 return -EINVAL; 1208 } 1209 err = cops->graft(parent, cl, new, &old, extack); 1210 if (err) 1211 return err; 1212 notify_and_destroy(net, skb, n, classid, old, new, extack); 1213 } 1214 return 0; 1215 } 1216 1217 static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca, 1218 struct netlink_ext_ack *extack) 1219 { 1220 u32 block_index; 1221 1222 if (tca[TCA_INGRESS_BLOCK]) { 1223 block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]); 1224 1225 if (!block_index) { 1226 NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0"); 1227 return -EINVAL; 1228 } 1229 if (!sch->ops->ingress_block_set) { 1230 NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported"); 1231 return -EOPNOTSUPP; 1232 } 1233 sch->ops->ingress_block_set(sch, block_index); 1234 } 1235 if (tca[TCA_EGRESS_BLOCK]) { 1236 block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]); 1237 1238 if (!block_index) { 1239 NL_SET_ERR_MSG(extack, "Egress block index cannot be 0"); 1240 return -EINVAL; 1241 } 1242 if (!sch->ops->egress_block_set) { 1243 NL_SET_ERR_MSG(extack, "Egress block sharing is not supported"); 1244 return -EOPNOTSUPP; 1245 } 1246 sch->ops->egress_block_set(sch, block_index); 1247 } 1248 return 0; 1249 } 1250 1251 /* 1252 Allocate and initialize new qdisc. 1253 1254 Parameters are passed via opt. 1255 */ 1256 1257 static struct Qdisc *qdisc_create(struct net_device *dev, 1258 struct netdev_queue *dev_queue, 1259 u32 parent, u32 handle, 1260 struct nlattr **tca, int *errp, 1261 struct netlink_ext_ack *extack) 1262 { 1263 int err; 1264 struct nlattr *kind = tca[TCA_KIND]; 1265 struct Qdisc *sch; 1266 struct Qdisc_ops *ops; 1267 struct qdisc_size_table *stab; 1268 1269 ops = qdisc_lookup_ops(kind); 1270 if (!ops) { 1271 err = -ENOENT; 1272 NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown"); 1273 goto err_out; 1274 } 1275 1276 sch = qdisc_alloc(dev_queue, ops, extack); 1277 if (IS_ERR(sch)) { 1278 err = PTR_ERR(sch); 1279 goto err_out2; 1280 } 1281 1282 sch->parent = parent; 1283 1284 if (handle == TC_H_INGRESS) { 1285 if (!(sch->flags & TCQ_F_INGRESS)) { 1286 NL_SET_ERR_MSG(extack, 1287 "Specified parent ID is reserved for ingress and clsact Qdiscs"); 1288 err = -EINVAL; 1289 goto err_out3; 1290 } 1291 handle = TC_H_MAKE(TC_H_INGRESS, 0); 1292 } else { 1293 if (handle == 0) { 1294 handle = qdisc_alloc_handle(dev); 1295 if (handle == 0) { 1296 NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded"); 1297 err = -ENOSPC; 1298 goto err_out3; 1299 } 1300 } 1301 if (!netif_is_multiqueue(dev)) 1302 sch->flags |= TCQ_F_ONETXQUEUE; 1303 } 1304 1305 sch->handle = handle; 1306 1307 /* This exist to keep backward compatible with a userspace 1308 * loophole, what allowed userspace to get IFF_NO_QUEUE 1309 * facility on older kernels by setting tx_queue_len=0 (prior 1310 * to qdisc init), and then forgot to reinit tx_queue_len 1311 * before again attaching a qdisc. 1312 */ 1313 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) { 1314 WRITE_ONCE(dev->tx_queue_len, DEFAULT_TX_QUEUE_LEN); 1315 netdev_info(dev, "Caught tx_queue_len zero misconfig\n"); 1316 } 1317 1318 err = qdisc_block_indexes_set(sch, tca, extack); 1319 if (err) 1320 goto err_out3; 1321 1322 if (tca[TCA_STAB]) { 1323 stab = qdisc_get_stab(tca[TCA_STAB], extack); 1324 if (IS_ERR(stab)) { 1325 err = PTR_ERR(stab); 1326 goto err_out3; 1327 } 1328 rcu_assign_pointer(sch->stab, stab); 1329 } 1330 1331 if (ops->init) { 1332 err = ops->init(sch, tca[TCA_OPTIONS], extack); 1333 if (err != 0) 1334 goto err_out4; 1335 } 1336 1337 if (tca[TCA_RATE]) { 1338 err = -EOPNOTSUPP; 1339 if (sch->flags & TCQ_F_MQROOT) { 1340 NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc"); 1341 goto err_out4; 1342 } 1343 1344 err = gen_new_estimator(&sch->bstats, 1345 sch->cpu_bstats, 1346 &sch->rate_est, 1347 NULL, 1348 true, 1349 tca[TCA_RATE]); 1350 if (err) { 1351 NL_SET_ERR_MSG(extack, "Failed to generate new estimator"); 1352 goto err_out4; 1353 } 1354 } 1355 1356 qdisc_hash_add(sch, false); 1357 trace_qdisc_create(ops, dev, parent); 1358 1359 return sch; 1360 1361 err_out4: 1362 /* Even if ops->init() failed, we call ops->destroy() 1363 * like qdisc_create_dflt(). 1364 */ 1365 if (ops->destroy) 1366 ops->destroy(sch); 1367 qdisc_put_stab(rtnl_dereference(sch->stab)); 1368 err_out3: 1369 lockdep_unregister_key(&sch->root_lock_key); 1370 netdev_put(dev, &sch->dev_tracker); 1371 qdisc_free(sch); 1372 err_out2: 1373 module_put(ops->owner); 1374 err_out: 1375 *errp = err; 1376 return NULL; 1377 } 1378 1379 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca, 1380 struct netlink_ext_ack *extack) 1381 { 1382 struct qdisc_size_table *ostab, *stab = NULL; 1383 int err = 0; 1384 1385 if (tca[TCA_OPTIONS]) { 1386 if (!sch->ops->change) { 1387 NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc"); 1388 return -EINVAL; 1389 } 1390 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) { 1391 NL_SET_ERR_MSG(extack, "Change of blocks is not supported"); 1392 return -EOPNOTSUPP; 1393 } 1394 err = sch->ops->change(sch, tca[TCA_OPTIONS], extack); 1395 if (err) 1396 return err; 1397 } 1398 1399 if (tca[TCA_STAB]) { 1400 stab = qdisc_get_stab(tca[TCA_STAB], extack); 1401 if (IS_ERR(stab)) 1402 return PTR_ERR(stab); 1403 } 1404 1405 ostab = rtnl_dereference(sch->stab); 1406 rcu_assign_pointer(sch->stab, stab); 1407 qdisc_put_stab(ostab); 1408 1409 if (tca[TCA_RATE]) { 1410 /* NB: ignores errors from replace_estimator 1411 because change can't be undone. */ 1412 if (sch->flags & TCQ_F_MQROOT) 1413 goto out; 1414 gen_replace_estimator(&sch->bstats, 1415 sch->cpu_bstats, 1416 &sch->rate_est, 1417 NULL, 1418 true, 1419 tca[TCA_RATE]); 1420 } 1421 out: 1422 return 0; 1423 } 1424 1425 struct check_loop_arg { 1426 struct qdisc_walker w; 1427 struct Qdisc *p; 1428 int depth; 1429 }; 1430 1431 static int check_loop_fn(struct Qdisc *q, unsigned long cl, 1432 struct qdisc_walker *w); 1433 1434 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth) 1435 { 1436 struct check_loop_arg arg; 1437 1438 if (q->ops->cl_ops == NULL) 1439 return 0; 1440 1441 arg.w.stop = arg.w.skip = arg.w.count = 0; 1442 arg.w.fn = check_loop_fn; 1443 arg.depth = depth; 1444 arg.p = p; 1445 q->ops->cl_ops->walk(q, &arg.w); 1446 return arg.w.stop ? -ELOOP : 0; 1447 } 1448 1449 static int 1450 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w) 1451 { 1452 struct Qdisc *leaf; 1453 const struct Qdisc_class_ops *cops = q->ops->cl_ops; 1454 struct check_loop_arg *arg = (struct check_loop_arg *)w; 1455 1456 leaf = cops->leaf(q, cl); 1457 if (leaf) { 1458 if (leaf == arg->p || arg->depth > 7) 1459 return -ELOOP; 1460 return check_loop(leaf, arg->p, arg->depth + 1); 1461 } 1462 return 0; 1463 } 1464 1465 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = { 1466 [TCA_KIND] = { .type = NLA_STRING }, 1467 [TCA_RATE] = { .type = NLA_BINARY, 1468 .len = sizeof(struct tc_estimator) }, 1469 [TCA_STAB] = { .type = NLA_NESTED }, 1470 [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG }, 1471 [TCA_CHAIN] = { .type = NLA_U32 }, 1472 [TCA_INGRESS_BLOCK] = { .type = NLA_U32 }, 1473 [TCA_EGRESS_BLOCK] = { .type = NLA_U32 }, 1474 }; 1475 1476 /* 1477 * Delete/get qdisc. 1478 */ 1479 1480 static int __tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, 1481 struct netlink_ext_ack *extack, 1482 struct net_device *dev, 1483 struct nlattr *tca[TCA_MAX + 1], 1484 struct tcmsg *tcm) 1485 { 1486 struct net *net = sock_net(skb->sk); 1487 struct Qdisc *q = NULL; 1488 struct Qdisc *p = NULL; 1489 u32 clid; 1490 int err; 1491 1492 clid = tcm->tcm_parent; 1493 if (clid) { 1494 if (clid != TC_H_ROOT) { 1495 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { 1496 p = qdisc_lookup(dev, TC_H_MAJ(clid)); 1497 if (!p) { 1498 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid"); 1499 return -ENOENT; 1500 } 1501 q = qdisc_leaf(p, clid); 1502 } else if (dev_ingress_queue(dev)) { 1503 q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); 1504 } 1505 } else { 1506 q = rtnl_dereference(dev->qdisc); 1507 } 1508 if (!q) { 1509 NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device"); 1510 return -ENOENT; 1511 } 1512 1513 if (tcm->tcm_handle && q->handle != tcm->tcm_handle) { 1514 NL_SET_ERR_MSG(extack, "Invalid handle"); 1515 return -EINVAL; 1516 } 1517 } else { 1518 q = qdisc_lookup(dev, tcm->tcm_handle); 1519 if (!q) { 1520 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle"); 1521 return -ENOENT; 1522 } 1523 } 1524 1525 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) { 1526 NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc"); 1527 return -EINVAL; 1528 } 1529 1530 if (n->nlmsg_type == RTM_DELQDISC) { 1531 if (!clid) { 1532 NL_SET_ERR_MSG(extack, "Classid cannot be zero"); 1533 return -EINVAL; 1534 } 1535 if (q->handle == 0) { 1536 NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero"); 1537 return -ENOENT; 1538 } 1539 err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack); 1540 if (err != 0) 1541 return err; 1542 } else { 1543 qdisc_get_notify(net, skb, n, clid, q, NULL); 1544 } 1545 return 0; 1546 } 1547 1548 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, 1549 struct netlink_ext_ack *extack) 1550 { 1551 struct net *net = sock_net(skb->sk); 1552 struct tcmsg *tcm = nlmsg_data(n); 1553 struct nlattr *tca[TCA_MAX + 1]; 1554 struct net_device *dev; 1555 int err; 1556 1557 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, 1558 rtm_tca_policy, extack); 1559 if (err < 0) 1560 return err; 1561 1562 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 1563 if (!dev) 1564 return -ENODEV; 1565 1566 netdev_lock_ops(dev); 1567 err = __tc_get_qdisc(skb, n, extack, dev, tca, tcm); 1568 netdev_unlock_ops(dev); 1569 1570 return err; 1571 } 1572 1573 static bool req_create_or_replace(struct nlmsghdr *n) 1574 { 1575 return (n->nlmsg_flags & NLM_F_CREATE && 1576 n->nlmsg_flags & NLM_F_REPLACE); 1577 } 1578 1579 static bool req_create_exclusive(struct nlmsghdr *n) 1580 { 1581 return (n->nlmsg_flags & NLM_F_CREATE && 1582 n->nlmsg_flags & NLM_F_EXCL); 1583 } 1584 1585 static bool req_change(struct nlmsghdr *n) 1586 { 1587 return (!(n->nlmsg_flags & NLM_F_CREATE) && 1588 !(n->nlmsg_flags & NLM_F_REPLACE) && 1589 !(n->nlmsg_flags & NLM_F_EXCL)); 1590 } 1591 1592 static int __tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, 1593 struct netlink_ext_ack *extack, 1594 struct net_device *dev, 1595 struct nlattr *tca[TCA_MAX + 1], 1596 struct tcmsg *tcm) 1597 { 1598 struct Qdisc *q = NULL; 1599 struct Qdisc *p = NULL; 1600 u32 clid; 1601 int err; 1602 1603 clid = tcm->tcm_parent; 1604 1605 if (clid) { 1606 if (clid != TC_H_ROOT) { 1607 if (clid != TC_H_INGRESS) { 1608 p = qdisc_lookup(dev, TC_H_MAJ(clid)); 1609 if (!p) { 1610 NL_SET_ERR_MSG(extack, "Failed to find specified qdisc"); 1611 return -ENOENT; 1612 } 1613 q = qdisc_leaf(p, clid); 1614 } else if (dev_ingress_queue_create(dev)) { 1615 q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); 1616 } 1617 } else { 1618 q = rtnl_dereference(dev->qdisc); 1619 } 1620 1621 /* It may be default qdisc, ignore it */ 1622 if (q && q->handle == 0) 1623 q = NULL; 1624 1625 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) { 1626 if (tcm->tcm_handle) { 1627 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) { 1628 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override"); 1629 return -EEXIST; 1630 } 1631 if (TC_H_MIN(tcm->tcm_handle)) { 1632 NL_SET_ERR_MSG(extack, "Invalid minor handle"); 1633 return -EINVAL; 1634 } 1635 q = qdisc_lookup(dev, tcm->tcm_handle); 1636 if (!q) 1637 goto create_n_graft; 1638 if (q->parent != tcm->tcm_parent) { 1639 NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent"); 1640 return -EINVAL; 1641 } 1642 if (n->nlmsg_flags & NLM_F_EXCL) { 1643 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override"); 1644 return -EEXIST; 1645 } 1646 if (tca[TCA_KIND] && 1647 nla_strcmp(tca[TCA_KIND], q->ops->id)) { 1648 NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc"); 1649 return -EINVAL; 1650 } 1651 if (q->flags & TCQ_F_INGRESS) { 1652 NL_SET_ERR_MSG(extack, 1653 "Cannot regraft ingress or clsact Qdiscs"); 1654 return -EINVAL; 1655 } 1656 if (q == p || 1657 (p && check_loop(q, p, 0))) { 1658 NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected"); 1659 return -ELOOP; 1660 } 1661 if (clid == TC_H_INGRESS) { 1662 NL_SET_ERR_MSG(extack, "Ingress cannot graft directly"); 1663 return -EINVAL; 1664 } 1665 qdisc_refcount_inc(q); 1666 goto graft; 1667 } else { 1668 if (!q) 1669 goto create_n_graft; 1670 1671 /* This magic test requires explanation. 1672 * 1673 * We know, that some child q is already 1674 * attached to this parent and have choice: 1675 * 1) change it or 2) create/graft new one. 1676 * If the requested qdisc kind is different 1677 * than the existing one, then we choose graft. 1678 * If they are the same then this is "change" 1679 * operation - just let it fallthrough.. 1680 * 1681 * 1. We are allowed to create/graft only 1682 * if the request is explicitly stating 1683 * "please create if it doesn't exist". 1684 * 1685 * 2. If the request is to exclusive create 1686 * then the qdisc tcm_handle is not expected 1687 * to exist, so that we choose create/graft too. 1688 * 1689 * 3. The last case is when no flags are set. 1690 * This will happen when for example tc 1691 * utility issues a "change" command. 1692 * Alas, it is sort of hole in API, we 1693 * cannot decide what to do unambiguously. 1694 * For now we select create/graft. 1695 */ 1696 if (tca[TCA_KIND] && 1697 nla_strcmp(tca[TCA_KIND], q->ops->id)) { 1698 if (req_create_or_replace(n) || 1699 req_create_exclusive(n)) 1700 goto create_n_graft; 1701 else if (req_change(n)) 1702 goto create_n_graft2; 1703 } 1704 } 1705 } 1706 } else { 1707 if (!tcm->tcm_handle) { 1708 NL_SET_ERR_MSG(extack, "Handle cannot be zero"); 1709 return -EINVAL; 1710 } 1711 q = qdisc_lookup(dev, tcm->tcm_handle); 1712 } 1713 1714 /* Change qdisc parameters */ 1715 if (!q) { 1716 NL_SET_ERR_MSG(extack, "Specified qdisc not found"); 1717 return -ENOENT; 1718 } 1719 if (n->nlmsg_flags & NLM_F_EXCL) { 1720 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify"); 1721 return -EEXIST; 1722 } 1723 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) { 1724 NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc"); 1725 return -EINVAL; 1726 } 1727 err = qdisc_change(q, tca, extack); 1728 if (err == 0) 1729 qdisc_notify(sock_net(skb->sk), skb, n, clid, NULL, q, extack); 1730 return err; 1731 1732 create_n_graft: 1733 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 1734 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag"); 1735 return -ENOENT; 1736 } 1737 create_n_graft2: 1738 if (clid == TC_H_INGRESS) { 1739 if (dev_ingress_queue(dev)) { 1740 q = qdisc_create(dev, dev_ingress_queue(dev), 1741 tcm->tcm_parent, tcm->tcm_parent, 1742 tca, &err, extack); 1743 } else { 1744 NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device"); 1745 err = -ENOENT; 1746 } 1747 } else { 1748 struct netdev_queue *dev_queue; 1749 1750 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue) 1751 dev_queue = p->ops->cl_ops->select_queue(p, tcm); 1752 else if (p) 1753 dev_queue = p->dev_queue; 1754 else 1755 dev_queue = netdev_get_tx_queue(dev, 0); 1756 1757 q = qdisc_create(dev, dev_queue, 1758 tcm->tcm_parent, tcm->tcm_handle, 1759 tca, &err, extack); 1760 } 1761 if (!q) 1762 return err; 1763 1764 graft: 1765 err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack); 1766 if (err) { 1767 if (q) 1768 qdisc_put(q); 1769 return err; 1770 } 1771 1772 return 0; 1773 } 1774 1775 static void request_qdisc_module(struct nlattr *kind) 1776 { 1777 struct Qdisc_ops *ops; 1778 char name[IFNAMSIZ]; 1779 1780 if (!kind) 1781 return; 1782 1783 ops = qdisc_lookup_ops(kind); 1784 if (ops) { 1785 module_put(ops->owner); 1786 return; 1787 } 1788 1789 if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) { 1790 rtnl_unlock(); 1791 request_module(NET_SCH_ALIAS_PREFIX "%s", name); 1792 rtnl_lock(); 1793 } 1794 } 1795 1796 /* 1797 * Create/change qdisc. 1798 */ 1799 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, 1800 struct netlink_ext_ack *extack) 1801 { 1802 struct net *net = sock_net(skb->sk); 1803 struct nlattr *tca[TCA_MAX + 1]; 1804 struct net_device *dev; 1805 struct tcmsg *tcm; 1806 int err; 1807 1808 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, 1809 rtm_tca_policy, extack); 1810 if (err < 0) 1811 return err; 1812 1813 request_qdisc_module(tca[TCA_KIND]); 1814 1815 tcm = nlmsg_data(n); 1816 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 1817 if (!dev) 1818 return -ENODEV; 1819 1820 netdev_lock_ops(dev); 1821 err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm); 1822 netdev_unlock_ops(dev); 1823 1824 return err; 1825 } 1826 1827 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, 1828 struct netlink_callback *cb, 1829 int *q_idx_p, int s_q_idx, bool recur, 1830 bool dump_invisible) 1831 { 1832 int ret = 0, q_idx = *q_idx_p; 1833 struct Qdisc *q; 1834 int b; 1835 1836 if (!root) 1837 return 0; 1838 1839 q = root; 1840 if (q_idx < s_q_idx) { 1841 q_idx++; 1842 } else { 1843 if (!tc_qdisc_dump_ignore(q, dump_invisible) && 1844 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid, 1845 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1846 RTM_NEWQDISC, NULL) <= 0) 1847 goto done; 1848 q_idx++; 1849 } 1850 1851 /* If dumping singletons, there is no qdisc_dev(root) and the singleton 1852 * itself has already been dumped. 1853 * 1854 * If we've already dumped the top-level (ingress) qdisc above and the global 1855 * qdisc hashtable, we don't want to hit it again 1856 */ 1857 if (!qdisc_dev(root) || !recur) 1858 goto out; 1859 1860 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { 1861 if (q_idx < s_q_idx) { 1862 q_idx++; 1863 continue; 1864 } 1865 if (!tc_qdisc_dump_ignore(q, dump_invisible) && 1866 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid, 1867 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1868 RTM_NEWQDISC, NULL) <= 0) 1869 goto done; 1870 q_idx++; 1871 } 1872 1873 out: 1874 *q_idx_p = q_idx; 1875 return ret; 1876 done: 1877 ret = -1; 1878 goto out; 1879 } 1880 1881 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) 1882 { 1883 struct net *net = sock_net(skb->sk); 1884 int idx, q_idx; 1885 int s_idx, s_q_idx; 1886 struct net_device *dev; 1887 const struct nlmsghdr *nlh = cb->nlh; 1888 struct nlattr *tca[TCA_MAX + 1]; 1889 int err; 1890 1891 s_idx = cb->args[0]; 1892 s_q_idx = q_idx = cb->args[1]; 1893 1894 idx = 0; 1895 ASSERT_RTNL(); 1896 1897 err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX, 1898 rtm_tca_policy, cb->extack); 1899 if (err < 0) 1900 return err; 1901 1902 for_each_netdev(net, dev) { 1903 struct netdev_queue *dev_queue; 1904 1905 if (idx < s_idx) 1906 goto cont; 1907 if (idx > s_idx) 1908 s_q_idx = 0; 1909 q_idx = 0; 1910 1911 netdev_lock_ops(dev); 1912 if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc), 1913 skb, cb, &q_idx, s_q_idx, 1914 true, tca[TCA_DUMP_INVISIBLE]) < 0) { 1915 netdev_unlock_ops(dev); 1916 goto done; 1917 } 1918 1919 dev_queue = dev_ingress_queue(dev); 1920 if (dev_queue && 1921 tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping), 1922 skb, cb, &q_idx, s_q_idx, false, 1923 tca[TCA_DUMP_INVISIBLE]) < 0) { 1924 netdev_unlock_ops(dev); 1925 goto done; 1926 } 1927 netdev_unlock_ops(dev); 1928 1929 cont: 1930 idx++; 1931 } 1932 1933 done: 1934 cb->args[0] = idx; 1935 cb->args[1] = q_idx; 1936 1937 return skb->len; 1938 } 1939 1940 1941 1942 /************************************************ 1943 * Traffic classes manipulation. * 1944 ************************************************/ 1945 1946 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, 1947 unsigned long cl, u32 portid, u32 seq, u16 flags, 1948 int event, struct netlink_ext_ack *extack) 1949 { 1950 struct tcmsg *tcm; 1951 struct nlmsghdr *nlh; 1952 unsigned char *b = skb_tail_pointer(skb); 1953 struct gnet_dump d; 1954 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops; 1955 1956 cond_resched(); 1957 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 1958 if (!nlh) 1959 goto out_nlmsg_trim; 1960 tcm = nlmsg_data(nlh); 1961 tcm->tcm_family = AF_UNSPEC; 1962 tcm->tcm__pad1 = 0; 1963 tcm->tcm__pad2 = 0; 1964 tcm->tcm_ifindex = qdisc_dev(q)->ifindex; 1965 tcm->tcm_parent = q->handle; 1966 tcm->tcm_handle = q->handle; 1967 tcm->tcm_info = 0; 1968 if (nla_put_string(skb, TCA_KIND, q->ops->id)) 1969 goto nla_put_failure; 1970 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) 1971 goto nla_put_failure; 1972 1973 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, 1974 NULL, &d, TCA_PAD) < 0) 1975 goto nla_put_failure; 1976 1977 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) 1978 goto nla_put_failure; 1979 1980 if (gnet_stats_finish_copy(&d) < 0) 1981 goto nla_put_failure; 1982 1983 if (extack && extack->_msg && 1984 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) 1985 goto out_nlmsg_trim; 1986 1987 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1988 1989 return skb->len; 1990 1991 out_nlmsg_trim: 1992 nla_put_failure: 1993 nlmsg_trim(skb, b); 1994 return -1; 1995 } 1996 1997 static int tclass_notify(struct net *net, struct sk_buff *oskb, 1998 struct nlmsghdr *n, struct Qdisc *q, 1999 unsigned long cl, int event, struct netlink_ext_ack *extack) 2000 { 2001 struct sk_buff *skb; 2002 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2003 2004 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) 2005 return 0; 2006 2007 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2008 if (!skb) 2009 return -ENOBUFS; 2010 2011 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) { 2012 kfree_skb(skb); 2013 return -EINVAL; 2014 } 2015 2016 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, 2017 n->nlmsg_flags & NLM_F_ECHO); 2018 } 2019 2020 static int tclass_get_notify(struct net *net, struct sk_buff *oskb, 2021 struct nlmsghdr *n, struct Qdisc *q, 2022 unsigned long cl, struct netlink_ext_ack *extack) 2023 { 2024 struct sk_buff *skb; 2025 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2026 2027 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2028 if (!skb) 2029 return -ENOBUFS; 2030 2031 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, RTM_NEWTCLASS, 2032 extack) < 0) { 2033 kfree_skb(skb); 2034 return -EINVAL; 2035 } 2036 2037 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, 2038 n->nlmsg_flags & NLM_F_ECHO); 2039 } 2040 2041 static int tclass_del_notify(struct net *net, 2042 const struct Qdisc_class_ops *cops, 2043 struct sk_buff *oskb, struct nlmsghdr *n, 2044 struct Qdisc *q, unsigned long cl, 2045 struct netlink_ext_ack *extack) 2046 { 2047 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2048 struct sk_buff *skb; 2049 int err = 0; 2050 2051 if (!cops->delete) 2052 return -EOPNOTSUPP; 2053 2054 if (rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) { 2055 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2056 if (!skb) 2057 return -ENOBUFS; 2058 2059 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, 2060 RTM_DELTCLASS, extack) < 0) { 2061 kfree_skb(skb); 2062 return -EINVAL; 2063 } 2064 } else { 2065 skb = NULL; 2066 } 2067 2068 err = cops->delete(q, cl, extack); 2069 if (err) { 2070 kfree_skb(skb); 2071 return err; 2072 } 2073 2074 err = rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC, 2075 n->nlmsg_flags & NLM_F_ECHO); 2076 return err; 2077 } 2078 2079 #ifdef CONFIG_NET_CLS 2080 2081 struct tcf_bind_args { 2082 struct tcf_walker w; 2083 unsigned long base; 2084 unsigned long cl; 2085 u32 classid; 2086 }; 2087 2088 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg) 2089 { 2090 struct tcf_bind_args *a = (void *)arg; 2091 2092 if (n && tp->ops->bind_class) { 2093 struct Qdisc *q = tcf_block_q(tp->chain->block); 2094 2095 sch_tree_lock(q); 2096 tp->ops->bind_class(n, a->classid, a->cl, q, a->base); 2097 sch_tree_unlock(q); 2098 } 2099 return 0; 2100 } 2101 2102 struct tc_bind_class_args { 2103 struct qdisc_walker w; 2104 unsigned long new_cl; 2105 u32 portid; 2106 u32 clid; 2107 }; 2108 2109 static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl, 2110 struct qdisc_walker *w) 2111 { 2112 struct tc_bind_class_args *a = (struct tc_bind_class_args *)w; 2113 const struct Qdisc_class_ops *cops = q->ops->cl_ops; 2114 struct tcf_block *block; 2115 struct tcf_chain *chain; 2116 2117 block = cops->tcf_block(q, cl, NULL); 2118 if (!block) 2119 return 0; 2120 for (chain = tcf_get_next_chain(block, NULL); 2121 chain; 2122 chain = tcf_get_next_chain(block, chain)) { 2123 struct tcf_proto *tp; 2124 2125 for (tp = tcf_get_next_proto(chain, NULL); 2126 tp; tp = tcf_get_next_proto(chain, tp)) { 2127 struct tcf_bind_args arg = {}; 2128 2129 arg.w.fn = tcf_node_bind; 2130 arg.classid = a->clid; 2131 arg.base = cl; 2132 arg.cl = a->new_cl; 2133 tp->ops->walk(tp, &arg.w, true); 2134 } 2135 } 2136 2137 return 0; 2138 } 2139 2140 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid, 2141 unsigned long new_cl) 2142 { 2143 const struct Qdisc_class_ops *cops = q->ops->cl_ops; 2144 struct tc_bind_class_args args = {}; 2145 2146 if (!cops->tcf_block) 2147 return; 2148 args.portid = portid; 2149 args.clid = clid; 2150 args.new_cl = new_cl; 2151 args.w.fn = tc_bind_class_walker; 2152 q->ops->cl_ops->walk(q, &args.w); 2153 } 2154 2155 #else 2156 2157 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid, 2158 unsigned long new_cl) 2159 { 2160 } 2161 2162 #endif 2163 2164 static int __tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, 2165 struct netlink_ext_ack *extack, 2166 struct net_device *dev, 2167 struct nlattr *tca[TCA_MAX + 1], 2168 struct tcmsg *tcm) 2169 { 2170 struct net *net = sock_net(skb->sk); 2171 const struct Qdisc_class_ops *cops; 2172 struct Qdisc *q = NULL; 2173 unsigned long cl = 0; 2174 unsigned long new_cl; 2175 u32 portid; 2176 u32 clid; 2177 u32 qid; 2178 int err; 2179 2180 /* 2181 parent == TC_H_UNSPEC - unspecified parent. 2182 parent == TC_H_ROOT - class is root, which has no parent. 2183 parent == X:0 - parent is root class. 2184 parent == X:Y - parent is a node in hierarchy. 2185 parent == 0:Y - parent is X:Y, where X:0 is qdisc. 2186 2187 handle == 0:0 - generate handle from kernel pool. 2188 handle == 0:Y - class is X:Y, where X:0 is qdisc. 2189 handle == X:Y - clear. 2190 handle == X:0 - root class. 2191 */ 2192 2193 /* Step 1. Determine qdisc handle X:0 */ 2194 2195 portid = tcm->tcm_parent; 2196 clid = tcm->tcm_handle; 2197 qid = TC_H_MAJ(clid); 2198 2199 if (portid != TC_H_ROOT) { 2200 u32 qid1 = TC_H_MAJ(portid); 2201 2202 if (qid && qid1) { 2203 /* If both majors are known, they must be identical. */ 2204 if (qid != qid1) 2205 return -EINVAL; 2206 } else if (qid1) { 2207 qid = qid1; 2208 } else if (qid == 0) 2209 qid = rtnl_dereference(dev->qdisc)->handle; 2210 2211 /* Now qid is genuine qdisc handle consistent 2212 * both with parent and child. 2213 * 2214 * TC_H_MAJ(portid) still may be unspecified, complete it now. 2215 */ 2216 if (portid) 2217 portid = TC_H_MAKE(qid, portid); 2218 } else { 2219 if (qid == 0) 2220 qid = rtnl_dereference(dev->qdisc)->handle; 2221 } 2222 2223 /* OK. Locate qdisc */ 2224 q = qdisc_lookup(dev, qid); 2225 if (!q) 2226 return -ENOENT; 2227 2228 /* An check that it supports classes */ 2229 cops = q->ops->cl_ops; 2230 if (cops == NULL) 2231 return -EINVAL; 2232 2233 /* Now try to get class */ 2234 if (clid == 0) { 2235 if (portid == TC_H_ROOT) 2236 clid = qid; 2237 } else 2238 clid = TC_H_MAKE(qid, clid); 2239 2240 if (clid) 2241 cl = cops->find(q, clid); 2242 2243 if (cl == 0) { 2244 err = -ENOENT; 2245 if (n->nlmsg_type != RTM_NEWTCLASS || 2246 !(n->nlmsg_flags & NLM_F_CREATE)) 2247 goto out; 2248 } else { 2249 switch (n->nlmsg_type) { 2250 case RTM_NEWTCLASS: 2251 err = -EEXIST; 2252 if (n->nlmsg_flags & NLM_F_EXCL) 2253 goto out; 2254 break; 2255 case RTM_DELTCLASS: 2256 err = tclass_del_notify(net, cops, skb, n, q, cl, extack); 2257 /* Unbind the class with flilters with 0 */ 2258 tc_bind_tclass(q, portid, clid, 0); 2259 goto out; 2260 case RTM_GETTCLASS: 2261 err = tclass_get_notify(net, skb, n, q, cl, extack); 2262 goto out; 2263 default: 2264 err = -EINVAL; 2265 goto out; 2266 } 2267 } 2268 2269 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) { 2270 NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes"); 2271 return -EOPNOTSUPP; 2272 } 2273 2274 /* Prevent creation of traffic classes with classid TC_H_ROOT */ 2275 if (clid == TC_H_ROOT) { 2276 NL_SET_ERR_MSG(extack, "Cannot create traffic class with classid TC_H_ROOT"); 2277 return -EINVAL; 2278 } 2279 2280 new_cl = cl; 2281 err = -EOPNOTSUPP; 2282 if (cops->change) 2283 err = cops->change(q, clid, portid, tca, &new_cl, extack); 2284 if (err == 0) { 2285 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack); 2286 /* We just create a new class, need to do reverse binding. */ 2287 if (cl != new_cl) 2288 tc_bind_tclass(q, portid, clid, new_cl); 2289 } 2290 out: 2291 return err; 2292 } 2293 2294 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, 2295 struct netlink_ext_ack *extack) 2296 { 2297 struct net *net = sock_net(skb->sk); 2298 struct tcmsg *tcm = nlmsg_data(n); 2299 struct nlattr *tca[TCA_MAX + 1]; 2300 struct net_device *dev; 2301 int err; 2302 2303 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, 2304 rtm_tca_policy, extack); 2305 if (err < 0) 2306 return err; 2307 2308 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2309 if (!dev) 2310 return -ENODEV; 2311 2312 netdev_lock_ops(dev); 2313 err = __tc_ctl_tclass(skb, n, extack, dev, tca, tcm); 2314 netdev_unlock_ops(dev); 2315 2316 return err; 2317 } 2318 2319 struct qdisc_dump_args { 2320 struct qdisc_walker w; 2321 struct sk_buff *skb; 2322 struct netlink_callback *cb; 2323 }; 2324 2325 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, 2326 struct qdisc_walker *arg) 2327 { 2328 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg; 2329 2330 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid, 2331 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, 2332 RTM_NEWTCLASS, NULL); 2333 } 2334 2335 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb, 2336 struct tcmsg *tcm, struct netlink_callback *cb, 2337 int *t_p, int s_t) 2338 { 2339 struct qdisc_dump_args arg; 2340 2341 if (tc_qdisc_dump_ignore(q, false) || 2342 *t_p < s_t || !q->ops->cl_ops || 2343 (tcm->tcm_parent && 2344 TC_H_MAJ(tcm->tcm_parent) != q->handle)) { 2345 (*t_p)++; 2346 return 0; 2347 } 2348 if (*t_p > s_t) 2349 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0])); 2350 arg.w.fn = qdisc_class_dump; 2351 arg.skb = skb; 2352 arg.cb = cb; 2353 arg.w.stop = 0; 2354 arg.w.skip = cb->args[1]; 2355 arg.w.count = 0; 2356 q->ops->cl_ops->walk(q, &arg.w); 2357 cb->args[1] = arg.w.count; 2358 if (arg.w.stop) 2359 return -1; 2360 (*t_p)++; 2361 return 0; 2362 } 2363 2364 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, 2365 struct tcmsg *tcm, struct netlink_callback *cb, 2366 int *t_p, int s_t, bool recur) 2367 { 2368 struct Qdisc *q; 2369 int b; 2370 2371 if (!root) 2372 return 0; 2373 2374 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0) 2375 return -1; 2376 2377 if (!qdisc_dev(root) || !recur) 2378 return 0; 2379 2380 if (tcm->tcm_parent) { 2381 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent)); 2382 if (q && q != root && 2383 tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) 2384 return -1; 2385 return 0; 2386 } 2387 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { 2388 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) 2389 return -1; 2390 } 2391 2392 return 0; 2393 } 2394 2395 static int __tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb, 2396 struct tcmsg *tcm, struct net_device *dev) 2397 { 2398 struct netdev_queue *dev_queue; 2399 int t, s_t; 2400 2401 s_t = cb->args[0]; 2402 t = 0; 2403 2404 if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc), 2405 skb, tcm, cb, &t, s_t, true) < 0) 2406 goto done; 2407 2408 dev_queue = dev_ingress_queue(dev); 2409 if (dev_queue && 2410 tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping), 2411 skb, tcm, cb, &t, s_t, false) < 0) 2412 goto done; 2413 2414 done: 2415 cb->args[0] = t; 2416 2417 return skb->len; 2418 } 2419 2420 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) 2421 { 2422 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2423 struct net *net = sock_net(skb->sk); 2424 struct net_device *dev; 2425 int err; 2426 2427 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2428 return 0; 2429 2430 dev = dev_get_by_index(net, tcm->tcm_ifindex); 2431 if (!dev) 2432 return 0; 2433 2434 netdev_lock_ops(dev); 2435 err = __tc_dump_tclass(skb, cb, tcm, dev); 2436 netdev_unlock_ops(dev); 2437 2438 dev_put(dev); 2439 2440 return err; 2441 } 2442 2443 #ifdef CONFIG_PROC_FS 2444 static int psched_show(struct seq_file *seq, void *v) 2445 { 2446 seq_printf(seq, "%08x %08x %08x %08x\n", 2447 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1), 2448 1000000, 2449 (u32)NSEC_PER_SEC / hrtimer_resolution); 2450 2451 return 0; 2452 } 2453 2454 static int __net_init psched_net_init(struct net *net) 2455 { 2456 struct proc_dir_entry *e; 2457 2458 e = proc_create_single("psched", 0, net->proc_net, psched_show); 2459 if (e == NULL) 2460 return -ENOMEM; 2461 2462 return 0; 2463 } 2464 2465 static void __net_exit psched_net_exit(struct net *net) 2466 { 2467 remove_proc_entry("psched", net->proc_net); 2468 } 2469 #else 2470 static int __net_init psched_net_init(struct net *net) 2471 { 2472 return 0; 2473 } 2474 2475 static void __net_exit psched_net_exit(struct net *net) 2476 { 2477 } 2478 #endif 2479 2480 static struct pernet_operations psched_net_ops = { 2481 .init = psched_net_init, 2482 .exit = psched_net_exit, 2483 }; 2484 2485 #if IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) 2486 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper); 2487 #endif 2488 2489 static const struct rtnl_msg_handler psched_rtnl_msg_handlers[] __initconst = { 2490 {.msgtype = RTM_NEWQDISC, .doit = tc_modify_qdisc}, 2491 {.msgtype = RTM_DELQDISC, .doit = tc_get_qdisc}, 2492 {.msgtype = RTM_GETQDISC, .doit = tc_get_qdisc, 2493 .dumpit = tc_dump_qdisc}, 2494 {.msgtype = RTM_NEWTCLASS, .doit = tc_ctl_tclass}, 2495 {.msgtype = RTM_DELTCLASS, .doit = tc_ctl_tclass}, 2496 {.msgtype = RTM_GETTCLASS, .doit = tc_ctl_tclass, 2497 .dumpit = tc_dump_tclass}, 2498 }; 2499 2500 static int __init pktsched_init(void) 2501 { 2502 int err; 2503 2504 err = register_pernet_subsys(&psched_net_ops); 2505 if (err) { 2506 pr_err("pktsched_init: " 2507 "cannot initialize per netns operations\n"); 2508 return err; 2509 } 2510 2511 register_qdisc(&pfifo_fast_ops); 2512 register_qdisc(&pfifo_qdisc_ops); 2513 register_qdisc(&bfifo_qdisc_ops); 2514 register_qdisc(&pfifo_head_drop_qdisc_ops); 2515 register_qdisc(&mq_qdisc_ops); 2516 register_qdisc(&noqueue_qdisc_ops); 2517 2518 rtnl_register_many(psched_rtnl_msg_handlers); 2519 2520 tc_wrapper_init(); 2521 2522 return 0; 2523 } 2524 2525 subsys_initcall(pktsched_init); 2526