// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

#include "sch_mqprio_lib.h"

struct mqprio_sched {
	struct Qdisc		**qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
	u32 fp[TC_QOPT_MAX_QUEUE];
};

static int mqprio_enable_offload(struct Qdisc *sch,
				 const struct tc_mqprio_qopt *qopt,
				 struct netlink_ext_ack *extack)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt_offload mqprio = {
		.qopt = *qopt,
		.extack = extack,
	};
	int err, i;

	switch (priv->mode) {
	case TC_MQPRIO_MODE_DCB:
		if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
			return -EINVAL;
		break;
	case TC_MQPRIO_MODE_CHANNEL:
		mqprio.flags = priv->flags;
		if (priv->flags & TC_MQPRIO_F_MODE)
			mqprio.mode = priv->mode;
		if (priv->flags & TC_MQPRIO_F_SHAPER)
			mqprio.shaper = priv->shaper;
		if (priv->flags & TC_MQPRIO_F_MIN_RATE)
			for (i = 0; i < mqprio.qopt.num_tc; i++)
				mqprio.min_rate[i] = priv->min_rate[i];
		if (priv->flags & TC_MQPRIO_F_MAX_RATE)
			for (i = 0; i < mqprio.qopt.num_tc; i++)
				mqprio.max_rate[i] = priv->max_rate[i];
		break;
	default:
		return -EINVAL;
	}

	mqprio_fp_to_offload(priv->fp, &mqprio);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					    &mqprio);
	if (err)
		return err;

	priv->hw_offload = mqprio.qopt.hw;

	return 0;
}
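/* An offloading driver consumes the struct tc_mqprio_qopt_offload built
 * above from its ndo_setup_tc() callback. A minimal, illustrative sketch
 * (the "foo" names are hypothetical, not a real driver):
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		struct tc_mqprio_qopt_offload *mqprio = type_data;
 *
 *		if (type != TC_SETUP_QDISC_MQPRIO)
 *			return -EOPNOTSUPP;
 *
 *		// program the device from mqprio->qopt.num_tc and the
 *		// count[]/offset[] tables; in channel mode, also apply
 *		// the min_rate[]/max_rate[] shaper tables
 *		return 0;
 *	}
 */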
static void mqprio_disable_offload(struct Qdisc *sch)
{
	struct tc_mqprio_qopt_offload mqprio = { { 0 } };
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	switch (priv->mode) {
	case TC_MQPRIO_MODE_DCB:
	case TC_MQPRIO_MODE_CHANNEL:
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					      &mqprio);
		break;
	}
}

static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc)
		mqprio_disable_offload(sch);
	else
		netdev_set_num_tc(dev, 0);
}

static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
			    const struct tc_mqprio_caps *caps,
			    struct netlink_ext_ack *extack)
{
	int err;

	/* Limit qopt->hw to the maximum supported offload value. Drivers
	 * have the option of overriding this later if they don't support a
	 * given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested, we will leave 3 options to the
	 * device driver:
	 * - populate the queue counts itself (and ignore what was requested)
	 * - validate the provided queue counts by itself (and apply them)
	 * - request queue count validation here (and apply them)
	 */
	err = mqprio_validate_qopt(dev, qopt,
				   !qopt->hw || caps->validate_queue_counts,
				   false, extack);
	if (err)
		return err;

	/* If ndo_setup_tc is not present then hardware doesn't support
	 * offload and we should return an error.
	 */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support hardware offload");
		return -EINVAL;
	}

	return 0;
}

static const struct
nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
	[TCA_MQPRIO_TC_ENTRY_INDEX]	= NLA_POLICY_MAX(NLA_U32,
							 TC_QOPT_MAX_QUEUE - 1),
	[TCA_MQPRIO_TC_ENTRY_FP]	= NLA_POLICY_RANGE(NLA_U32,
							   TC_FP_EXPRESS,
							   TC_FP_PREEMPTIBLE),
};

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_TC_ENTRY]	= { .type = NLA_NESTED },
};
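/* Each TCA_MQPRIO_TC_ENTRY is a nest of the attributes covered by
 * mqprio_tc_entry_policy above. Illustrative layout:
 *
 *   [TCA_MQPRIO_TC_ENTRY]
 *     [TCA_MQPRIO_TC_ENTRY_INDEX]  u32, 0 .. TC_QOPT_MAX_QUEUE - 1 (required)
 *     [TCA_MQPRIO_TC_ENTRY_FP]     u32, TC_FP_EXPRESS or TC_FP_PREEMPTIBLE
 */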
static int mqprio_parse_tc_entry(u32 fp[TC_QOPT_MAX_QUEUE],
				 struct nlattr *opt,
				 unsigned long *seen_tcs,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_MQPRIO_TC_ENTRY_MAX + 1];
	int err, tc;

	err = nla_parse_nested(tb, TCA_MQPRIO_TC_ENTRY_MAX, opt,
			       mqprio_tc_entry_policy, extack);
	if (err < 0)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, opt, tb, TCA_MQPRIO_TC_ENTRY_INDEX)) {
		NL_SET_ERR_MSG(extack, "TC entry index missing");
		return -EINVAL;
	}

	tc = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_INDEX]);
	if (*seen_tcs & BIT(tc)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_TC_ENTRY_INDEX],
				    "Duplicate tc entry");
		return -EINVAL;
	}

	*seen_tcs |= BIT(tc);

	if (tb[TCA_MQPRIO_TC_ENTRY_FP])
		fp[tc] = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_FP]);

	return 0;
}

static int mqprio_parse_tc_entries(struct Qdisc *sch, struct nlattr *nlattr_opt,
				   int nlattr_opt_len,
				   struct netlink_ext_ack *extack)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool have_preemption = false;
	unsigned long seen_tcs = 0;
	u32 fp[TC_QOPT_MAX_QUEUE];
	struct nlattr *n;
	int tc, rem;
	int err = 0;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
		fp[tc] = priv->fp[tc];

	nla_for_each_attr_type(n, TCA_MQPRIO_TC_ENTRY, nlattr_opt,
			       nlattr_opt_len, rem) {
		err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack);
		if (err)
			goto out;
	}

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		priv->fp[tc] = fp[tc];
		if (fp[tc] == TC_FP_PREEMPTIBLE)
			have_preemption = true;
	}

	if (have_preemption && !ethtool_dev_mm_supported(dev)) {
		NL_SET_ERR_MSG(extack, "Device does not support preemption");
		return -EOPNOTSUPP;
	}
out:
	return err;
}

/* Parse the other netlink attributes that represent the payload of
 * TCA_OPTIONS, which are appended right after struct tc_mqprio_qopt.
 */
static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
			       struct nlattr *opt,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *nlattr_opt = nla_data(opt) + NLA_ALIGN(sizeof(*qopt));
	int nlattr_opt_len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *tb[TCA_MQPRIO_MAX + 1] = {};
	struct nlattr *attr;
	int i, rem, err;

	if (nlattr_opt_len >= nla_attr_size(0)) {
		err = nla_parse_deprecated(tb, TCA_MQPRIO_MAX, nlattr_opt,
					   nlattr_opt_len, mqprio_policy,
					   NULL);
		if (err < 0)
			return err;
	}

	if (!qopt->hw) {
		NL_SET_ERR_MSG(extack,
			       "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
		return -EINVAL;
	}

	if (tb[TCA_MQPRIO_MODE]) {
		priv->flags |= TC_MQPRIO_F_MODE;
		priv->mode = nla_get_u16(tb[TCA_MQPRIO_MODE]);
	}

	if (tb[TCA_MQPRIO_SHAPER]) {
		priv->flags |= TC_MQPRIO_F_SHAPER;
		priv->shaper = nla_get_u16(tb[TCA_MQPRIO_SHAPER]);
	}

	if (tb[TCA_MQPRIO_MIN_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
					    "min_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
				return -EINVAL;
			}

			if (nla_len(attr) != sizeof(u64)) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
				return -EINVAL;
			}

			if (i >= qopt->num_tc)
				break;
			priv->min_rate[i] = nla_get_u64(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MIN_RATE;
	}

	if (tb[TCA_MQPRIO_MAX_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
					    "max_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
				return -EINVAL;
			}

			if (nla_len(attr) != sizeof(u64)) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
				return -EINVAL;
			}

			if (i >= qopt->num_tc)
				break;
			priv->max_rate[i] = nla_get_u64(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MAX_RATE;
	}

	if (tb[TCA_MQPRIO_TC_ENTRY]) {
		err = mqprio_parse_tc_entries(sch, nlattr_opt, nlattr_opt_len,
					      extack);
		if (err)
			return err;
	}

	return 0;
}
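/* Example configuration (illustrative values; assumes an 8-queue device
 * named eth0):
 *
 *	tc qdisc add dev eth0 root handle 1: mqprio \
 *		num_tc 2 map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 0
 *
 * This maps priorities 0-3 to TC 0 (queues 0-3) and priorities 4-7 to
 * TC 1 (queues 4-7), without hardware offload.
 */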
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct tc_mqprio_caps caps;
	int len, tc;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* Make certain we can allocate enough classids to handle queues. */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
		priv->fp[tc] = TC_FP_EXPRESS;

	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO,
				 &caps, sizeof(caps));

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt, &caps, extack))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = mqprio_parse_nlattr(sch, qopt, opt, extack);
		if (err)
			return err;
	}

	/* Pre-allocate qdiscs; attachment can't fail. */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the queue
	 * mapping then run ndo_setup_tc; otherwise use the supplied and
	 * verified mapping.
	 */
	if (qopt->hw) {
		err = mqprio_enable_offload(sch, qopt, extack);
		if (err)
			return err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}
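/* Emit the per-TC min/max rate tables as nested netlink attributes,
 * mirroring the layout accepted by mqprio_parse_nlattr().
 */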
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int mqprio_dump_tc_entries(struct mqprio_sched *priv,
				  struct sk_buff *skb)
{
	struct nlattr *n;
	int tc;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		n = nla_nest_start(skb, TCA_MQPRIO_TC_ENTRY);
		if (!n)
			return -EMSGSIZE;

		if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_INDEX, tc))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_FP, priv->fp[tc]))
			goto nla_put_failure;

		nla_nest_end(skb, n);
	}

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, n);
	return -EMSGSIZE;
}

static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Per-CPU stats are added to the counters in-band, and the
	 * totals of locking qdiscs are added at the end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
		spin_lock_bh(qdisc_lock(qdisc));

		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	mqprio_qopt_reconstruct(dev, &opt);
	opt.hw = priv->hw_offload;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	if (mqprio_dump_tc_entries(priv, skb))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return rtnl_dereference(dev_queue->qdisc_sleeping);
}
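/* The classid space has two regions: classids 1..num_tx_queues address the
 * per-queue child qdiscs, while classids starting at TC_H_MIN_PRIORITY
 * address the virtual per-TC classes. Illustrative layout for a 4-queue
 * device with 2 TCs:
 *
 *   cl 1..4                      -> TX queues 0..3
 *   cl TC_H_MIN_PRIORITY + 0..1  -> traffic classes 0..1
 */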
static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1.
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_sync bstats;
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		gnet_stats_basic_sync_init(&bstats);
		/* Drop the lock here; it will be reclaimed before touching
		 * statistics. This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

			spin_lock_bh(qdisc_lock(qdisc));

			gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
					     &qdisc->bstats, false);
			gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
					     &qdisc->qstats);
			sch->q.qlen += qdisc_qlen(qdisc);

			spin_unlock_bh(qdisc_lock(qdisc));
		}
		qlen = qdisc_qlen(sch) + qstats.qlen;

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = rtnl_dereference(dev_queue->qdisc_sleeping);
		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
					  &sch->bstats, true) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}
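/* Walk the class space: first the virtual per-TC classes, then the
 * per-queue classes backing the individual TX queues.
 */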
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + TC_H_MIN_PRIORITY, arg))
			return;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops			= &mqprio_class_ops,
	.id			= "mqprio",
	.priv_size		= sizeof(struct mqprio_sched),
	.init			= mqprio_init,
	.destroy		= mqprio_destroy,
	.attach			= mqprio_attach,
	.change_real_num_tx	= mq_change_real_num_tx,
	.dump			= mqprio_dump,
	.owner			= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("mqprio");

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Classful multiqueue prio qdisc");