/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

struct mqprio_sched {
	struct Qdisc **qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt_offload mqprio = { { 0 } };

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
		case TC_MQPRIO_MODE_CHANNEL:
			dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO,
						      &mqprio);
			break;
		default:
			return;
		}
	} else {
		netdev_set_num_tc(dev, 0);
	}
}

static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to maximum supported offload value. Drivers have
	 * the option of overriding this later if they don't support a
	 * given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts. If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

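	/* A worked example of the validation below (illustrative values,
	 * not taken from this file): on a device with
	 * real_num_tx_queues == 4, num_tc = 2 with count = {2, 2} and
	 * offset = {0, 2} is accepted (TC 0 owns txq 0-1, TC 1 owns
	 * txq 2-3). count = {2, 3} with the same offsets is rejected
	 * because TC 1 would end past queue 3, and offset = {0, 1} is
	 * rejected because the two classes would overlap on txq 1.
	 */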
	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is within the tx queue range;
		 * "last" equal to real_num_tx_queues means the final
		 * queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int rem;
	int len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	if (len > 0) {
		err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
				 sizeof(*qopt));
		if (err < 0)
			return err;

		if (!qopt->hw)
			return -EINVAL;

		if (tb[TCA_MQPRIO_MODE]) {
			priv->flags |= TC_MQPRIO_F_MODE;
			priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
		}

		if (tb[TCA_MQPRIO_SHAPER]) {
			priv->flags |= TC_MQPRIO_F_SHAPER;
			priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
		}

		if (tb[TCA_MQPRIO_MIN_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->min_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MIN_RATE;
		}

		if (tb[TCA_MQPRIO_MAX_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->max_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MAX_RATE;
		}
	}

	/* pre-allocate qdiscs; attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

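	/* The loop below creates one default child qdisc per hardware tx
	 * queue, with classid minor number i + 1, so that mqprio_attach()
	 * can later graft them without a failure path. Which qdisc ops get
	 * used is decided by get_default_qdisc_ops(); typically this is
	 * pfifo_fast unless a different system default is configured.
	 */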
	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the
	 * queue mapping, run ndo_setup_tc; otherwise use the supplied
	 * and verified mapping.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
			if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
				return -EINVAL;
			break;
		case TC_MQPRIO_MODE_CHANNEL:
			mqprio.flags = priv->flags;
			if (priv->flags & TC_MQPRIO_F_MODE)
				mqprio.mode = priv->mode;
			if (priv->flags & TC_MQPRIO_F_SHAPER)
				mqprio.shaper = priv->shaper;
			if (priv->flags & TC_MQPRIO_F_MIN_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.min_rate[i] = priv->min_rate[i];
			if (priv->flags & TC_MQPRIO_F_MAX_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.max_rate[i] = priv->max_rate[i];
			break;
		default:
			return -EINVAL;
		}
		err = dev->netdev_ops->ndo_setup_tc(dev,
						    TC_SETUP_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.qopt.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

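/* dump_rates() below mirrors what mqprio_init() parses on the way in:
 * the per-TC minimum and maximum rates are emitted as
 * TCA_MQPRIO_MIN_RATE64 / TCA_MQPRIO_MAX_RATE64 nests, each holding
 * one u64 attribute per configured traffic class.
 */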
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen += qdisc->q.qlen;
		sch->bstats.bytes += qdisc->bstats.bytes;
		sch->bstats.packets += qdisc->bstats.packets;
		sch->qstats.backlog += qdisc->qstats.backlog;
		sch->qstats.drops += qdisc->qstats.drops;
		sch->qstats.requeues += qdisc->qstats.requeues;
		sch->qstats.overlimits += qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

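/* The class operations below expose two kinds of classes, as described
 * in mqprio_find(): minor numbers 1 through num_tx_queues address the
 * per-queue child qdiscs, while minor numbers from TC_H_MIN_PRIORITY
 * up address the virtual hardware traffic classes.
 */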
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1.
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ?
		ntx : 0;
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen = 0;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		/* Drop the lock here; it will be reclaimed before touching
		 * statistics. This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);

			qdisc = rtnl_dereference(q->qdisc);
			spin_lock_bh(qdisc_lock(qdisc));
			qlen += qdisc->q.qlen;
			bstats.bytes += qdisc->bstats.bytes;
			bstats.packets += qdisc->bstats.packets;
			qstats.backlog += qdisc->qstats.backlog;
			qstats.drops += qdisc->qstats.drops;
			qstats.requeues += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}
		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
					  d, NULL, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL,
					  &sch->qstats, sch->q.qlen) < 0)
			return -1;
	}
	return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
};

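/* Note that, like mq, mqprio registers no enqueue/dequeue hooks of its
 * own: once the children are grafted onto the tx queues, the transmit
 * path picks a queue and uses that queue's qdisc directly, so this
 * root qdisc only carries configuration and aggregated statistics
 * (hence TCQ_F_MQROOT in mqprio_init()).
 */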
static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");
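
/* Example configuration from user space (a sketch; the device name and
 * queue layout are illustrative, not mandated by this file):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *      map 0 0 0 0 1 1 1 1 2 2 2 2 0 0 0 0 \
 *      queues 4@0 4@4 4@8 hw 0
 *
 * This defines three traffic classes over twelve tx queues and maps
 * skb priorities 0-15 onto them via the map table validated by
 * mqprio_parse_opt() above.
 */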