1 // SPDX-License-Identifier: GPL-2.0 2 3 /* net/sched/sch_taprio.c Time Aware Priority Scheduler 4 * 5 * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com> 6 * 7 */ 8 9 #include <linux/types.h> 10 #include <linux/slab.h> 11 #include <linux/kernel.h> 12 #include <linux/string.h> 13 #include <linux/list.h> 14 #include <linux/errno.h> 15 #include <linux/skbuff.h> 16 #include <linux/math64.h> 17 #include <linux/module.h> 18 #include <linux/spinlock.h> 19 #include <linux/rcupdate.h> 20 #include <net/netlink.h> 21 #include <net/pkt_sched.h> 22 #include <net/pkt_cls.h> 23 #include <net/sch_generic.h> 24 #include <net/sock.h> 25 #include <net/tcp.h> 26 27 static LIST_HEAD(taprio_list); 28 static DEFINE_SPINLOCK(taprio_list_lock); 29 30 #define TAPRIO_ALL_GATES_OPEN -1 31 32 #define FLAGS_VALID(flags) (!((flags) & ~TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)) 33 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) 34 35 struct sched_entry { 36 struct list_head list; 37 38 /* The instant that this entry "closes" and the next one 39 * should open, the qdisc will make some effort so that no 40 * packet leaves after this time. 41 */ 42 ktime_t close_time; 43 ktime_t next_txtime; 44 atomic_t budget; 45 int index; 46 u32 gate_mask; 47 u32 interval; 48 u8 command; 49 }; 50 51 struct sched_gate_list { 52 struct rcu_head rcu; 53 struct list_head entries; 54 size_t num_entries; 55 ktime_t cycle_close_time; 56 s64 cycle_time; 57 s64 cycle_time_extension; 58 s64 base_time; 59 }; 60 61 struct taprio_sched { 62 struct Qdisc **qdiscs; 63 struct Qdisc *root; 64 u32 flags; 65 enum tk_offsets tk_offset; 66 int clockid; 67 atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+ 68 * speeds it's sub-nanoseconds per byte 69 */ 70 71 /* Protects the update side of the RCU protected current_entry */ 72 spinlock_t current_entry_lock; 73 struct sched_entry __rcu *current_entry; 74 struct sched_gate_list __rcu *oper_sched; 75 struct sched_gate_list __rcu *admin_sched; 76 struct hrtimer advance_timer; 77 struct list_head taprio_list; 78 u32 txtime_delay; 79 }; 80 81 static ktime_t sched_base_time(const struct sched_gate_list *sched) 82 { 83 if (!sched) 84 return KTIME_MAX; 85 86 return ns_to_ktime(sched->base_time); 87 } 88 89 static ktime_t taprio_get_time(struct taprio_sched *q) 90 { 91 ktime_t mono = ktime_get(); 92 93 switch (q->tk_offset) { 94 case TK_OFFS_MAX: 95 return mono; 96 default: 97 return ktime_mono_to_any(mono, q->tk_offset); 98 } 99 100 return KTIME_MAX; 101 } 102 103 static void taprio_free_sched_cb(struct rcu_head *head) 104 { 105 struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu); 106 struct sched_entry *entry, *n; 107 108 if (!sched) 109 return; 110 111 list_for_each_entry_safe(entry, n, &sched->entries, list) { 112 list_del(&entry->list); 113 kfree(entry); 114 } 115 116 kfree(sched); 117 } 118 119 static void switch_schedules(struct taprio_sched *q, 120 struct sched_gate_list **admin, 121 struct sched_gate_list **oper) 122 { 123 rcu_assign_pointer(q->oper_sched, *admin); 124 rcu_assign_pointer(q->admin_sched, NULL); 125 126 if (*oper) 127 call_rcu(&(*oper)->rcu, taprio_free_sched_cb); 128 129 *oper = *admin; 130 *admin = NULL; 131 } 132 133 /* Get how much time has been already elapsed in the current cycle. */ 134 static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time) 135 { 136 ktime_t time_since_sched_start; 137 s32 time_elapsed; 138 139 time_since_sched_start = ktime_sub(time, sched->base_time); 140 div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed); 141 142 return time_elapsed; 143 } 144 145 static ktime_t get_interval_end_time(struct sched_gate_list *sched, 146 struct sched_gate_list *admin, 147 struct sched_entry *entry, 148 ktime_t intv_start) 149 { 150 s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start); 151 ktime_t intv_end, cycle_ext_end, cycle_end; 152 153 cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed); 154 intv_end = ktime_add_ns(intv_start, entry->interval); 155 cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension); 156 157 if (ktime_before(intv_end, cycle_end)) 158 return intv_end; 159 else if (admin && admin != sched && 160 ktime_after(admin->base_time, cycle_end) && 161 ktime_before(admin->base_time, cycle_ext_end)) 162 return admin->base_time; 163 else 164 return cycle_end; 165 } 166 167 static int length_to_duration(struct taprio_sched *q, int len) 168 { 169 return div_u64(len * atomic64_read(&q->picos_per_byte), 1000); 170 } 171 172 /* Returns the entry corresponding to next available interval. If 173 * validate_interval is set, it only validates whether the timestamp occurs 174 * when the gate corresponding to the skb's traffic class is open. 175 */ 176 static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb, 177 struct Qdisc *sch, 178 struct sched_gate_list *sched, 179 struct sched_gate_list *admin, 180 ktime_t time, 181 ktime_t *interval_start, 182 ktime_t *interval_end, 183 bool validate_interval) 184 { 185 ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time; 186 ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time; 187 struct sched_entry *entry = NULL, *entry_found = NULL; 188 struct taprio_sched *q = qdisc_priv(sch); 189 struct net_device *dev = qdisc_dev(sch); 190 bool entry_available = false; 191 s32 cycle_elapsed; 192 int tc, n; 193 194 tc = netdev_get_prio_tc_map(dev, skb->priority); 195 packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb)); 196 197 *interval_start = 0; 198 *interval_end = 0; 199 200 if (!sched) 201 return NULL; 202 203 cycle = sched->cycle_time; 204 cycle_elapsed = get_cycle_time_elapsed(sched, time); 205 curr_intv_end = ktime_sub_ns(time, cycle_elapsed); 206 cycle_end = ktime_add_ns(curr_intv_end, cycle); 207 208 list_for_each_entry(entry, &sched->entries, list) { 209 curr_intv_start = curr_intv_end; 210 curr_intv_end = get_interval_end_time(sched, admin, entry, 211 curr_intv_start); 212 213 if (ktime_after(curr_intv_start, cycle_end)) 214 break; 215 216 if (!(entry->gate_mask & BIT(tc)) || 217 packet_transmit_time > entry->interval) 218 continue; 219 220 txtime = entry->next_txtime; 221 222 if (ktime_before(txtime, time) || validate_interval) { 223 transmit_end_time = ktime_add_ns(time, packet_transmit_time); 224 if ((ktime_before(curr_intv_start, time) && 225 ktime_before(transmit_end_time, curr_intv_end)) || 226 (ktime_after(curr_intv_start, time) && !validate_interval)) { 227 entry_found = entry; 228 *interval_start = curr_intv_start; 229 *interval_end = curr_intv_end; 230 break; 231 } else if (!entry_available && !validate_interval) { 232 /* Here, we are just trying to find out the 233 * first available interval in the next cycle. 234 */ 235 entry_available = 1; 236 entry_found = entry; 237 *interval_start = ktime_add_ns(curr_intv_start, cycle); 238 *interval_end = ktime_add_ns(curr_intv_end, cycle); 239 } 240 } else if (ktime_before(txtime, earliest_txtime) && 241 !entry_available) { 242 earliest_txtime = txtime; 243 entry_found = entry; 244 n = div_s64(ktime_sub(txtime, curr_intv_start), cycle); 245 *interval_start = ktime_add(curr_intv_start, n * cycle); 246 *interval_end = ktime_add(curr_intv_end, n * cycle); 247 } 248 } 249 250 return entry_found; 251 } 252 253 static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch) 254 { 255 struct taprio_sched *q = qdisc_priv(sch); 256 struct sched_gate_list *sched, *admin; 257 ktime_t interval_start, interval_end; 258 struct sched_entry *entry; 259 260 rcu_read_lock(); 261 sched = rcu_dereference(q->oper_sched); 262 admin = rcu_dereference(q->admin_sched); 263 264 entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp, 265 &interval_start, &interval_end, true); 266 rcu_read_unlock(); 267 268 return entry; 269 } 270 271 /* This returns the tstamp value set by TCP in terms of the set clock. */ 272 static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb) 273 { 274 unsigned int offset = skb_network_offset(skb); 275 const struct ipv6hdr *ipv6h; 276 const struct iphdr *iph; 277 struct ipv6hdr _ipv6h; 278 279 ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h); 280 if (!ipv6h) 281 return 0; 282 283 if (ipv6h->version == 4) { 284 iph = (struct iphdr *)ipv6h; 285 offset += iph->ihl * 4; 286 287 /* special-case 6in4 tunnelling, as that is a common way to get 288 * v6 connectivity in the home 289 */ 290 if (iph->protocol == IPPROTO_IPV6) { 291 ipv6h = skb_header_pointer(skb, offset, 292 sizeof(_ipv6h), &_ipv6h); 293 294 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP) 295 return 0; 296 } else if (iph->protocol != IPPROTO_TCP) { 297 return 0; 298 } 299 } else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) { 300 return 0; 301 } 302 303 return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset); 304 } 305 306 /* There are a few scenarios where we will have to modify the txtime from 307 * what is read from next_txtime in sched_entry. They are: 308 * 1. If txtime is in the past, 309 * a. The gate for the traffic class is currently open and packet can be 310 * transmitted before it closes, schedule the packet right away. 311 * b. If the gate corresponding to the traffic class is going to open later 312 * in the cycle, set the txtime of packet to the interval start. 313 * 2. If txtime is in the future, there are packets corresponding to the 314 * current traffic class waiting to be transmitted. So, the following 315 * possibilities exist: 316 * a. We can transmit the packet before the window containing the txtime 317 * closes. 318 * b. The window might close before the transmission can be completed 319 * successfully. So, schedule the packet in the next open window. 320 */ 321 static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch) 322 { 323 ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp; 324 struct taprio_sched *q = qdisc_priv(sch); 325 struct sched_gate_list *sched, *admin; 326 ktime_t minimum_time, now, txtime; 327 int len, packet_transmit_time; 328 struct sched_entry *entry; 329 bool sched_changed; 330 331 now = taprio_get_time(q); 332 minimum_time = ktime_add_ns(now, q->txtime_delay); 333 334 tcp_tstamp = get_tcp_tstamp(q, skb); 335 minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp); 336 337 rcu_read_lock(); 338 admin = rcu_dereference(q->admin_sched); 339 sched = rcu_dereference(q->oper_sched); 340 if (admin && ktime_after(minimum_time, admin->base_time)) 341 switch_schedules(q, &admin, &sched); 342 343 /* Until the schedule starts, all the queues are open */ 344 if (!sched || ktime_before(minimum_time, sched->base_time)) { 345 txtime = minimum_time; 346 goto done; 347 } 348 349 len = qdisc_pkt_len(skb); 350 packet_transmit_time = length_to_duration(q, len); 351 352 do { 353 sched_changed = 0; 354 355 entry = find_entry_to_transmit(skb, sch, sched, admin, 356 minimum_time, 357 &interval_start, &interval_end, 358 false); 359 if (!entry) { 360 txtime = 0; 361 goto done; 362 } 363 364 txtime = entry->next_txtime; 365 txtime = max_t(ktime_t, txtime, minimum_time); 366 txtime = max_t(ktime_t, txtime, interval_start); 367 368 if (admin && admin != sched && 369 ktime_after(txtime, admin->base_time)) { 370 sched = admin; 371 sched_changed = 1; 372 continue; 373 } 374 375 transmit_end_time = ktime_add(txtime, packet_transmit_time); 376 minimum_time = transmit_end_time; 377 378 /* Update the txtime of current entry to the next time it's 379 * interval starts. 380 */ 381 if (ktime_after(transmit_end_time, interval_end)) 382 entry->next_txtime = ktime_add(interval_start, sched->cycle_time); 383 } while (sched_changed || ktime_after(transmit_end_time, interval_end)); 384 385 entry->next_txtime = transmit_end_time; 386 387 done: 388 rcu_read_unlock(); 389 return txtime; 390 } 391 392 static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, 393 struct sk_buff **to_free) 394 { 395 struct taprio_sched *q = qdisc_priv(sch); 396 struct Qdisc *child; 397 int queue; 398 399 queue = skb_get_queue_mapping(skb); 400 401 child = q->qdiscs[queue]; 402 if (unlikely(!child)) 403 return qdisc_drop(skb, sch, to_free); 404 405 if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) { 406 if (!is_valid_interval(skb, sch)) 407 return qdisc_drop(skb, sch, to_free); 408 } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { 409 skb->tstamp = get_packet_txtime(skb, sch); 410 if (!skb->tstamp) 411 return qdisc_drop(skb, sch, to_free); 412 } 413 414 qdisc_qstats_backlog_inc(sch, skb); 415 sch->q.qlen++; 416 417 return qdisc_enqueue(skb, child, to_free); 418 } 419 420 static struct sk_buff *taprio_peek(struct Qdisc *sch) 421 { 422 struct taprio_sched *q = qdisc_priv(sch); 423 struct net_device *dev = qdisc_dev(sch); 424 struct sched_entry *entry; 425 struct sk_buff *skb; 426 u32 gate_mask; 427 int i; 428 429 rcu_read_lock(); 430 entry = rcu_dereference(q->current_entry); 431 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN; 432 rcu_read_unlock(); 433 434 if (!gate_mask) 435 return NULL; 436 437 for (i = 0; i < dev->num_tx_queues; i++) { 438 struct Qdisc *child = q->qdiscs[i]; 439 int prio; 440 u8 tc; 441 442 if (unlikely(!child)) 443 continue; 444 445 skb = child->ops->peek(child); 446 if (!skb) 447 continue; 448 449 if (TXTIME_ASSIST_IS_ENABLED(q->flags)) 450 return skb; 451 452 prio = skb->priority; 453 tc = netdev_get_prio_tc_map(dev, prio); 454 455 if (!(gate_mask & BIT(tc))) 456 continue; 457 458 return skb; 459 } 460 461 return NULL; 462 } 463 464 static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry) 465 { 466 atomic_set(&entry->budget, 467 div64_u64((u64)entry->interval * 1000, 468 atomic64_read(&q->picos_per_byte))); 469 } 470 471 static struct sk_buff *taprio_dequeue(struct Qdisc *sch) 472 { 473 struct taprio_sched *q = qdisc_priv(sch); 474 struct net_device *dev = qdisc_dev(sch); 475 struct sk_buff *skb = NULL; 476 struct sched_entry *entry; 477 u32 gate_mask; 478 int i; 479 480 if (atomic64_read(&q->picos_per_byte) == -1) { 481 WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte."); 482 return NULL; 483 } 484 485 rcu_read_lock(); 486 entry = rcu_dereference(q->current_entry); 487 /* if there's no entry, it means that the schedule didn't 488 * start yet, so force all gates to be open, this is in 489 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5 490 * "AdminGateSates" 491 */ 492 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN; 493 494 if (!gate_mask) 495 goto done; 496 497 for (i = 0; i < dev->num_tx_queues; i++) { 498 struct Qdisc *child = q->qdiscs[i]; 499 ktime_t guard; 500 int prio; 501 int len; 502 u8 tc; 503 504 if (unlikely(!child)) 505 continue; 506 507 if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { 508 skb = child->ops->dequeue(child); 509 if (!skb) 510 continue; 511 goto skb_found; 512 } 513 514 skb = child->ops->peek(child); 515 if (!skb) 516 continue; 517 518 prio = skb->priority; 519 tc = netdev_get_prio_tc_map(dev, prio); 520 521 if (!(gate_mask & BIT(tc))) 522 continue; 523 524 len = qdisc_pkt_len(skb); 525 guard = ktime_add_ns(taprio_get_time(q), 526 length_to_duration(q, len)); 527 528 /* In the case that there's no gate entry, there's no 529 * guard band ... 530 */ 531 if (gate_mask != TAPRIO_ALL_GATES_OPEN && 532 ktime_after(guard, entry->close_time)) 533 continue; 534 535 /* ... and no budget. */ 536 if (gate_mask != TAPRIO_ALL_GATES_OPEN && 537 atomic_sub_return(len, &entry->budget) < 0) 538 continue; 539 540 skb = child->ops->dequeue(child); 541 if (unlikely(!skb)) 542 goto done; 543 544 skb_found: 545 qdisc_bstats_update(sch, skb); 546 qdisc_qstats_backlog_dec(sch, skb); 547 sch->q.qlen--; 548 549 goto done; 550 } 551 552 done: 553 rcu_read_unlock(); 554 555 return skb; 556 } 557 558 static bool should_restart_cycle(const struct sched_gate_list *oper, 559 const struct sched_entry *entry) 560 { 561 if (list_is_last(&entry->list, &oper->entries)) 562 return true; 563 564 if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0) 565 return true; 566 567 return false; 568 } 569 570 static bool should_change_schedules(const struct sched_gate_list *admin, 571 const struct sched_gate_list *oper, 572 ktime_t close_time) 573 { 574 ktime_t next_base_time, extension_time; 575 576 if (!admin) 577 return false; 578 579 next_base_time = sched_base_time(admin); 580 581 /* This is the simple case, the close_time would fall after 582 * the next schedule base_time. 583 */ 584 if (ktime_compare(next_base_time, close_time) <= 0) 585 return true; 586 587 /* This is the cycle_time_extension case, if the close_time 588 * plus the amount that can be extended would fall after the 589 * next schedule base_time, we can extend the current schedule 590 * for that amount. 591 */ 592 extension_time = ktime_add_ns(close_time, oper->cycle_time_extension); 593 594 /* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about 595 * how precisely the extension should be made. So after 596 * conformance testing, this logic may change. 597 */ 598 if (ktime_compare(next_base_time, extension_time) <= 0) 599 return true; 600 601 return false; 602 } 603 604 static enum hrtimer_restart advance_sched(struct hrtimer *timer) 605 { 606 struct taprio_sched *q = container_of(timer, struct taprio_sched, 607 advance_timer); 608 struct sched_gate_list *oper, *admin; 609 struct sched_entry *entry, *next; 610 struct Qdisc *sch = q->root; 611 ktime_t close_time; 612 613 spin_lock(&q->current_entry_lock); 614 entry = rcu_dereference_protected(q->current_entry, 615 lockdep_is_held(&q->current_entry_lock)); 616 oper = rcu_dereference_protected(q->oper_sched, 617 lockdep_is_held(&q->current_entry_lock)); 618 admin = rcu_dereference_protected(q->admin_sched, 619 lockdep_is_held(&q->current_entry_lock)); 620 621 if (!oper) 622 switch_schedules(q, &admin, &oper); 623 624 /* This can happen in two cases: 1. this is the very first run 625 * of this function (i.e. we weren't running any schedule 626 * previously); 2. The previous schedule just ended. The first 627 * entry of all schedules are pre-calculated during the 628 * schedule initialization. 629 */ 630 if (unlikely(!entry || entry->close_time == oper->base_time)) { 631 next = list_first_entry(&oper->entries, struct sched_entry, 632 list); 633 close_time = next->close_time; 634 goto first_run; 635 } 636 637 if (should_restart_cycle(oper, entry)) { 638 next = list_first_entry(&oper->entries, struct sched_entry, 639 list); 640 oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time, 641 oper->cycle_time); 642 } else { 643 next = list_next_entry(entry, list); 644 } 645 646 close_time = ktime_add_ns(entry->close_time, next->interval); 647 close_time = min_t(ktime_t, close_time, oper->cycle_close_time); 648 649 if (should_change_schedules(admin, oper, close_time)) { 650 /* Set things so the next time this runs, the new 651 * schedule runs. 652 */ 653 close_time = sched_base_time(admin); 654 switch_schedules(q, &admin, &oper); 655 } 656 657 next->close_time = close_time; 658 taprio_set_budget(q, next); 659 660 first_run: 661 rcu_assign_pointer(q->current_entry, next); 662 spin_unlock(&q->current_entry_lock); 663 664 hrtimer_set_expires(&q->advance_timer, close_time); 665 666 rcu_read_lock(); 667 __netif_schedule(sch); 668 rcu_read_unlock(); 669 670 return HRTIMER_RESTART; 671 } 672 673 static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { 674 [TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 }, 675 [TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 }, 676 [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 }, 677 [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 }, 678 }; 679 680 static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { 681 [TCA_TAPRIO_ATTR_PRIOMAP] = { 682 .len = sizeof(struct tc_mqprio_qopt) 683 }, 684 [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED }, 685 [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 }, 686 [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED }, 687 [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, 688 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 }, 689 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 }, 690 }; 691 692 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, 693 struct netlink_ext_ack *extack) 694 { 695 u32 interval = 0; 696 697 if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD]) 698 entry->command = nla_get_u8( 699 tb[TCA_TAPRIO_SCHED_ENTRY_CMD]); 700 701 if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]) 702 entry->gate_mask = nla_get_u32( 703 tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]); 704 705 if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]) 706 interval = nla_get_u32( 707 tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]); 708 709 if (interval == 0) { 710 NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry"); 711 return -EINVAL; 712 } 713 714 entry->interval = interval; 715 716 return 0; 717 } 718 719 static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, 720 int index, struct netlink_ext_ack *extack) 721 { 722 struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { }; 723 int err; 724 725 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n, 726 entry_policy, NULL); 727 if (err < 0) { 728 NL_SET_ERR_MSG(extack, "Could not parse nested entry"); 729 return -EINVAL; 730 } 731 732 entry->index = index; 733 734 return fill_sched_entry(tb, entry, extack); 735 } 736 737 static int parse_sched_list(struct nlattr *list, 738 struct sched_gate_list *sched, 739 struct netlink_ext_ack *extack) 740 { 741 struct nlattr *n; 742 int err, rem; 743 int i = 0; 744 745 if (!list) 746 return -EINVAL; 747 748 nla_for_each_nested(n, list, rem) { 749 struct sched_entry *entry; 750 751 if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) { 752 NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'"); 753 continue; 754 } 755 756 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 757 if (!entry) { 758 NL_SET_ERR_MSG(extack, "Not enough memory for entry"); 759 return -ENOMEM; 760 } 761 762 err = parse_sched_entry(n, entry, i, extack); 763 if (err < 0) { 764 kfree(entry); 765 return err; 766 } 767 768 list_add_tail(&entry->list, &sched->entries); 769 i++; 770 } 771 772 sched->num_entries = i; 773 774 return i; 775 } 776 777 static int parse_taprio_schedule(struct nlattr **tb, 778 struct sched_gate_list *new, 779 struct netlink_ext_ack *extack) 780 { 781 int err = 0; 782 783 if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) { 784 NL_SET_ERR_MSG(extack, "Adding a single entry is not supported"); 785 return -ENOTSUPP; 786 } 787 788 if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]) 789 new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]); 790 791 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]) 792 new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]); 793 794 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]) 795 new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]); 796 797 if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]) 798 err = parse_sched_list( 799 tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack); 800 if (err < 0) 801 return err; 802 803 if (!new->cycle_time) { 804 struct sched_entry *entry; 805 ktime_t cycle = 0; 806 807 list_for_each_entry(entry, &new->entries, list) 808 cycle = ktime_add_ns(cycle, entry->interval); 809 new->cycle_time = cycle; 810 } 811 812 return 0; 813 } 814 815 static int taprio_parse_mqprio_opt(struct net_device *dev, 816 struct tc_mqprio_qopt *qopt, 817 struct netlink_ext_ack *extack, 818 u32 taprio_flags) 819 { 820 int i, j; 821 822 if (!qopt && !dev->num_tc) { 823 NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary"); 824 return -EINVAL; 825 } 826 827 /* If num_tc is already set, it means that the user already 828 * configured the mqprio part 829 */ 830 if (dev->num_tc) 831 return 0; 832 833 /* Verify num_tc is not out of max range */ 834 if (qopt->num_tc > TC_MAX_QUEUE) { 835 NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range"); 836 return -EINVAL; 837 } 838 839 /* taprio imposes that traffic classes map 1:n to tx queues */ 840 if (qopt->num_tc > dev->num_tx_queues) { 841 NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues"); 842 return -EINVAL; 843 } 844 845 /* Verify priority mapping uses valid tcs */ 846 for (i = 0; i < TC_BITMASK + 1; i++) { 847 if (qopt->prio_tc_map[i] >= qopt->num_tc) { 848 NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping"); 849 return -EINVAL; 850 } 851 } 852 853 for (i = 0; i < qopt->num_tc; i++) { 854 unsigned int last = qopt->offset[i] + qopt->count[i]; 855 856 /* Verify the queue count is in tx range being equal to the 857 * real_num_tx_queues indicates the last queue is in use. 858 */ 859 if (qopt->offset[i] >= dev->num_tx_queues || 860 !qopt->count[i] || 861 last > dev->real_num_tx_queues) { 862 NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping"); 863 return -EINVAL; 864 } 865 866 if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) 867 continue; 868 869 /* Verify that the offset and counts do not overlap */ 870 for (j = i + 1; j < qopt->num_tc; j++) { 871 if (last > qopt->offset[j]) { 872 NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping"); 873 return -EINVAL; 874 } 875 } 876 } 877 878 return 0; 879 } 880 881 static int taprio_get_start_time(struct Qdisc *sch, 882 struct sched_gate_list *sched, 883 ktime_t *start) 884 { 885 struct taprio_sched *q = qdisc_priv(sch); 886 ktime_t now, base, cycle; 887 s64 n; 888 889 base = sched_base_time(sched); 890 now = taprio_get_time(q); 891 892 if (ktime_after(base, now)) { 893 *start = base; 894 return 0; 895 } 896 897 cycle = sched->cycle_time; 898 899 /* The qdisc is expected to have at least one sched_entry. Moreover, 900 * any entry must have 'interval' > 0. Thus if the cycle time is zero, 901 * something went really wrong. In that case, we should warn about this 902 * inconsistent state and return error. 903 */ 904 if (WARN_ON(!cycle)) 905 return -EFAULT; 906 907 /* Schedule the start time for the beginning of the next 908 * cycle. 909 */ 910 n = div64_s64(ktime_sub_ns(now, base), cycle); 911 *start = ktime_add_ns(base, (n + 1) * cycle); 912 return 0; 913 } 914 915 static void setup_first_close_time(struct taprio_sched *q, 916 struct sched_gate_list *sched, ktime_t base) 917 { 918 struct sched_entry *first; 919 ktime_t cycle; 920 921 first = list_first_entry(&sched->entries, 922 struct sched_entry, list); 923 924 cycle = sched->cycle_time; 925 926 /* FIXME: find a better place to do this */ 927 sched->cycle_close_time = ktime_add_ns(base, cycle); 928 929 first->close_time = ktime_add_ns(base, first->interval); 930 taprio_set_budget(q, first); 931 rcu_assign_pointer(q->current_entry, NULL); 932 } 933 934 static void taprio_start_sched(struct Qdisc *sch, 935 ktime_t start, struct sched_gate_list *new) 936 { 937 struct taprio_sched *q = qdisc_priv(sch); 938 ktime_t expires; 939 940 expires = hrtimer_get_expires(&q->advance_timer); 941 if (expires == 0) 942 expires = KTIME_MAX; 943 944 /* If the new schedule starts before the next expiration, we 945 * reprogram it to the earliest one, so we change the admin 946 * schedule to the operational one at the right time. 947 */ 948 start = min_t(ktime_t, start, expires); 949 950 hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS); 951 } 952 953 static void taprio_set_picos_per_byte(struct net_device *dev, 954 struct taprio_sched *q) 955 { 956 struct ethtool_link_ksettings ecmd; 957 int picos_per_byte = -1; 958 959 if (!__ethtool_get_link_ksettings(dev, &ecmd) && 960 ecmd.base.speed != SPEED_UNKNOWN) 961 picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, 962 ecmd.base.speed * 1000 * 1000); 963 964 atomic64_set(&q->picos_per_byte, picos_per_byte); 965 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", 966 dev->name, (long long)atomic64_read(&q->picos_per_byte), 967 ecmd.base.speed); 968 } 969 970 static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event, 971 void *ptr) 972 { 973 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 974 struct net_device *qdev; 975 struct taprio_sched *q; 976 bool found = false; 977 978 ASSERT_RTNL(); 979 980 if (event != NETDEV_UP && event != NETDEV_CHANGE) 981 return NOTIFY_DONE; 982 983 spin_lock(&taprio_list_lock); 984 list_for_each_entry(q, &taprio_list, taprio_list) { 985 qdev = qdisc_dev(q->root); 986 if (qdev == dev) { 987 found = true; 988 break; 989 } 990 } 991 spin_unlock(&taprio_list_lock); 992 993 if (found) 994 taprio_set_picos_per_byte(dev, q); 995 996 return NOTIFY_DONE; 997 } 998 999 static void setup_txtime(struct taprio_sched *q, 1000 struct sched_gate_list *sched, ktime_t base) 1001 { 1002 struct sched_entry *entry; 1003 u32 interval = 0; 1004 1005 list_for_each_entry(entry, &sched->entries, list) { 1006 entry->next_txtime = ktime_add_ns(base, interval); 1007 interval += entry->interval; 1008 } 1009 } 1010 1011 static int taprio_change(struct Qdisc *sch, struct nlattr *opt, 1012 struct netlink_ext_ack *extack) 1013 { 1014 struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { }; 1015 struct sched_gate_list *oper, *admin, *new_admin; 1016 struct taprio_sched *q = qdisc_priv(sch); 1017 struct net_device *dev = qdisc_dev(sch); 1018 struct tc_mqprio_qopt *mqprio = NULL; 1019 u32 taprio_flags = 0; 1020 int i, err, clockid; 1021 unsigned long flags; 1022 ktime_t start; 1023 1024 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt, 1025 taprio_policy, extack); 1026 if (err < 0) 1027 return err; 1028 1029 if (tb[TCA_TAPRIO_ATTR_PRIOMAP]) 1030 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]); 1031 1032 if (tb[TCA_TAPRIO_ATTR_FLAGS]) { 1033 taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]); 1034 1035 if (q->flags != 0 && q->flags != taprio_flags) { 1036 NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported"); 1037 return -EOPNOTSUPP; 1038 } else if (!FLAGS_VALID(taprio_flags)) { 1039 NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid"); 1040 return -EINVAL; 1041 } 1042 1043 q->flags = taprio_flags; 1044 } 1045 1046 err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags); 1047 if (err < 0) 1048 return err; 1049 1050 new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL); 1051 if (!new_admin) { 1052 NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule"); 1053 return -ENOMEM; 1054 } 1055 INIT_LIST_HEAD(&new_admin->entries); 1056 1057 rcu_read_lock(); 1058 oper = rcu_dereference(q->oper_sched); 1059 admin = rcu_dereference(q->admin_sched); 1060 rcu_read_unlock(); 1061 1062 if (mqprio && (oper || admin)) { 1063 NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported"); 1064 err = -ENOTSUPP; 1065 goto free_sched; 1066 } 1067 1068 err = parse_taprio_schedule(tb, new_admin, extack); 1069 if (err < 0) 1070 goto free_sched; 1071 1072 if (new_admin->num_entries == 0) { 1073 NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule"); 1074 err = -EINVAL; 1075 goto free_sched; 1076 } 1077 1078 if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { 1079 clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]); 1080 1081 /* We only support static clockids and we don't allow 1082 * for it to be modified after the first init. 1083 */ 1084 if (clockid < 0 || 1085 (q->clockid != -1 && q->clockid != clockid)) { 1086 NL_SET_ERR_MSG(extack, "Changing the 'clockid' of a running schedule is not supported"); 1087 err = -ENOTSUPP; 1088 goto free_sched; 1089 } 1090 1091 q->clockid = clockid; 1092 } 1093 1094 if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { 1095 NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory"); 1096 err = -EINVAL; 1097 goto free_sched; 1098 } 1099 1100 taprio_set_picos_per_byte(dev, q); 1101 1102 /* Protects against enqueue()/dequeue() */ 1103 spin_lock_bh(qdisc_lock(sch)); 1104 1105 if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) { 1106 if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) { 1107 NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled"); 1108 err = -EINVAL; 1109 goto unlock; 1110 } 1111 1112 q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); 1113 } 1114 1115 if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) && 1116 !hrtimer_active(&q->advance_timer)) { 1117 hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); 1118 q->advance_timer.function = advance_sched; 1119 } 1120 1121 if (mqprio) { 1122 netdev_set_num_tc(dev, mqprio->num_tc); 1123 for (i = 0; i < mqprio->num_tc; i++) 1124 netdev_set_tc_queue(dev, i, 1125 mqprio->count[i], 1126 mqprio->offset[i]); 1127 1128 /* Always use supplied priority mappings */ 1129 for (i = 0; i < TC_BITMASK + 1; i++) 1130 netdev_set_prio_tc_map(dev, i, 1131 mqprio->prio_tc_map[i]); 1132 } 1133 1134 switch (q->clockid) { 1135 case CLOCK_REALTIME: 1136 q->tk_offset = TK_OFFS_REAL; 1137 break; 1138 case CLOCK_MONOTONIC: 1139 q->tk_offset = TK_OFFS_MAX; 1140 break; 1141 case CLOCK_BOOTTIME: 1142 q->tk_offset = TK_OFFS_BOOT; 1143 break; 1144 case CLOCK_TAI: 1145 q->tk_offset = TK_OFFS_TAI; 1146 break; 1147 default: 1148 NL_SET_ERR_MSG(extack, "Invalid 'clockid'"); 1149 err = -EINVAL; 1150 goto unlock; 1151 } 1152 1153 err = taprio_get_start_time(sch, new_admin, &start); 1154 if (err < 0) { 1155 NL_SET_ERR_MSG(extack, "Internal error: failed get start time"); 1156 goto unlock; 1157 } 1158 1159 if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) { 1160 setup_txtime(q, new_admin, start); 1161 1162 if (!oper) { 1163 rcu_assign_pointer(q->oper_sched, new_admin); 1164 err = 0; 1165 new_admin = NULL; 1166 goto unlock; 1167 } 1168 1169 rcu_assign_pointer(q->admin_sched, new_admin); 1170 if (admin) 1171 call_rcu(&admin->rcu, taprio_free_sched_cb); 1172 } else { 1173 setup_first_close_time(q, new_admin, start); 1174 1175 /* Protects against advance_sched() */ 1176 spin_lock_irqsave(&q->current_entry_lock, flags); 1177 1178 taprio_start_sched(sch, start, new_admin); 1179 1180 rcu_assign_pointer(q->admin_sched, new_admin); 1181 if (admin) 1182 call_rcu(&admin->rcu, taprio_free_sched_cb); 1183 1184 spin_unlock_irqrestore(&q->current_entry_lock, flags); 1185 } 1186 1187 new_admin = NULL; 1188 err = 0; 1189 1190 unlock: 1191 spin_unlock_bh(qdisc_lock(sch)); 1192 1193 free_sched: 1194 kfree(new_admin); 1195 1196 return err; 1197 } 1198 1199 static void taprio_destroy(struct Qdisc *sch) 1200 { 1201 struct taprio_sched *q = qdisc_priv(sch); 1202 struct net_device *dev = qdisc_dev(sch); 1203 unsigned int i; 1204 1205 spin_lock(&taprio_list_lock); 1206 list_del(&q->taprio_list); 1207 spin_unlock(&taprio_list_lock); 1208 1209 hrtimer_cancel(&q->advance_timer); 1210 1211 if (q->qdiscs) { 1212 for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++) 1213 qdisc_put(q->qdiscs[i]); 1214 1215 kfree(q->qdiscs); 1216 } 1217 q->qdiscs = NULL; 1218 1219 netdev_set_num_tc(dev, 0); 1220 1221 if (q->oper_sched) 1222 call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb); 1223 1224 if (q->admin_sched) 1225 call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb); 1226 } 1227 1228 static int taprio_init(struct Qdisc *sch, struct nlattr *opt, 1229 struct netlink_ext_ack *extack) 1230 { 1231 struct taprio_sched *q = qdisc_priv(sch); 1232 struct net_device *dev = qdisc_dev(sch); 1233 int i; 1234 1235 spin_lock_init(&q->current_entry_lock); 1236 1237 hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS); 1238 q->advance_timer.function = advance_sched; 1239 1240 q->root = sch; 1241 1242 /* We only support static clockids. Use an invalid value as default 1243 * and get the valid one on taprio_change(). 1244 */ 1245 q->clockid = -1; 1246 1247 if (sch->parent != TC_H_ROOT) 1248 return -EOPNOTSUPP; 1249 1250 if (!netif_is_multiqueue(dev)) 1251 return -EOPNOTSUPP; 1252 1253 /* pre-allocate qdisc, attachment can't fail */ 1254 q->qdiscs = kcalloc(dev->num_tx_queues, 1255 sizeof(q->qdiscs[0]), 1256 GFP_KERNEL); 1257 1258 if (!q->qdiscs) 1259 return -ENOMEM; 1260 1261 if (!opt) 1262 return -EINVAL; 1263 1264 spin_lock(&taprio_list_lock); 1265 list_add(&q->taprio_list, &taprio_list); 1266 spin_unlock(&taprio_list_lock); 1267 1268 for (i = 0; i < dev->num_tx_queues; i++) { 1269 struct netdev_queue *dev_queue; 1270 struct Qdisc *qdisc; 1271 1272 dev_queue = netdev_get_tx_queue(dev, i); 1273 qdisc = qdisc_create_dflt(dev_queue, 1274 &pfifo_qdisc_ops, 1275 TC_H_MAKE(TC_H_MAJ(sch->handle), 1276 TC_H_MIN(i + 1)), 1277 extack); 1278 if (!qdisc) 1279 return -ENOMEM; 1280 1281 if (i < dev->real_num_tx_queues) 1282 qdisc_hash_add(qdisc, false); 1283 1284 q->qdiscs[i] = qdisc; 1285 } 1286 1287 return taprio_change(sch, opt, extack); 1288 } 1289 1290 static struct netdev_queue *taprio_queue_get(struct Qdisc *sch, 1291 unsigned long cl) 1292 { 1293 struct net_device *dev = qdisc_dev(sch); 1294 unsigned long ntx = cl - 1; 1295 1296 if (ntx >= dev->num_tx_queues) 1297 return NULL; 1298 1299 return netdev_get_tx_queue(dev, ntx); 1300 } 1301 1302 static int taprio_graft(struct Qdisc *sch, unsigned long cl, 1303 struct Qdisc *new, struct Qdisc **old, 1304 struct netlink_ext_ack *extack) 1305 { 1306 struct taprio_sched *q = qdisc_priv(sch); 1307 struct net_device *dev = qdisc_dev(sch); 1308 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 1309 1310 if (!dev_queue) 1311 return -EINVAL; 1312 1313 if (dev->flags & IFF_UP) 1314 dev_deactivate(dev); 1315 1316 *old = q->qdiscs[cl - 1]; 1317 q->qdiscs[cl - 1] = new; 1318 1319 if (new) 1320 new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 1321 1322 if (dev->flags & IFF_UP) 1323 dev_activate(dev); 1324 1325 return 0; 1326 } 1327 1328 static int dump_entry(struct sk_buff *msg, 1329 const struct sched_entry *entry) 1330 { 1331 struct nlattr *item; 1332 1333 item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY); 1334 if (!item) 1335 return -ENOSPC; 1336 1337 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index)) 1338 goto nla_put_failure; 1339 1340 if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command)) 1341 goto nla_put_failure; 1342 1343 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, 1344 entry->gate_mask)) 1345 goto nla_put_failure; 1346 1347 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL, 1348 entry->interval)) 1349 goto nla_put_failure; 1350 1351 return nla_nest_end(msg, item); 1352 1353 nla_put_failure: 1354 nla_nest_cancel(msg, item); 1355 return -1; 1356 } 1357 1358 static int dump_schedule(struct sk_buff *msg, 1359 const struct sched_gate_list *root) 1360 { 1361 struct nlattr *entry_list; 1362 struct sched_entry *entry; 1363 1364 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME, 1365 root->base_time, TCA_TAPRIO_PAD)) 1366 return -1; 1367 1368 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, 1369 root->cycle_time, TCA_TAPRIO_PAD)) 1370 return -1; 1371 1372 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, 1373 root->cycle_time_extension, TCA_TAPRIO_PAD)) 1374 return -1; 1375 1376 entry_list = nla_nest_start_noflag(msg, 1377 TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST); 1378 if (!entry_list) 1379 goto error_nest; 1380 1381 list_for_each_entry(entry, &root->entries, list) { 1382 if (dump_entry(msg, entry) < 0) 1383 goto error_nest; 1384 } 1385 1386 nla_nest_end(msg, entry_list); 1387 return 0; 1388 1389 error_nest: 1390 nla_nest_cancel(msg, entry_list); 1391 return -1; 1392 } 1393 1394 static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) 1395 { 1396 struct taprio_sched *q = qdisc_priv(sch); 1397 struct net_device *dev = qdisc_dev(sch); 1398 struct sched_gate_list *oper, *admin; 1399 struct tc_mqprio_qopt opt = { 0 }; 1400 struct nlattr *nest, *sched_nest; 1401 unsigned int i; 1402 1403 rcu_read_lock(); 1404 oper = rcu_dereference(q->oper_sched); 1405 admin = rcu_dereference(q->admin_sched); 1406 1407 opt.num_tc = netdev_get_num_tc(dev); 1408 memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map)); 1409 1410 for (i = 0; i < netdev_get_num_tc(dev); i++) { 1411 opt.count[i] = dev->tc_to_txq[i].count; 1412 opt.offset[i] = dev->tc_to_txq[i].offset; 1413 } 1414 1415 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 1416 if (!nest) 1417 goto start_error; 1418 1419 if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt)) 1420 goto options_error; 1421 1422 if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid)) 1423 goto options_error; 1424 1425 if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags)) 1426 goto options_error; 1427 1428 if (q->txtime_delay && 1429 nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay)) 1430 goto options_error; 1431 1432 if (oper && dump_schedule(skb, oper)) 1433 goto options_error; 1434 1435 if (!admin) 1436 goto done; 1437 1438 sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED); 1439 if (!sched_nest) 1440 goto options_error; 1441 1442 if (dump_schedule(skb, admin)) 1443 goto admin_error; 1444 1445 nla_nest_end(skb, sched_nest); 1446 1447 done: 1448 rcu_read_unlock(); 1449 1450 return nla_nest_end(skb, nest); 1451 1452 admin_error: 1453 nla_nest_cancel(skb, sched_nest); 1454 1455 options_error: 1456 nla_nest_cancel(skb, nest); 1457 1458 start_error: 1459 rcu_read_unlock(); 1460 return -ENOSPC; 1461 } 1462 1463 static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) 1464 { 1465 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 1466 1467 if (!dev_queue) 1468 return NULL; 1469 1470 return dev_queue->qdisc_sleeping; 1471 } 1472 1473 static unsigned long taprio_find(struct Qdisc *sch, u32 classid) 1474 { 1475 unsigned int ntx = TC_H_MIN(classid); 1476 1477 if (!taprio_queue_get(sch, ntx)) 1478 return 0; 1479 return ntx; 1480 } 1481 1482 static int taprio_dump_class(struct Qdisc *sch, unsigned long cl, 1483 struct sk_buff *skb, struct tcmsg *tcm) 1484 { 1485 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 1486 1487 tcm->tcm_parent = TC_H_ROOT; 1488 tcm->tcm_handle |= TC_H_MIN(cl); 1489 tcm->tcm_info = dev_queue->qdisc_sleeping->handle; 1490 1491 return 0; 1492 } 1493 1494 static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, 1495 struct gnet_dump *d) 1496 __releases(d->lock) 1497 __acquires(d->lock) 1498 { 1499 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 1500 1501 sch = dev_queue->qdisc_sleeping; 1502 if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || 1503 qdisc_qstats_copy(d, sch) < 0) 1504 return -1; 1505 return 0; 1506 } 1507 1508 static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) 1509 { 1510 struct net_device *dev = qdisc_dev(sch); 1511 unsigned long ntx; 1512 1513 if (arg->stop) 1514 return; 1515 1516 arg->count = arg->skip; 1517 for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) { 1518 if (arg->fn(sch, ntx + 1, arg) < 0) { 1519 arg->stop = 1; 1520 break; 1521 } 1522 arg->count++; 1523 } 1524 } 1525 1526 static struct netdev_queue *taprio_select_queue(struct Qdisc *sch, 1527 struct tcmsg *tcm) 1528 { 1529 return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent)); 1530 } 1531 1532 static const struct Qdisc_class_ops taprio_class_ops = { 1533 .graft = taprio_graft, 1534 .leaf = taprio_leaf, 1535 .find = taprio_find, 1536 .walk = taprio_walk, 1537 .dump = taprio_dump_class, 1538 .dump_stats = taprio_dump_class_stats, 1539 .select_queue = taprio_select_queue, 1540 }; 1541 1542 static struct Qdisc_ops taprio_qdisc_ops __read_mostly = { 1543 .cl_ops = &taprio_class_ops, 1544 .id = "taprio", 1545 .priv_size = sizeof(struct taprio_sched), 1546 .init = taprio_init, 1547 .change = taprio_change, 1548 .destroy = taprio_destroy, 1549 .peek = taprio_peek, 1550 .dequeue = taprio_dequeue, 1551 .enqueue = taprio_enqueue, 1552 .dump = taprio_dump, 1553 .owner = THIS_MODULE, 1554 }; 1555 1556 static struct notifier_block taprio_device_notifier = { 1557 .notifier_call = taprio_dev_notifier, 1558 }; 1559 1560 static int __init taprio_module_init(void) 1561 { 1562 int err = register_netdevice_notifier(&taprio_device_notifier); 1563 1564 if (err) 1565 return err; 1566 1567 return register_qdisc(&taprio_qdisc_ops); 1568 } 1569 1570 static void __exit taprio_module_exit(void) 1571 { 1572 unregister_qdisc(&taprio_qdisc_ops); 1573 unregister_netdevice_notifier(&taprio_device_notifier); 1574 } 1575 1576 module_init(taprio_module_init); 1577 module_exit(taprio_module_exit); 1578 MODULE_LICENSE("GPL"); 1579