1 // SPDX-License-Identifier: GPL-2.0 2 3 /* net/sched/sch_taprio.c Time Aware Priority Scheduler 4 * 5 * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com> 6 * 7 */ 8 9 #include <linux/ethtool.h> 10 #include <linux/types.h> 11 #include <linux/slab.h> 12 #include <linux/kernel.h> 13 #include <linux/string.h> 14 #include <linux/list.h> 15 #include <linux/errno.h> 16 #include <linux/skbuff.h> 17 #include <linux/math64.h> 18 #include <linux/module.h> 19 #include <linux/spinlock.h> 20 #include <linux/rcupdate.h> 21 #include <linux/time.h> 22 #include <net/netlink.h> 23 #include <net/pkt_sched.h> 24 #include <net/pkt_cls.h> 25 #include <net/sch_generic.h> 26 #include <net/sock.h> 27 #include <net/tcp.h> 28 29 #include "sch_mqprio_lib.h" 30 31 static LIST_HEAD(taprio_list); 32 static struct static_key_false taprio_have_broken_mqprio; 33 static struct static_key_false taprio_have_working_mqprio; 34 35 #define TAPRIO_ALL_GATES_OPEN -1 36 37 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) 38 #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD) 39 #define TAPRIO_FLAGS_INVALID U32_MAX 40 41 struct sched_entry { 42 /* Durations between this GCL entry and the GCL entry where the 43 * respective traffic class gate closes 44 */ 45 u64 gate_duration[TC_MAX_QUEUE]; 46 atomic_t budget[TC_MAX_QUEUE]; 47 /* The qdisc makes some effort so that no packet leaves 48 * after this time 49 */ 50 ktime_t gate_close_time[TC_MAX_QUEUE]; 51 struct list_head list; 52 /* Used to calculate when to advance the schedule */ 53 ktime_t end_time; 54 ktime_t next_txtime; 55 int index; 56 u32 gate_mask; 57 u32 interval; 58 u8 command; 59 }; 60 61 struct sched_gate_list { 62 /* Longest non-zero contiguous gate durations per traffic class, 63 * or 0 if a traffic class gate never opens during the schedule. 64 */ 65 u64 max_open_gate_duration[TC_MAX_QUEUE]; 66 u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */ 67 u32 max_sdu[TC_MAX_QUEUE]; /* for dump */ 68 struct rcu_head rcu; 69 struct list_head entries; 70 size_t num_entries; 71 ktime_t cycle_end_time; 72 s64 cycle_time; 73 s64 cycle_time_extension; 74 s64 base_time; 75 }; 76 77 struct taprio_sched { 78 struct Qdisc **qdiscs; 79 struct Qdisc *root; 80 u32 flags; 81 enum tk_offsets tk_offset; 82 int clockid; 83 bool offloaded; 84 bool detected_mqprio; 85 bool broken_mqprio; 86 atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+ 87 * speeds it's sub-nanoseconds per byte 88 */ 89 90 /* Protects the update side of the RCU protected current_entry */ 91 spinlock_t current_entry_lock; 92 struct sched_entry __rcu *current_entry; 93 struct sched_gate_list __rcu *oper_sched; 94 struct sched_gate_list __rcu *admin_sched; 95 struct hrtimer advance_timer; 96 struct list_head taprio_list; 97 int cur_txq[TC_MAX_QUEUE]; 98 u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */ 99 u32 txtime_delay; 100 }; 101 102 struct __tc_taprio_qopt_offload { 103 refcount_t users; 104 struct tc_taprio_qopt_offload offload; 105 }; 106 107 static void taprio_calculate_gate_durations(struct taprio_sched *q, 108 struct sched_gate_list *sched) 109 { 110 struct net_device *dev = qdisc_dev(q->root); 111 int num_tc = netdev_get_num_tc(dev); 112 struct sched_entry *entry, *cur; 113 int tc; 114 115 list_for_each_entry(entry, &sched->entries, list) { 116 u32 gates_still_open = entry->gate_mask; 117 118 /* For each traffic class, calculate each open gate duration, 119 * starting at this schedule entry and ending at the schedule 120 * entry containing a gate close event for that TC. 121 */ 122 cur = entry; 123 124 do { 125 if (!gates_still_open) 126 break; 127 128 for (tc = 0; tc < num_tc; tc++) { 129 if (!(gates_still_open & BIT(tc))) 130 continue; 131 132 if (cur->gate_mask & BIT(tc)) 133 entry->gate_duration[tc] += cur->interval; 134 else 135 gates_still_open &= ~BIT(tc); 136 } 137 138 cur = list_next_entry_circular(cur, &sched->entries, list); 139 } while (cur != entry); 140 141 /* Keep track of the maximum gate duration for each traffic 142 * class, taking care to not confuse a traffic class which is 143 * temporarily closed with one that is always closed. 144 */ 145 for (tc = 0; tc < num_tc; tc++) 146 if (entry->gate_duration[tc] && 147 sched->max_open_gate_duration[tc] < entry->gate_duration[tc]) 148 sched->max_open_gate_duration[tc] = entry->gate_duration[tc]; 149 } 150 } 151 152 static bool taprio_entry_allows_tx(ktime_t skb_end_time, 153 struct sched_entry *entry, int tc) 154 { 155 return ktime_before(skb_end_time, entry->gate_close_time[tc]); 156 } 157 158 static ktime_t sched_base_time(const struct sched_gate_list *sched) 159 { 160 if (!sched) 161 return KTIME_MAX; 162 163 return ns_to_ktime(sched->base_time); 164 } 165 166 static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono) 167 { 168 /* This pairs with WRITE_ONCE() in taprio_parse_clockid() */ 169 enum tk_offsets tk_offset = READ_ONCE(q->tk_offset); 170 171 switch (tk_offset) { 172 case TK_OFFS_MAX: 173 return mono; 174 default: 175 return ktime_mono_to_any(mono, tk_offset); 176 } 177 } 178 179 static ktime_t taprio_get_time(const struct taprio_sched *q) 180 { 181 return taprio_mono_to_any(q, ktime_get()); 182 } 183 184 static void taprio_free_sched_cb(struct rcu_head *head) 185 { 186 struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu); 187 struct sched_entry *entry, *n; 188 189 list_for_each_entry_safe(entry, n, &sched->entries, list) { 190 list_del(&entry->list); 191 kfree(entry); 192 } 193 194 kfree(sched); 195 } 196 197 static void switch_schedules(struct taprio_sched *q, 198 struct sched_gate_list **admin, 199 struct sched_gate_list **oper) 200 { 201 rcu_assign_pointer(q->oper_sched, *admin); 202 rcu_assign_pointer(q->admin_sched, NULL); 203 204 if (*oper) 205 call_rcu(&(*oper)->rcu, taprio_free_sched_cb); 206 207 *oper = *admin; 208 *admin = NULL; 209 } 210 211 /* Get how much time has been already elapsed in the current cycle. */ 212 static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time) 213 { 214 ktime_t time_since_sched_start; 215 s32 time_elapsed; 216 217 time_since_sched_start = ktime_sub(time, sched->base_time); 218 div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed); 219 220 return time_elapsed; 221 } 222 223 static ktime_t get_interval_end_time(struct sched_gate_list *sched, 224 struct sched_gate_list *admin, 225 struct sched_entry *entry, 226 ktime_t intv_start) 227 { 228 s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start); 229 ktime_t intv_end, cycle_ext_end, cycle_end; 230 231 cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed); 232 intv_end = ktime_add_ns(intv_start, entry->interval); 233 cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension); 234 235 if (ktime_before(intv_end, cycle_end)) 236 return intv_end; 237 else if (admin && admin != sched && 238 ktime_after(admin->base_time, cycle_end) && 239 ktime_before(admin->base_time, cycle_ext_end)) 240 return admin->base_time; 241 else 242 return cycle_end; 243 } 244 245 static int length_to_duration(struct taprio_sched *q, int len) 246 { 247 return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC); 248 } 249 250 static int duration_to_length(struct taprio_sched *q, u64 duration) 251 { 252 return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte)); 253 } 254 255 /* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the 256 * q->max_sdu[] requested by the user and the max_sdu dynamically determined by 257 * the maximum open gate durations at the given link speed. 258 */ 259 static void taprio_update_queue_max_sdu(struct taprio_sched *q, 260 struct sched_gate_list *sched, 261 struct qdisc_size_table *stab) 262 { 263 struct net_device *dev = qdisc_dev(q->root); 264 int num_tc = netdev_get_num_tc(dev); 265 u32 max_sdu_from_user; 266 u32 max_sdu_dynamic; 267 u32 max_sdu; 268 int tc; 269 270 for (tc = 0; tc < num_tc; tc++) { 271 max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX; 272 273 /* TC gate never closes => keep the queueMaxSDU 274 * selected by the user 275 */ 276 if (sched->max_open_gate_duration[tc] == sched->cycle_time) { 277 max_sdu_dynamic = U32_MAX; 278 } else { 279 u32 max_frm_len; 280 281 max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]); 282 /* Compensate for L1 overhead from size table, 283 * but don't let the frame size go negative 284 */ 285 if (stab) { 286 max_frm_len -= stab->szopts.overhead; 287 max_frm_len = max_t(int, max_frm_len, 288 dev->hard_header_len + 1); 289 } 290 max_sdu_dynamic = max_frm_len - dev->hard_header_len; 291 if (max_sdu_dynamic > dev->max_mtu) 292 max_sdu_dynamic = U32_MAX; 293 } 294 295 max_sdu = min(max_sdu_dynamic, max_sdu_from_user); 296 297 if (max_sdu != U32_MAX) { 298 sched->max_frm_len[tc] = max_sdu + dev->hard_header_len; 299 sched->max_sdu[tc] = max_sdu; 300 } else { 301 sched->max_frm_len[tc] = U32_MAX; /* never oversized */ 302 sched->max_sdu[tc] = 0; 303 } 304 } 305 } 306 307 /* Returns the entry corresponding to next available interval. If 308 * validate_interval is set, it only validates whether the timestamp occurs 309 * when the gate corresponding to the skb's traffic class is open. 310 */ 311 static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb, 312 struct Qdisc *sch, 313 struct sched_gate_list *sched, 314 struct sched_gate_list *admin, 315 ktime_t time, 316 ktime_t *interval_start, 317 ktime_t *interval_end, 318 bool validate_interval) 319 { 320 ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time; 321 ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time; 322 struct sched_entry *entry = NULL, *entry_found = NULL; 323 struct taprio_sched *q = qdisc_priv(sch); 324 struct net_device *dev = qdisc_dev(sch); 325 bool entry_available = false; 326 s32 cycle_elapsed; 327 int tc, n; 328 329 tc = netdev_get_prio_tc_map(dev, skb->priority); 330 packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb)); 331 332 *interval_start = 0; 333 *interval_end = 0; 334 335 if (!sched) 336 return NULL; 337 338 cycle = sched->cycle_time; 339 cycle_elapsed = get_cycle_time_elapsed(sched, time); 340 curr_intv_end = ktime_sub_ns(time, cycle_elapsed); 341 cycle_end = ktime_add_ns(curr_intv_end, cycle); 342 343 list_for_each_entry(entry, &sched->entries, list) { 344 curr_intv_start = curr_intv_end; 345 curr_intv_end = get_interval_end_time(sched, admin, entry, 346 curr_intv_start); 347 348 if (ktime_after(curr_intv_start, cycle_end)) 349 break; 350 351 if (!(entry->gate_mask & BIT(tc)) || 352 packet_transmit_time > entry->interval) 353 continue; 354 355 txtime = entry->next_txtime; 356 357 if (ktime_before(txtime, time) || validate_interval) { 358 transmit_end_time = ktime_add_ns(time, packet_transmit_time); 359 if ((ktime_before(curr_intv_start, time) && 360 ktime_before(transmit_end_time, curr_intv_end)) || 361 (ktime_after(curr_intv_start, time) && !validate_interval)) { 362 entry_found = entry; 363 *interval_start = curr_intv_start; 364 *interval_end = curr_intv_end; 365 break; 366 } else if (!entry_available && !validate_interval) { 367 /* Here, we are just trying to find out the 368 * first available interval in the next cycle. 369 */ 370 entry_available = true; 371 entry_found = entry; 372 *interval_start = ktime_add_ns(curr_intv_start, cycle); 373 *interval_end = ktime_add_ns(curr_intv_end, cycle); 374 } 375 } else if (ktime_before(txtime, earliest_txtime) && 376 !entry_available) { 377 earliest_txtime = txtime; 378 entry_found = entry; 379 n = div_s64(ktime_sub(txtime, curr_intv_start), cycle); 380 *interval_start = ktime_add(curr_intv_start, n * cycle); 381 *interval_end = ktime_add(curr_intv_end, n * cycle); 382 } 383 } 384 385 return entry_found; 386 } 387 388 static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch) 389 { 390 struct taprio_sched *q = qdisc_priv(sch); 391 struct sched_gate_list *sched, *admin; 392 ktime_t interval_start, interval_end; 393 struct sched_entry *entry; 394 395 rcu_read_lock(); 396 sched = rcu_dereference(q->oper_sched); 397 admin = rcu_dereference(q->admin_sched); 398 399 entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp, 400 &interval_start, &interval_end, true); 401 rcu_read_unlock(); 402 403 return entry; 404 } 405 406 static bool taprio_flags_valid(u32 flags) 407 { 408 /* Make sure no other flag bits are set. */ 409 if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | 410 TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) 411 return false; 412 /* txtime-assist and full offload are mutually exclusive */ 413 if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) && 414 (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) 415 return false; 416 return true; 417 } 418 419 /* This returns the tstamp value set by TCP in terms of the set clock. */ 420 static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb) 421 { 422 unsigned int offset = skb_network_offset(skb); 423 const struct ipv6hdr *ipv6h; 424 const struct iphdr *iph; 425 struct ipv6hdr _ipv6h; 426 427 ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h); 428 if (!ipv6h) 429 return 0; 430 431 if (ipv6h->version == 4) { 432 iph = (struct iphdr *)ipv6h; 433 offset += iph->ihl * 4; 434 435 /* special-case 6in4 tunnelling, as that is a common way to get 436 * v6 connectivity in the home 437 */ 438 if (iph->protocol == IPPROTO_IPV6) { 439 ipv6h = skb_header_pointer(skb, offset, 440 sizeof(_ipv6h), &_ipv6h); 441 442 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP) 443 return 0; 444 } else if (iph->protocol != IPPROTO_TCP) { 445 return 0; 446 } 447 } else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) { 448 return 0; 449 } 450 451 return taprio_mono_to_any(q, skb->skb_mstamp_ns); 452 } 453 454 /* There are a few scenarios where we will have to modify the txtime from 455 * what is read from next_txtime in sched_entry. They are: 456 * 1. If txtime is in the past, 457 * a. The gate for the traffic class is currently open and packet can be 458 * transmitted before it closes, schedule the packet right away. 459 * b. If the gate corresponding to the traffic class is going to open later 460 * in the cycle, set the txtime of packet to the interval start. 461 * 2. If txtime is in the future, there are packets corresponding to the 462 * current traffic class waiting to be transmitted. So, the following 463 * possibilities exist: 464 * a. We can transmit the packet before the window containing the txtime 465 * closes. 466 * b. The window might close before the transmission can be completed 467 * successfully. So, schedule the packet in the next open window. 468 */ 469 static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch) 470 { 471 ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp; 472 struct taprio_sched *q = qdisc_priv(sch); 473 struct sched_gate_list *sched, *admin; 474 ktime_t minimum_time, now, txtime; 475 int len, packet_transmit_time; 476 struct sched_entry *entry; 477 bool sched_changed; 478 479 now = taprio_get_time(q); 480 minimum_time = ktime_add_ns(now, q->txtime_delay); 481 482 tcp_tstamp = get_tcp_tstamp(q, skb); 483 minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp); 484 485 rcu_read_lock(); 486 admin = rcu_dereference(q->admin_sched); 487 sched = rcu_dereference(q->oper_sched); 488 if (admin && ktime_after(minimum_time, admin->base_time)) 489 switch_schedules(q, &admin, &sched); 490 491 /* Until the schedule starts, all the queues are open */ 492 if (!sched || ktime_before(minimum_time, sched->base_time)) { 493 txtime = minimum_time; 494 goto done; 495 } 496 497 len = qdisc_pkt_len(skb); 498 packet_transmit_time = length_to_duration(q, len); 499 500 do { 501 sched_changed = false; 502 503 entry = find_entry_to_transmit(skb, sch, sched, admin, 504 minimum_time, 505 &interval_start, &interval_end, 506 false); 507 if (!entry) { 508 txtime = 0; 509 goto done; 510 } 511 512 txtime = entry->next_txtime; 513 txtime = max_t(ktime_t, txtime, minimum_time); 514 txtime = max_t(ktime_t, txtime, interval_start); 515 516 if (admin && admin != sched && 517 ktime_after(txtime, admin->base_time)) { 518 sched = admin; 519 sched_changed = true; 520 continue; 521 } 522 523 transmit_end_time = ktime_add(txtime, packet_transmit_time); 524 minimum_time = transmit_end_time; 525 526 /* Update the txtime of current entry to the next time it's 527 * interval starts. 528 */ 529 if (ktime_after(transmit_end_time, interval_end)) 530 entry->next_txtime = ktime_add(interval_start, sched->cycle_time); 531 } while (sched_changed || ktime_after(transmit_end_time, interval_end)); 532 533 entry->next_txtime = transmit_end_time; 534 535 done: 536 rcu_read_unlock(); 537 return txtime; 538 } 539 540 /* Devices with full offload are expected to honor this in hardware */ 541 static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch, 542 struct sk_buff *skb) 543 { 544 struct taprio_sched *q = qdisc_priv(sch); 545 struct net_device *dev = qdisc_dev(sch); 546 struct sched_gate_list *sched; 547 int prio = skb->priority; 548 bool exceeds = false; 549 u8 tc; 550 551 tc = netdev_get_prio_tc_map(dev, prio); 552 553 rcu_read_lock(); 554 sched = rcu_dereference(q->oper_sched); 555 if (sched && skb->len > sched->max_frm_len[tc]) 556 exceeds = true; 557 rcu_read_unlock(); 558 559 return exceeds; 560 } 561 562 static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch, 563 struct Qdisc *child, struct sk_buff **to_free) 564 { 565 struct taprio_sched *q = qdisc_priv(sch); 566 567 /* sk_flags are only safe to use on full sockets. */ 568 if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) { 569 if (!is_valid_interval(skb, sch)) 570 return qdisc_drop(skb, sch, to_free); 571 } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { 572 skb->tstamp = get_packet_txtime(skb, sch); 573 if (!skb->tstamp) 574 return qdisc_drop(skb, sch, to_free); 575 } 576 577 qdisc_qstats_backlog_inc(sch, skb); 578 sch->q.qlen++; 579 580 return qdisc_enqueue(skb, child, to_free); 581 } 582 583 static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch, 584 struct Qdisc *child, 585 struct sk_buff **to_free) 586 { 587 unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb); 588 netdev_features_t features = netif_skb_features(skb); 589 struct sk_buff *segs, *nskb; 590 int ret; 591 592 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 593 if (IS_ERR_OR_NULL(segs)) 594 return qdisc_drop(skb, sch, to_free); 595 596 skb_list_walk_safe(segs, segs, nskb) { 597 skb_mark_not_on_list(segs); 598 qdisc_skb_cb(segs)->pkt_len = segs->len; 599 slen += segs->len; 600 601 /* FIXME: we should be segmenting to a smaller size 602 * rather than dropping these 603 */ 604 if (taprio_skb_exceeds_queue_max_sdu(sch, segs)) 605 ret = qdisc_drop(segs, sch, to_free); 606 else 607 ret = taprio_enqueue_one(segs, sch, child, to_free); 608 609 if (ret != NET_XMIT_SUCCESS) { 610 if (net_xmit_drop_count(ret)) 611 qdisc_qstats_drop(sch); 612 } else { 613 numsegs++; 614 } 615 } 616 617 if (numsegs > 1) 618 qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen); 619 consume_skb(skb); 620 621 return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP; 622 } 623 624 /* Will not be called in the full offload case, since the TX queues are 625 * attached to the Qdisc created using qdisc_create_dflt() 626 */ 627 static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, 628 struct sk_buff **to_free) 629 { 630 struct taprio_sched *q = qdisc_priv(sch); 631 struct Qdisc *child; 632 int queue; 633 634 queue = skb_get_queue_mapping(skb); 635 636 child = q->qdiscs[queue]; 637 if (unlikely(!child)) 638 return qdisc_drop(skb, sch, to_free); 639 640 if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) { 641 /* Large packets might not be transmitted when the transmission 642 * duration exceeds any configured interval. Therefore, segment 643 * the skb into smaller chunks. Drivers with full offload are 644 * expected to handle this in hardware. 645 */ 646 if (skb_is_gso(skb)) 647 return taprio_enqueue_segmented(skb, sch, child, 648 to_free); 649 650 return qdisc_drop(skb, sch, to_free); 651 } 652 653 return taprio_enqueue_one(skb, sch, child, to_free); 654 } 655 656 static struct sk_buff *taprio_peek(struct Qdisc *sch) 657 { 658 WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented"); 659 return NULL; 660 } 661 662 static void taprio_set_budgets(struct taprio_sched *q, 663 struct sched_gate_list *sched, 664 struct sched_entry *entry) 665 { 666 struct net_device *dev = qdisc_dev(q->root); 667 int num_tc = netdev_get_num_tc(dev); 668 int tc, budget; 669 670 for (tc = 0; tc < num_tc; tc++) { 671 /* Traffic classes which never close have infinite budget */ 672 if (entry->gate_duration[tc] == sched->cycle_time) 673 budget = INT_MAX; 674 else 675 budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC, 676 atomic64_read(&q->picos_per_byte)); 677 678 atomic_set(&entry->budget[tc], budget); 679 } 680 } 681 682 /* When an skb is sent, it consumes from the budget of all traffic classes */ 683 static int taprio_update_budgets(struct sched_entry *entry, size_t len, 684 int tc_consumed, int num_tc) 685 { 686 int tc, budget, new_budget = 0; 687 688 for (tc = 0; tc < num_tc; tc++) { 689 budget = atomic_read(&entry->budget[tc]); 690 /* Don't consume from infinite budget */ 691 if (budget == INT_MAX) { 692 if (tc == tc_consumed) 693 new_budget = budget; 694 continue; 695 } 696 697 if (tc == tc_consumed) 698 new_budget = atomic_sub_return(len, &entry->budget[tc]); 699 else 700 atomic_sub(len, &entry->budget[tc]); 701 } 702 703 return new_budget; 704 } 705 706 static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq, 707 struct sched_entry *entry, 708 u32 gate_mask) 709 { 710 struct taprio_sched *q = qdisc_priv(sch); 711 struct net_device *dev = qdisc_dev(sch); 712 struct Qdisc *child = q->qdiscs[txq]; 713 int num_tc = netdev_get_num_tc(dev); 714 struct sk_buff *skb; 715 ktime_t guard; 716 int prio; 717 int len; 718 u8 tc; 719 720 if (unlikely(!child)) 721 return NULL; 722 723 if (TXTIME_ASSIST_IS_ENABLED(q->flags)) 724 goto skip_peek_checks; 725 726 skb = child->ops->peek(child); 727 if (!skb) 728 return NULL; 729 730 prio = skb->priority; 731 tc = netdev_get_prio_tc_map(dev, prio); 732 733 if (!(gate_mask & BIT(tc))) 734 return NULL; 735 736 len = qdisc_pkt_len(skb); 737 guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len)); 738 739 /* In the case that there's no gate entry, there's no 740 * guard band ... 741 */ 742 if (gate_mask != TAPRIO_ALL_GATES_OPEN && 743 !taprio_entry_allows_tx(guard, entry, tc)) 744 return NULL; 745 746 /* ... and no budget. */ 747 if (gate_mask != TAPRIO_ALL_GATES_OPEN && 748 taprio_update_budgets(entry, len, tc, num_tc) < 0) 749 return NULL; 750 751 skip_peek_checks: 752 skb = child->ops->dequeue(child); 753 if (unlikely(!skb)) 754 return NULL; 755 756 qdisc_bstats_update(sch, skb); 757 qdisc_qstats_backlog_dec(sch, skb); 758 sch->q.qlen--; 759 760 return skb; 761 } 762 763 static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq) 764 { 765 int offset = dev->tc_to_txq[tc].offset; 766 int count = dev->tc_to_txq[tc].count; 767 768 (*txq)++; 769 if (*txq == offset + count) 770 *txq = offset; 771 } 772 773 /* Prioritize higher traffic classes, and select among TXQs belonging to the 774 * same TC using round robin 775 */ 776 static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch, 777 struct sched_entry *entry, 778 u32 gate_mask) 779 { 780 struct taprio_sched *q = qdisc_priv(sch); 781 struct net_device *dev = qdisc_dev(sch); 782 int num_tc = netdev_get_num_tc(dev); 783 struct sk_buff *skb; 784 int tc; 785 786 for (tc = num_tc - 1; tc >= 0; tc--) { 787 int first_txq = q->cur_txq[tc]; 788 789 if (!(gate_mask & BIT(tc))) 790 continue; 791 792 do { 793 skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc], 794 entry, gate_mask); 795 796 taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]); 797 798 if (skb) 799 return skb; 800 } while (q->cur_txq[tc] != first_txq); 801 } 802 803 return NULL; 804 } 805 806 /* Broken way of prioritizing smaller TXQ indices and ignoring the traffic 807 * class other than to determine whether the gate is open or not 808 */ 809 static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch, 810 struct sched_entry *entry, 811 u32 gate_mask) 812 { 813 struct net_device *dev = qdisc_dev(sch); 814 struct sk_buff *skb; 815 int i; 816 817 for (i = 0; i < dev->num_tx_queues; i++) { 818 skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask); 819 if (skb) 820 return skb; 821 } 822 823 return NULL; 824 } 825 826 /* Will not be called in the full offload case, since the TX queues are 827 * attached to the Qdisc created using qdisc_create_dflt() 828 */ 829 static struct sk_buff *taprio_dequeue(struct Qdisc *sch) 830 { 831 struct taprio_sched *q = qdisc_priv(sch); 832 struct sk_buff *skb = NULL; 833 struct sched_entry *entry; 834 u32 gate_mask; 835 836 rcu_read_lock(); 837 entry = rcu_dereference(q->current_entry); 838 /* if there's no entry, it means that the schedule didn't 839 * start yet, so force all gates to be open, this is in 840 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5 841 * "AdminGateStates" 842 */ 843 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN; 844 if (!gate_mask) 845 goto done; 846 847 if (static_branch_unlikely(&taprio_have_broken_mqprio) && 848 !static_branch_likely(&taprio_have_working_mqprio)) { 849 /* Single NIC kind which is broken */ 850 skb = taprio_dequeue_txq_priority(sch, entry, gate_mask); 851 } else if (static_branch_likely(&taprio_have_working_mqprio) && 852 !static_branch_unlikely(&taprio_have_broken_mqprio)) { 853 /* Single NIC kind which prioritizes properly */ 854 skb = taprio_dequeue_tc_priority(sch, entry, gate_mask); 855 } else { 856 /* Mixed NIC kinds present in system, need dynamic testing */ 857 if (q->broken_mqprio) 858 skb = taprio_dequeue_txq_priority(sch, entry, gate_mask); 859 else 860 skb = taprio_dequeue_tc_priority(sch, entry, gate_mask); 861 } 862 863 done: 864 rcu_read_unlock(); 865 866 return skb; 867 } 868 869 static bool should_restart_cycle(const struct sched_gate_list *oper, 870 const struct sched_entry *entry) 871 { 872 if (list_is_last(&entry->list, &oper->entries)) 873 return true; 874 875 if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0) 876 return true; 877 878 return false; 879 } 880 881 static bool should_change_schedules(const struct sched_gate_list *admin, 882 const struct sched_gate_list *oper, 883 ktime_t end_time) 884 { 885 ktime_t next_base_time, extension_time; 886 887 if (!admin) 888 return false; 889 890 next_base_time = sched_base_time(admin); 891 892 /* This is the simple case, the end_time would fall after 893 * the next schedule base_time. 894 */ 895 if (ktime_compare(next_base_time, end_time) <= 0) 896 return true; 897 898 /* This is the cycle_time_extension case, if the end_time 899 * plus the amount that can be extended would fall after the 900 * next schedule base_time, we can extend the current schedule 901 * for that amount. 902 */ 903 extension_time = ktime_add_ns(end_time, oper->cycle_time_extension); 904 905 /* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about 906 * how precisely the extension should be made. So after 907 * conformance testing, this logic may change. 908 */ 909 if (ktime_compare(next_base_time, extension_time) <= 0) 910 return true; 911 912 return false; 913 } 914 915 static enum hrtimer_restart advance_sched(struct hrtimer *timer) 916 { 917 struct taprio_sched *q = container_of(timer, struct taprio_sched, 918 advance_timer); 919 struct net_device *dev = qdisc_dev(q->root); 920 struct sched_gate_list *oper, *admin; 921 int num_tc = netdev_get_num_tc(dev); 922 struct sched_entry *entry, *next; 923 struct Qdisc *sch = q->root; 924 ktime_t end_time; 925 int tc; 926 927 spin_lock(&q->current_entry_lock); 928 entry = rcu_dereference_protected(q->current_entry, 929 lockdep_is_held(&q->current_entry_lock)); 930 oper = rcu_dereference_protected(q->oper_sched, 931 lockdep_is_held(&q->current_entry_lock)); 932 admin = rcu_dereference_protected(q->admin_sched, 933 lockdep_is_held(&q->current_entry_lock)); 934 935 if (!oper) 936 switch_schedules(q, &admin, &oper); 937 938 /* This can happen in two cases: 1. this is the very first run 939 * of this function (i.e. we weren't running any schedule 940 * previously); 2. The previous schedule just ended. The first 941 * entry of all schedules are pre-calculated during the 942 * schedule initialization. 943 */ 944 if (unlikely(!entry || entry->end_time == oper->base_time)) { 945 next = list_first_entry(&oper->entries, struct sched_entry, 946 list); 947 end_time = next->end_time; 948 goto first_run; 949 } 950 951 if (should_restart_cycle(oper, entry)) { 952 next = list_first_entry(&oper->entries, struct sched_entry, 953 list); 954 oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time, 955 oper->cycle_time); 956 } else { 957 next = list_next_entry(entry, list); 958 } 959 960 end_time = ktime_add_ns(entry->end_time, next->interval); 961 end_time = min_t(ktime_t, end_time, oper->cycle_end_time); 962 963 for (tc = 0; tc < num_tc; tc++) { 964 if (next->gate_duration[tc] == oper->cycle_time) 965 next->gate_close_time[tc] = KTIME_MAX; 966 else 967 next->gate_close_time[tc] = ktime_add_ns(entry->end_time, 968 next->gate_duration[tc]); 969 } 970 971 if (should_change_schedules(admin, oper, end_time)) { 972 /* Set things so the next time this runs, the new 973 * schedule runs. 974 */ 975 end_time = sched_base_time(admin); 976 switch_schedules(q, &admin, &oper); 977 } 978 979 next->end_time = end_time; 980 taprio_set_budgets(q, oper, next); 981 982 first_run: 983 rcu_assign_pointer(q->current_entry, next); 984 spin_unlock(&q->current_entry_lock); 985 986 hrtimer_set_expires(&q->advance_timer, end_time); 987 988 rcu_read_lock(); 989 __netif_schedule(sch); 990 rcu_read_unlock(); 991 992 return HRTIMER_RESTART; 993 } 994 995 static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { 996 [TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 }, 997 [TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 }, 998 [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 }, 999 [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 }, 1000 }; 1001 1002 static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { 1003 [TCA_TAPRIO_TC_ENTRY_INDEX] = { .type = NLA_U32 }, 1004 [TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 }, 1005 }; 1006 1007 static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { 1008 [TCA_TAPRIO_ATTR_PRIOMAP] = { 1009 .len = sizeof(struct tc_mqprio_qopt) 1010 }, 1011 [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED }, 1012 [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 }, 1013 [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED }, 1014 [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, 1015 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 }, 1016 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 }, 1017 [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 }, 1018 [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 }, 1019 [TCA_TAPRIO_ATTR_TC_ENTRY] = { .type = NLA_NESTED }, 1020 }; 1021 1022 static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb, 1023 struct sched_entry *entry, 1024 struct netlink_ext_ack *extack) 1025 { 1026 int min_duration = length_to_duration(q, ETH_ZLEN); 1027 u32 interval = 0; 1028 1029 if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD]) 1030 entry->command = nla_get_u8( 1031 tb[TCA_TAPRIO_SCHED_ENTRY_CMD]); 1032 1033 if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]) 1034 entry->gate_mask = nla_get_u32( 1035 tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]); 1036 1037 if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]) 1038 interval = nla_get_u32( 1039 tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]); 1040 1041 /* The interval should allow at least the minimum ethernet 1042 * frame to go out. 1043 */ 1044 if (interval < min_duration) { 1045 NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry"); 1046 return -EINVAL; 1047 } 1048 1049 entry->interval = interval; 1050 1051 return 0; 1052 } 1053 1054 static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n, 1055 struct sched_entry *entry, int index, 1056 struct netlink_ext_ack *extack) 1057 { 1058 struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { }; 1059 int err; 1060 1061 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n, 1062 entry_policy, NULL); 1063 if (err < 0) { 1064 NL_SET_ERR_MSG(extack, "Could not parse nested entry"); 1065 return -EINVAL; 1066 } 1067 1068 entry->index = index; 1069 1070 return fill_sched_entry(q, tb, entry, extack); 1071 } 1072 1073 static int parse_sched_list(struct taprio_sched *q, struct nlattr *list, 1074 struct sched_gate_list *sched, 1075 struct netlink_ext_ack *extack) 1076 { 1077 struct nlattr *n; 1078 int err, rem; 1079 int i = 0; 1080 1081 if (!list) 1082 return -EINVAL; 1083 1084 nla_for_each_nested(n, list, rem) { 1085 struct sched_entry *entry; 1086 1087 if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) { 1088 NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'"); 1089 continue; 1090 } 1091 1092 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1093 if (!entry) { 1094 NL_SET_ERR_MSG(extack, "Not enough memory for entry"); 1095 return -ENOMEM; 1096 } 1097 1098 err = parse_sched_entry(q, n, entry, i, extack); 1099 if (err < 0) { 1100 kfree(entry); 1101 return err; 1102 } 1103 1104 list_add_tail(&entry->list, &sched->entries); 1105 i++; 1106 } 1107 1108 sched->num_entries = i; 1109 1110 return i; 1111 } 1112 1113 static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, 1114 struct sched_gate_list *new, 1115 struct netlink_ext_ack *extack) 1116 { 1117 int err = 0; 1118 1119 if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) { 1120 NL_SET_ERR_MSG(extack, "Adding a single entry is not supported"); 1121 return -ENOTSUPP; 1122 } 1123 1124 if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]) 1125 new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]); 1126 1127 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]) 1128 new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]); 1129 1130 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]) 1131 new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]); 1132 1133 if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]) 1134 err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], 1135 new, extack); 1136 if (err < 0) 1137 return err; 1138 1139 if (!new->cycle_time) { 1140 struct sched_entry *entry; 1141 ktime_t cycle = 0; 1142 1143 list_for_each_entry(entry, &new->entries, list) 1144 cycle = ktime_add_ns(cycle, entry->interval); 1145 1146 if (!cycle) { 1147 NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0"); 1148 return -EINVAL; 1149 } 1150 1151 new->cycle_time = cycle; 1152 } 1153 1154 taprio_calculate_gate_durations(q, new); 1155 1156 return 0; 1157 } 1158 1159 static int taprio_parse_mqprio_opt(struct net_device *dev, 1160 struct tc_mqprio_qopt *qopt, 1161 struct netlink_ext_ack *extack, 1162 u32 taprio_flags) 1163 { 1164 bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags); 1165 1166 if (!qopt && !dev->num_tc) { 1167 NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary"); 1168 return -EINVAL; 1169 } 1170 1171 /* If num_tc is already set, it means that the user already 1172 * configured the mqprio part 1173 */ 1174 if (dev->num_tc) 1175 return 0; 1176 1177 /* taprio imposes that traffic classes map 1:n to tx queues */ 1178 if (qopt->num_tc > dev->num_tx_queues) { 1179 NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues"); 1180 return -EINVAL; 1181 } 1182 1183 /* For some reason, in txtime-assist mode, we allow TXQ ranges for 1184 * different TCs to overlap, and just validate the TXQ ranges. 1185 */ 1186 return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs, 1187 extack); 1188 } 1189 1190 static int taprio_get_start_time(struct Qdisc *sch, 1191 struct sched_gate_list *sched, 1192 ktime_t *start) 1193 { 1194 struct taprio_sched *q = qdisc_priv(sch); 1195 ktime_t now, base, cycle; 1196 s64 n; 1197 1198 base = sched_base_time(sched); 1199 now = taprio_get_time(q); 1200 1201 if (ktime_after(base, now)) { 1202 *start = base; 1203 return 0; 1204 } 1205 1206 cycle = sched->cycle_time; 1207 1208 /* The qdisc is expected to have at least one sched_entry. Moreover, 1209 * any entry must have 'interval' > 0. Thus if the cycle time is zero, 1210 * something went really wrong. In that case, we should warn about this 1211 * inconsistent state and return error. 1212 */ 1213 if (WARN_ON(!cycle)) 1214 return -EFAULT; 1215 1216 /* Schedule the start time for the beginning of the next 1217 * cycle. 1218 */ 1219 n = div64_s64(ktime_sub_ns(now, base), cycle); 1220 *start = ktime_add_ns(base, (n + 1) * cycle); 1221 return 0; 1222 } 1223 1224 static void setup_first_end_time(struct taprio_sched *q, 1225 struct sched_gate_list *sched, ktime_t base) 1226 { 1227 struct net_device *dev = qdisc_dev(q->root); 1228 int num_tc = netdev_get_num_tc(dev); 1229 struct sched_entry *first; 1230 ktime_t cycle; 1231 int tc; 1232 1233 first = list_first_entry(&sched->entries, 1234 struct sched_entry, list); 1235 1236 cycle = sched->cycle_time; 1237 1238 /* FIXME: find a better place to do this */ 1239 sched->cycle_end_time = ktime_add_ns(base, cycle); 1240 1241 first->end_time = ktime_add_ns(base, first->interval); 1242 taprio_set_budgets(q, sched, first); 1243 1244 for (tc = 0; tc < num_tc; tc++) { 1245 if (first->gate_duration[tc] == sched->cycle_time) 1246 first->gate_close_time[tc] = KTIME_MAX; 1247 else 1248 first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]); 1249 } 1250 1251 rcu_assign_pointer(q->current_entry, NULL); 1252 } 1253 1254 static void taprio_start_sched(struct Qdisc *sch, 1255 ktime_t start, struct sched_gate_list *new) 1256 { 1257 struct taprio_sched *q = qdisc_priv(sch); 1258 ktime_t expires; 1259 1260 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) 1261 return; 1262 1263 expires = hrtimer_get_expires(&q->advance_timer); 1264 if (expires == 0) 1265 expires = KTIME_MAX; 1266 1267 /* If the new schedule starts before the next expiration, we 1268 * reprogram it to the earliest one, so we change the admin 1269 * schedule to the operational one at the right time. 1270 */ 1271 start = min_t(ktime_t, start, expires); 1272 1273 hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS); 1274 } 1275 1276 static void taprio_set_picos_per_byte(struct net_device *dev, 1277 struct taprio_sched *q) 1278 { 1279 struct ethtool_link_ksettings ecmd; 1280 int speed = SPEED_10; 1281 int picos_per_byte; 1282 int err; 1283 1284 err = __ethtool_get_link_ksettings(dev, &ecmd); 1285 if (err < 0) 1286 goto skip; 1287 1288 if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN) 1289 speed = ecmd.base.speed; 1290 1291 skip: 1292 picos_per_byte = (USEC_PER_SEC * 8) / speed; 1293 1294 atomic64_set(&q->picos_per_byte, picos_per_byte); 1295 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", 1296 dev->name, (long long)atomic64_read(&q->picos_per_byte), 1297 ecmd.base.speed); 1298 } 1299 1300 static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event, 1301 void *ptr) 1302 { 1303 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1304 struct sched_gate_list *oper, *admin; 1305 struct qdisc_size_table *stab; 1306 struct taprio_sched *q; 1307 1308 ASSERT_RTNL(); 1309 1310 if (event != NETDEV_UP && event != NETDEV_CHANGE) 1311 return NOTIFY_DONE; 1312 1313 list_for_each_entry(q, &taprio_list, taprio_list) { 1314 if (dev != qdisc_dev(q->root)) 1315 continue; 1316 1317 taprio_set_picos_per_byte(dev, q); 1318 1319 stab = rtnl_dereference(q->root->stab); 1320 1321 oper = rtnl_dereference(q->oper_sched); 1322 if (oper) 1323 taprio_update_queue_max_sdu(q, oper, stab); 1324 1325 admin = rtnl_dereference(q->admin_sched); 1326 if (admin) 1327 taprio_update_queue_max_sdu(q, admin, stab); 1328 1329 break; 1330 } 1331 1332 return NOTIFY_DONE; 1333 } 1334 1335 static void setup_txtime(struct taprio_sched *q, 1336 struct sched_gate_list *sched, ktime_t base) 1337 { 1338 struct sched_entry *entry; 1339 u32 interval = 0; 1340 1341 list_for_each_entry(entry, &sched->entries, list) { 1342 entry->next_txtime = ktime_add_ns(base, interval); 1343 interval += entry->interval; 1344 } 1345 } 1346 1347 static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries) 1348 { 1349 struct __tc_taprio_qopt_offload *__offload; 1350 1351 __offload = kzalloc(struct_size(__offload, offload.entries, num_entries), 1352 GFP_KERNEL); 1353 if (!__offload) 1354 return NULL; 1355 1356 refcount_set(&__offload->users, 1); 1357 1358 return &__offload->offload; 1359 } 1360 1361 struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload 1362 *offload) 1363 { 1364 struct __tc_taprio_qopt_offload *__offload; 1365 1366 __offload = container_of(offload, struct __tc_taprio_qopt_offload, 1367 offload); 1368 1369 refcount_inc(&__offload->users); 1370 1371 return offload; 1372 } 1373 EXPORT_SYMBOL_GPL(taprio_offload_get); 1374 1375 void taprio_offload_free(struct tc_taprio_qopt_offload *offload) 1376 { 1377 struct __tc_taprio_qopt_offload *__offload; 1378 1379 __offload = container_of(offload, struct __tc_taprio_qopt_offload, 1380 offload); 1381 1382 if (!refcount_dec_and_test(&__offload->users)) 1383 return; 1384 1385 kfree(__offload); 1386 } 1387 EXPORT_SYMBOL_GPL(taprio_offload_free); 1388 1389 /* The function will only serve to keep the pointers to the "oper" and "admin" 1390 * schedules valid in relation to their base times, so when calling dump() the 1391 * users looks at the right schedules. 1392 * When using full offload, the admin configuration is promoted to oper at the 1393 * base_time in the PHC time domain. But because the system time is not 1394 * necessarily in sync with that, we can't just trigger a hrtimer to call 1395 * switch_schedules at the right hardware time. 1396 * At the moment we call this by hand right away from taprio, but in the future 1397 * it will be useful to create a mechanism for drivers to notify taprio of the 1398 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump(). 1399 * This is left as TODO. 1400 */ 1401 static void taprio_offload_config_changed(struct taprio_sched *q) 1402 { 1403 struct sched_gate_list *oper, *admin; 1404 1405 oper = rtnl_dereference(q->oper_sched); 1406 admin = rtnl_dereference(q->admin_sched); 1407 1408 switch_schedules(q, &admin, &oper); 1409 } 1410 1411 static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask) 1412 { 1413 u32 i, queue_mask = 0; 1414 1415 for (i = 0; i < dev->num_tc; i++) { 1416 u32 offset, count; 1417 1418 if (!(tc_mask & BIT(i))) 1419 continue; 1420 1421 offset = dev->tc_to_txq[i].offset; 1422 count = dev->tc_to_txq[i].count; 1423 1424 queue_mask |= GENMASK(offset + count - 1, offset); 1425 } 1426 1427 return queue_mask; 1428 } 1429 1430 static void taprio_sched_to_offload(struct net_device *dev, 1431 struct sched_gate_list *sched, 1432 struct tc_taprio_qopt_offload *offload, 1433 const struct tc_taprio_caps *caps) 1434 { 1435 struct sched_entry *entry; 1436 int i = 0; 1437 1438 offload->base_time = sched->base_time; 1439 offload->cycle_time = sched->cycle_time; 1440 offload->cycle_time_extension = sched->cycle_time_extension; 1441 1442 list_for_each_entry(entry, &sched->entries, list) { 1443 struct tc_taprio_sched_entry *e = &offload->entries[i]; 1444 1445 e->command = entry->command; 1446 e->interval = entry->interval; 1447 if (caps->gate_mask_per_txq) 1448 e->gate_mask = tc_map_to_queue_mask(dev, 1449 entry->gate_mask); 1450 else 1451 e->gate_mask = entry->gate_mask; 1452 1453 i++; 1454 } 1455 1456 offload->num_entries = i; 1457 } 1458 1459 static void taprio_detect_broken_mqprio(struct taprio_sched *q) 1460 { 1461 struct net_device *dev = qdisc_dev(q->root); 1462 struct tc_taprio_caps caps; 1463 1464 qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO, 1465 &caps, sizeof(caps)); 1466 1467 q->broken_mqprio = caps.broken_mqprio; 1468 if (q->broken_mqprio) 1469 static_branch_inc(&taprio_have_broken_mqprio); 1470 else 1471 static_branch_inc(&taprio_have_working_mqprio); 1472 1473 q->detected_mqprio = true; 1474 } 1475 1476 static void taprio_cleanup_broken_mqprio(struct taprio_sched *q) 1477 { 1478 if (!q->detected_mqprio) 1479 return; 1480 1481 if (q->broken_mqprio) 1482 static_branch_dec(&taprio_have_broken_mqprio); 1483 else 1484 static_branch_dec(&taprio_have_working_mqprio); 1485 } 1486 1487 static int taprio_enable_offload(struct net_device *dev, 1488 struct taprio_sched *q, 1489 struct sched_gate_list *sched, 1490 struct netlink_ext_ack *extack) 1491 { 1492 const struct net_device_ops *ops = dev->netdev_ops; 1493 struct tc_taprio_qopt_offload *offload; 1494 struct tc_taprio_caps caps; 1495 int tc, err = 0; 1496 1497 if (!ops->ndo_setup_tc) { 1498 NL_SET_ERR_MSG(extack, 1499 "Device does not support taprio offload"); 1500 return -EOPNOTSUPP; 1501 } 1502 1503 qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO, 1504 &caps, sizeof(caps)); 1505 1506 if (!caps.supports_queue_max_sdu) { 1507 for (tc = 0; tc < TC_MAX_QUEUE; tc++) { 1508 if (q->max_sdu[tc]) { 1509 NL_SET_ERR_MSG_MOD(extack, 1510 "Device does not handle queueMaxSDU"); 1511 return -EOPNOTSUPP; 1512 } 1513 } 1514 } 1515 1516 offload = taprio_offload_alloc(sched->num_entries); 1517 if (!offload) { 1518 NL_SET_ERR_MSG(extack, 1519 "Not enough memory for enabling offload mode"); 1520 return -ENOMEM; 1521 } 1522 offload->enable = 1; 1523 mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt); 1524 taprio_sched_to_offload(dev, sched, offload, &caps); 1525 1526 for (tc = 0; tc < TC_MAX_QUEUE; tc++) 1527 offload->max_sdu[tc] = q->max_sdu[tc]; 1528 1529 err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); 1530 if (err < 0) { 1531 NL_SET_ERR_MSG(extack, 1532 "Device failed to setup taprio offload"); 1533 goto done; 1534 } 1535 1536 q->offloaded = true; 1537 1538 done: 1539 taprio_offload_free(offload); 1540 1541 return err; 1542 } 1543 1544 static int taprio_disable_offload(struct net_device *dev, 1545 struct taprio_sched *q, 1546 struct netlink_ext_ack *extack) 1547 { 1548 const struct net_device_ops *ops = dev->netdev_ops; 1549 struct tc_taprio_qopt_offload *offload; 1550 int err; 1551 1552 if (!q->offloaded) 1553 return 0; 1554 1555 offload = taprio_offload_alloc(0); 1556 if (!offload) { 1557 NL_SET_ERR_MSG(extack, 1558 "Not enough memory to disable offload mode"); 1559 return -ENOMEM; 1560 } 1561 offload->enable = 0; 1562 1563 err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); 1564 if (err < 0) { 1565 NL_SET_ERR_MSG(extack, 1566 "Device failed to disable offload"); 1567 goto out; 1568 } 1569 1570 q->offloaded = false; 1571 1572 out: 1573 taprio_offload_free(offload); 1574 1575 return err; 1576 } 1577 1578 /* If full offload is enabled, the only possible clockid is the net device's 1579 * PHC. For that reason, specifying a clockid through netlink is incorrect. 1580 * For txtime-assist, it is implicitly assumed that the device's PHC is kept 1581 * in sync with the specified clockid via a user space daemon such as phc2sys. 1582 * For both software taprio and txtime-assist, the clockid is used for the 1583 * hrtimer that advances the schedule and hence mandatory. 1584 */ 1585 static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb, 1586 struct netlink_ext_ack *extack) 1587 { 1588 struct taprio_sched *q = qdisc_priv(sch); 1589 struct net_device *dev = qdisc_dev(sch); 1590 int err = -EINVAL; 1591 1592 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { 1593 const struct ethtool_ops *ops = dev->ethtool_ops; 1594 struct ethtool_ts_info info = { 1595 .cmd = ETHTOOL_GET_TS_INFO, 1596 .phc_index = -1, 1597 }; 1598 1599 if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { 1600 NL_SET_ERR_MSG(extack, 1601 "The 'clockid' cannot be specified for full offload"); 1602 goto out; 1603 } 1604 1605 if (ops && ops->get_ts_info) 1606 err = ops->get_ts_info(dev, &info); 1607 1608 if (err || info.phc_index < 0) { 1609 NL_SET_ERR_MSG(extack, 1610 "Device does not have a PTP clock"); 1611 err = -ENOTSUPP; 1612 goto out; 1613 } 1614 } else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { 1615 int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]); 1616 enum tk_offsets tk_offset; 1617 1618 /* We only support static clockids and we don't allow 1619 * for it to be modified after the first init. 1620 */ 1621 if (clockid < 0 || 1622 (q->clockid != -1 && q->clockid != clockid)) { 1623 NL_SET_ERR_MSG(extack, 1624 "Changing the 'clockid' of a running schedule is not supported"); 1625 err = -ENOTSUPP; 1626 goto out; 1627 } 1628 1629 switch (clockid) { 1630 case CLOCK_REALTIME: 1631 tk_offset = TK_OFFS_REAL; 1632 break; 1633 case CLOCK_MONOTONIC: 1634 tk_offset = TK_OFFS_MAX; 1635 break; 1636 case CLOCK_BOOTTIME: 1637 tk_offset = TK_OFFS_BOOT; 1638 break; 1639 case CLOCK_TAI: 1640 tk_offset = TK_OFFS_TAI; 1641 break; 1642 default: 1643 NL_SET_ERR_MSG(extack, "Invalid 'clockid'"); 1644 err = -EINVAL; 1645 goto out; 1646 } 1647 /* This pairs with READ_ONCE() in taprio_mono_to_any */ 1648 WRITE_ONCE(q->tk_offset, tk_offset); 1649 1650 q->clockid = clockid; 1651 } else { 1652 NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory"); 1653 goto out; 1654 } 1655 1656 /* Everything went ok, return success. */ 1657 err = 0; 1658 1659 out: 1660 return err; 1661 } 1662 1663 static int taprio_parse_tc_entry(struct Qdisc *sch, 1664 struct nlattr *opt, 1665 u32 max_sdu[TC_QOPT_MAX_QUEUE], 1666 unsigned long *seen_tcs, 1667 struct netlink_ext_ack *extack) 1668 { 1669 struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { }; 1670 struct net_device *dev = qdisc_dev(sch); 1671 u32 val = 0; 1672 int err, tc; 1673 1674 err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt, 1675 taprio_tc_policy, extack); 1676 if (err < 0) 1677 return err; 1678 1679 if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) { 1680 NL_SET_ERR_MSG_MOD(extack, "TC entry index missing"); 1681 return -EINVAL; 1682 } 1683 1684 tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]); 1685 if (tc >= TC_QOPT_MAX_QUEUE) { 1686 NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range"); 1687 return -ERANGE; 1688 } 1689 1690 if (*seen_tcs & BIT(tc)) { 1691 NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry"); 1692 return -EINVAL; 1693 } 1694 1695 *seen_tcs |= BIT(tc); 1696 1697 if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) 1698 val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]); 1699 1700 if (val > dev->max_mtu) { 1701 NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU"); 1702 return -ERANGE; 1703 } 1704 1705 max_sdu[tc] = val; 1706 1707 return 0; 1708 } 1709 1710 static int taprio_parse_tc_entries(struct Qdisc *sch, 1711 struct nlattr *opt, 1712 struct netlink_ext_ack *extack) 1713 { 1714 struct taprio_sched *q = qdisc_priv(sch); 1715 u32 max_sdu[TC_QOPT_MAX_QUEUE]; 1716 unsigned long seen_tcs = 0; 1717 struct nlattr *n; 1718 int tc, rem; 1719 int err = 0; 1720 1721 for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) 1722 max_sdu[tc] = q->max_sdu[tc]; 1723 1724 nla_for_each_nested(n, opt, rem) { 1725 if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY) 1726 continue; 1727 1728 err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs, 1729 extack); 1730 if (err) 1731 goto out; 1732 } 1733 1734 for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) 1735 q->max_sdu[tc] = max_sdu[tc]; 1736 1737 out: 1738 return err; 1739 } 1740 1741 static int taprio_mqprio_cmp(const struct net_device *dev, 1742 const struct tc_mqprio_qopt *mqprio) 1743 { 1744 int i; 1745 1746 if (!mqprio || mqprio->num_tc != dev->num_tc) 1747 return -1; 1748 1749 for (i = 0; i < mqprio->num_tc; i++) 1750 if (dev->tc_to_txq[i].count != mqprio->count[i] || 1751 dev->tc_to_txq[i].offset != mqprio->offset[i]) 1752 return -1; 1753 1754 for (i = 0; i <= TC_BITMASK; i++) 1755 if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i]) 1756 return -1; 1757 1758 return 0; 1759 } 1760 1761 /* The semantics of the 'flags' argument in relation to 'change()' 1762 * requests, are interpreted following two rules (which are applied in 1763 * this order): (1) an omitted 'flags' argument is interpreted as 1764 * zero; (2) the 'flags' of a "running" taprio instance cannot be 1765 * changed. 1766 */ 1767 static int taprio_new_flags(const struct nlattr *attr, u32 old, 1768 struct netlink_ext_ack *extack) 1769 { 1770 u32 new = 0; 1771 1772 if (attr) 1773 new = nla_get_u32(attr); 1774 1775 if (old != TAPRIO_FLAGS_INVALID && old != new) { 1776 NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported"); 1777 return -EOPNOTSUPP; 1778 } 1779 1780 if (!taprio_flags_valid(new)) { 1781 NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid"); 1782 return -EINVAL; 1783 } 1784 1785 return new; 1786 } 1787 1788 static int taprio_change(struct Qdisc *sch, struct nlattr *opt, 1789 struct netlink_ext_ack *extack) 1790 { 1791 struct qdisc_size_table *stab = rtnl_dereference(sch->stab); 1792 struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { }; 1793 struct sched_gate_list *oper, *admin, *new_admin; 1794 struct taprio_sched *q = qdisc_priv(sch); 1795 struct net_device *dev = qdisc_dev(sch); 1796 struct tc_mqprio_qopt *mqprio = NULL; 1797 unsigned long flags; 1798 ktime_t start; 1799 int i, err; 1800 1801 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt, 1802 taprio_policy, extack); 1803 if (err < 0) 1804 return err; 1805 1806 if (tb[TCA_TAPRIO_ATTR_PRIOMAP]) 1807 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]); 1808 1809 err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS], 1810 q->flags, extack); 1811 if (err < 0) 1812 return err; 1813 1814 q->flags = err; 1815 1816 err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags); 1817 if (err < 0) 1818 return err; 1819 1820 err = taprio_parse_tc_entries(sch, opt, extack); 1821 if (err) 1822 return err; 1823 1824 new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL); 1825 if (!new_admin) { 1826 NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule"); 1827 return -ENOMEM; 1828 } 1829 INIT_LIST_HEAD(&new_admin->entries); 1830 1831 oper = rtnl_dereference(q->oper_sched); 1832 admin = rtnl_dereference(q->admin_sched); 1833 1834 /* no changes - no new mqprio settings */ 1835 if (!taprio_mqprio_cmp(dev, mqprio)) 1836 mqprio = NULL; 1837 1838 if (mqprio && (oper || admin)) { 1839 NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported"); 1840 err = -ENOTSUPP; 1841 goto free_sched; 1842 } 1843 1844 if (mqprio) { 1845 err = netdev_set_num_tc(dev, mqprio->num_tc); 1846 if (err) 1847 goto free_sched; 1848 for (i = 0; i < mqprio->num_tc; i++) { 1849 netdev_set_tc_queue(dev, i, 1850 mqprio->count[i], 1851 mqprio->offset[i]); 1852 q->cur_txq[i] = mqprio->offset[i]; 1853 } 1854 1855 /* Always use supplied priority mappings */ 1856 for (i = 0; i <= TC_BITMASK; i++) 1857 netdev_set_prio_tc_map(dev, i, 1858 mqprio->prio_tc_map[i]); 1859 } 1860 1861 err = parse_taprio_schedule(q, tb, new_admin, extack); 1862 if (err < 0) 1863 goto free_sched; 1864 1865 if (new_admin->num_entries == 0) { 1866 NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule"); 1867 err = -EINVAL; 1868 goto free_sched; 1869 } 1870 1871 err = taprio_parse_clockid(sch, tb, extack); 1872 if (err < 0) 1873 goto free_sched; 1874 1875 taprio_set_picos_per_byte(dev, q); 1876 taprio_update_queue_max_sdu(q, new_admin, stab); 1877 1878 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) 1879 err = taprio_enable_offload(dev, q, new_admin, extack); 1880 else 1881 err = taprio_disable_offload(dev, q, extack); 1882 if (err) 1883 goto free_sched; 1884 1885 /* Protects against enqueue()/dequeue() */ 1886 spin_lock_bh(qdisc_lock(sch)); 1887 1888 if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) { 1889 if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) { 1890 NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled"); 1891 err = -EINVAL; 1892 goto unlock; 1893 } 1894 1895 q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); 1896 } 1897 1898 if (!TXTIME_ASSIST_IS_ENABLED(q->flags) && 1899 !FULL_OFFLOAD_IS_ENABLED(q->flags) && 1900 !hrtimer_active(&q->advance_timer)) { 1901 hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); 1902 q->advance_timer.function = advance_sched; 1903 } 1904 1905 err = taprio_get_start_time(sch, new_admin, &start); 1906 if (err < 0) { 1907 NL_SET_ERR_MSG(extack, "Internal error: failed get start time"); 1908 goto unlock; 1909 } 1910 1911 setup_txtime(q, new_admin, start); 1912 1913 if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { 1914 if (!oper) { 1915 rcu_assign_pointer(q->oper_sched, new_admin); 1916 err = 0; 1917 new_admin = NULL; 1918 goto unlock; 1919 } 1920 1921 rcu_assign_pointer(q->admin_sched, new_admin); 1922 if (admin) 1923 call_rcu(&admin->rcu, taprio_free_sched_cb); 1924 } else { 1925 setup_first_end_time(q, new_admin, start); 1926 1927 /* Protects against advance_sched() */ 1928 spin_lock_irqsave(&q->current_entry_lock, flags); 1929 1930 taprio_start_sched(sch, start, new_admin); 1931 1932 rcu_assign_pointer(q->admin_sched, new_admin); 1933 if (admin) 1934 call_rcu(&admin->rcu, taprio_free_sched_cb); 1935 1936 spin_unlock_irqrestore(&q->current_entry_lock, flags); 1937 1938 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) 1939 taprio_offload_config_changed(q); 1940 } 1941 1942 new_admin = NULL; 1943 err = 0; 1944 1945 if (!stab) 1946 NL_SET_ERR_MSG_MOD(extack, 1947 "Size table not specified, frame length estimations may be inaccurate"); 1948 1949 unlock: 1950 spin_unlock_bh(qdisc_lock(sch)); 1951 1952 free_sched: 1953 if (new_admin) 1954 call_rcu(&new_admin->rcu, taprio_free_sched_cb); 1955 1956 return err; 1957 } 1958 1959 static void taprio_reset(struct Qdisc *sch) 1960 { 1961 struct taprio_sched *q = qdisc_priv(sch); 1962 struct net_device *dev = qdisc_dev(sch); 1963 int i; 1964 1965 hrtimer_cancel(&q->advance_timer); 1966 1967 if (q->qdiscs) { 1968 for (i = 0; i < dev->num_tx_queues; i++) 1969 if (q->qdiscs[i]) 1970 qdisc_reset(q->qdiscs[i]); 1971 } 1972 } 1973 1974 static void taprio_destroy(struct Qdisc *sch) 1975 { 1976 struct taprio_sched *q = qdisc_priv(sch); 1977 struct net_device *dev = qdisc_dev(sch); 1978 struct sched_gate_list *oper, *admin; 1979 unsigned int i; 1980 1981 list_del(&q->taprio_list); 1982 1983 /* Note that taprio_reset() might not be called if an error 1984 * happens in qdisc_create(), after taprio_init() has been called. 1985 */ 1986 hrtimer_cancel(&q->advance_timer); 1987 qdisc_synchronize(sch); 1988 1989 taprio_disable_offload(dev, q, NULL); 1990 1991 if (q->qdiscs) { 1992 for (i = 0; i < dev->num_tx_queues; i++) 1993 qdisc_put(q->qdiscs[i]); 1994 1995 kfree(q->qdiscs); 1996 } 1997 q->qdiscs = NULL; 1998 1999 netdev_reset_tc(dev); 2000 2001 oper = rtnl_dereference(q->oper_sched); 2002 admin = rtnl_dereference(q->admin_sched); 2003 2004 if (oper) 2005 call_rcu(&oper->rcu, taprio_free_sched_cb); 2006 2007 if (admin) 2008 call_rcu(&admin->rcu, taprio_free_sched_cb); 2009 2010 taprio_cleanup_broken_mqprio(q); 2011 } 2012 2013 static int taprio_init(struct Qdisc *sch, struct nlattr *opt, 2014 struct netlink_ext_ack *extack) 2015 { 2016 struct taprio_sched *q = qdisc_priv(sch); 2017 struct net_device *dev = qdisc_dev(sch); 2018 int i; 2019 2020 spin_lock_init(&q->current_entry_lock); 2021 2022 hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS); 2023 q->advance_timer.function = advance_sched; 2024 2025 q->root = sch; 2026 2027 /* We only support static clockids. Use an invalid value as default 2028 * and get the valid one on taprio_change(). 2029 */ 2030 q->clockid = -1; 2031 q->flags = TAPRIO_FLAGS_INVALID; 2032 2033 list_add(&q->taprio_list, &taprio_list); 2034 2035 if (sch->parent != TC_H_ROOT) { 2036 NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc"); 2037 return -EOPNOTSUPP; 2038 } 2039 2040 if (!netif_is_multiqueue(dev)) { 2041 NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required"); 2042 return -EOPNOTSUPP; 2043 } 2044 2045 /* pre-allocate qdisc, attachment can't fail */ 2046 q->qdiscs = kcalloc(dev->num_tx_queues, 2047 sizeof(q->qdiscs[0]), 2048 GFP_KERNEL); 2049 2050 if (!q->qdiscs) 2051 return -ENOMEM; 2052 2053 if (!opt) 2054 return -EINVAL; 2055 2056 for (i = 0; i < dev->num_tx_queues; i++) { 2057 struct netdev_queue *dev_queue; 2058 struct Qdisc *qdisc; 2059 2060 dev_queue = netdev_get_tx_queue(dev, i); 2061 qdisc = qdisc_create_dflt(dev_queue, 2062 &pfifo_qdisc_ops, 2063 TC_H_MAKE(TC_H_MAJ(sch->handle), 2064 TC_H_MIN(i + 1)), 2065 extack); 2066 if (!qdisc) 2067 return -ENOMEM; 2068 2069 if (i < dev->real_num_tx_queues) 2070 qdisc_hash_add(qdisc, false); 2071 2072 q->qdiscs[i] = qdisc; 2073 } 2074 2075 taprio_detect_broken_mqprio(q); 2076 2077 return taprio_change(sch, opt, extack); 2078 } 2079 2080 static void taprio_attach(struct Qdisc *sch) 2081 { 2082 struct taprio_sched *q = qdisc_priv(sch); 2083 struct net_device *dev = qdisc_dev(sch); 2084 unsigned int ntx; 2085 2086 /* Attach underlying qdisc */ 2087 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { 2088 struct Qdisc *qdisc = q->qdiscs[ntx]; 2089 struct Qdisc *old; 2090 2091 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { 2092 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 2093 old = dev_graft_qdisc(qdisc->dev_queue, qdisc); 2094 } else { 2095 old = dev_graft_qdisc(qdisc->dev_queue, sch); 2096 qdisc_refcount_inc(sch); 2097 } 2098 if (old) 2099 qdisc_put(old); 2100 } 2101 2102 /* access to the child qdiscs is not needed in offload mode */ 2103 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { 2104 kfree(q->qdiscs); 2105 q->qdiscs = NULL; 2106 } 2107 } 2108 2109 static struct netdev_queue *taprio_queue_get(struct Qdisc *sch, 2110 unsigned long cl) 2111 { 2112 struct net_device *dev = qdisc_dev(sch); 2113 unsigned long ntx = cl - 1; 2114 2115 if (ntx >= dev->num_tx_queues) 2116 return NULL; 2117 2118 return netdev_get_tx_queue(dev, ntx); 2119 } 2120 2121 static int taprio_graft(struct Qdisc *sch, unsigned long cl, 2122 struct Qdisc *new, struct Qdisc **old, 2123 struct netlink_ext_ack *extack) 2124 { 2125 struct taprio_sched *q = qdisc_priv(sch); 2126 struct net_device *dev = qdisc_dev(sch); 2127 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 2128 2129 if (!dev_queue) 2130 return -EINVAL; 2131 2132 if (dev->flags & IFF_UP) 2133 dev_deactivate(dev); 2134 2135 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { 2136 *old = dev_graft_qdisc(dev_queue, new); 2137 } else { 2138 *old = q->qdiscs[cl - 1]; 2139 q->qdiscs[cl - 1] = new; 2140 } 2141 2142 if (new) 2143 new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 2144 2145 if (dev->flags & IFF_UP) 2146 dev_activate(dev); 2147 2148 return 0; 2149 } 2150 2151 static int dump_entry(struct sk_buff *msg, 2152 const struct sched_entry *entry) 2153 { 2154 struct nlattr *item; 2155 2156 item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY); 2157 if (!item) 2158 return -ENOSPC; 2159 2160 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index)) 2161 goto nla_put_failure; 2162 2163 if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command)) 2164 goto nla_put_failure; 2165 2166 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, 2167 entry->gate_mask)) 2168 goto nla_put_failure; 2169 2170 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL, 2171 entry->interval)) 2172 goto nla_put_failure; 2173 2174 return nla_nest_end(msg, item); 2175 2176 nla_put_failure: 2177 nla_nest_cancel(msg, item); 2178 return -1; 2179 } 2180 2181 static int dump_schedule(struct sk_buff *msg, 2182 const struct sched_gate_list *root) 2183 { 2184 struct nlattr *entry_list; 2185 struct sched_entry *entry; 2186 2187 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME, 2188 root->base_time, TCA_TAPRIO_PAD)) 2189 return -1; 2190 2191 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, 2192 root->cycle_time, TCA_TAPRIO_PAD)) 2193 return -1; 2194 2195 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, 2196 root->cycle_time_extension, TCA_TAPRIO_PAD)) 2197 return -1; 2198 2199 entry_list = nla_nest_start_noflag(msg, 2200 TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST); 2201 if (!entry_list) 2202 goto error_nest; 2203 2204 list_for_each_entry(entry, &root->entries, list) { 2205 if (dump_entry(msg, entry) < 0) 2206 goto error_nest; 2207 } 2208 2209 nla_nest_end(msg, entry_list); 2210 return 0; 2211 2212 error_nest: 2213 nla_nest_cancel(msg, entry_list); 2214 return -1; 2215 } 2216 2217 static int taprio_dump_tc_entries(struct sk_buff *skb, 2218 struct sched_gate_list *sched) 2219 { 2220 struct nlattr *n; 2221 int tc; 2222 2223 for (tc = 0; tc < TC_MAX_QUEUE; tc++) { 2224 n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY); 2225 if (!n) 2226 return -EMSGSIZE; 2227 2228 if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc)) 2229 goto nla_put_failure; 2230 2231 if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU, 2232 sched->max_sdu[tc])) 2233 goto nla_put_failure; 2234 2235 nla_nest_end(skb, n); 2236 } 2237 2238 return 0; 2239 2240 nla_put_failure: 2241 nla_nest_cancel(skb, n); 2242 return -EMSGSIZE; 2243 } 2244 2245 static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) 2246 { 2247 struct taprio_sched *q = qdisc_priv(sch); 2248 struct net_device *dev = qdisc_dev(sch); 2249 struct sched_gate_list *oper, *admin; 2250 struct tc_mqprio_qopt opt = { 0 }; 2251 struct nlattr *nest, *sched_nest; 2252 2253 oper = rtnl_dereference(q->oper_sched); 2254 admin = rtnl_dereference(q->admin_sched); 2255 2256 mqprio_qopt_reconstruct(dev, &opt); 2257 2258 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 2259 if (!nest) 2260 goto start_error; 2261 2262 if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt)) 2263 goto options_error; 2264 2265 if (!FULL_OFFLOAD_IS_ENABLED(q->flags) && 2266 nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid)) 2267 goto options_error; 2268 2269 if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags)) 2270 goto options_error; 2271 2272 if (q->txtime_delay && 2273 nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay)) 2274 goto options_error; 2275 2276 if (oper && taprio_dump_tc_entries(skb, oper)) 2277 goto options_error; 2278 2279 if (oper && dump_schedule(skb, oper)) 2280 goto options_error; 2281 2282 if (!admin) 2283 goto done; 2284 2285 sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED); 2286 if (!sched_nest) 2287 goto options_error; 2288 2289 if (dump_schedule(skb, admin)) 2290 goto admin_error; 2291 2292 nla_nest_end(skb, sched_nest); 2293 2294 done: 2295 return nla_nest_end(skb, nest); 2296 2297 admin_error: 2298 nla_nest_cancel(skb, sched_nest); 2299 2300 options_error: 2301 nla_nest_cancel(skb, nest); 2302 2303 start_error: 2304 return -ENOSPC; 2305 } 2306 2307 static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) 2308 { 2309 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 2310 2311 if (!dev_queue) 2312 return NULL; 2313 2314 return dev_queue->qdisc_sleeping; 2315 } 2316 2317 static unsigned long taprio_find(struct Qdisc *sch, u32 classid) 2318 { 2319 unsigned int ntx = TC_H_MIN(classid); 2320 2321 if (!taprio_queue_get(sch, ntx)) 2322 return 0; 2323 return ntx; 2324 } 2325 2326 static int taprio_dump_class(struct Qdisc *sch, unsigned long cl, 2327 struct sk_buff *skb, struct tcmsg *tcm) 2328 { 2329 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 2330 2331 tcm->tcm_parent = TC_H_ROOT; 2332 tcm->tcm_handle |= TC_H_MIN(cl); 2333 tcm->tcm_info = dev_queue->qdisc_sleeping->handle; 2334 2335 return 0; 2336 } 2337 2338 static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, 2339 struct gnet_dump *d) 2340 __releases(d->lock) 2341 __acquires(d->lock) 2342 { 2343 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 2344 2345 sch = dev_queue->qdisc_sleeping; 2346 if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 || 2347 qdisc_qstats_copy(d, sch) < 0) 2348 return -1; 2349 return 0; 2350 } 2351 2352 static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) 2353 { 2354 struct net_device *dev = qdisc_dev(sch); 2355 unsigned long ntx; 2356 2357 if (arg->stop) 2358 return; 2359 2360 arg->count = arg->skip; 2361 for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) { 2362 if (!tc_qdisc_stats_dump(sch, ntx + 1, arg)) 2363 break; 2364 } 2365 } 2366 2367 static struct netdev_queue *taprio_select_queue(struct Qdisc *sch, 2368 struct tcmsg *tcm) 2369 { 2370 return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent)); 2371 } 2372 2373 static const struct Qdisc_class_ops taprio_class_ops = { 2374 .graft = taprio_graft, 2375 .leaf = taprio_leaf, 2376 .find = taprio_find, 2377 .walk = taprio_walk, 2378 .dump = taprio_dump_class, 2379 .dump_stats = taprio_dump_class_stats, 2380 .select_queue = taprio_select_queue, 2381 }; 2382 2383 static struct Qdisc_ops taprio_qdisc_ops __read_mostly = { 2384 .cl_ops = &taprio_class_ops, 2385 .id = "taprio", 2386 .priv_size = sizeof(struct taprio_sched), 2387 .init = taprio_init, 2388 .change = taprio_change, 2389 .destroy = taprio_destroy, 2390 .reset = taprio_reset, 2391 .attach = taprio_attach, 2392 .peek = taprio_peek, 2393 .dequeue = taprio_dequeue, 2394 .enqueue = taprio_enqueue, 2395 .dump = taprio_dump, 2396 .owner = THIS_MODULE, 2397 }; 2398 2399 static struct notifier_block taprio_device_notifier = { 2400 .notifier_call = taprio_dev_notifier, 2401 }; 2402 2403 static int __init taprio_module_init(void) 2404 { 2405 int err = register_netdevice_notifier(&taprio_device_notifier); 2406 2407 if (err) 2408 return err; 2409 2410 return register_qdisc(&taprio_qdisc_ops); 2411 } 2412 2413 static void __exit taprio_module_exit(void) 2414 { 2415 unregister_qdisc(&taprio_qdisc_ops); 2416 unregister_netdevice_notifier(&taprio_device_notifier); 2417 } 2418 2419 module_init(taprio_module_init); 2420 module_exit(taprio_module_exit); 2421 MODULE_LICENSE("GPL"); 2422