// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

#define TAPRIO_STAT_NOT_SET	(~0ULL)

#include "sch_mqprio_lib.h"

static LIST_HEAD(taprio_list);
static struct static_key_false taprio_have_broken_mqprio;
static struct static_key_false taprio_have_working_mqprio;

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_SUPPORTED_FLAGS \
	(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
	/* Durations between this GCL entry and the GCL entry where the
	 * respective traffic class gate closes
	 */
	u64 gate_duration[TC_MAX_QUEUE];
	atomic_t budget[TC_MAX_QUEUE];
	/* The qdisc makes some effort so that no packet leaves
	 * after this time
	 */
	ktime_t gate_close_time[TC_MAX_QUEUE];
	struct list_head list;
	/* Used to calculate when to advance the schedule */
	ktime_t end_time;
	ktime_t next_txtime;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	/* Longest non-zero contiguous gate durations per traffic class,
	 * or 0 if a traffic class gate never opens during the schedule.
	 */
	u64 max_open_gate_duration[TC_MAX_QUEUE];
	u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
	u32 max_sdu[TC_MAX_QUEUE]; /* for dump */
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_end_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	bool offloaded;
	bool detected_mqprio;
	bool broken_mqprio;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	int cur_txq[TC_MAX_QUEUE];
	u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */
	u32 fp[TC_QOPT_MAX_QUEUE]; /* only for dump and offloading */
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static void taprio_calculate_gate_durations(struct taprio_sched *q,
					    struct sched_gate_list *sched)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *entry, *cur;
	int tc;

	list_for_each_entry(entry, &sched->entries, list) {
		u32 gates_still_open = entry->gate_mask;

		/* For each traffic class, calculate each open gate duration,
		 * starting at this schedule entry and ending at the schedule
		 * entry containing a gate close event for that TC.
		 */
		cur = entry;

		do {
			if (!gates_still_open)
				break;

			for (tc = 0; tc < num_tc; tc++) {
				if (!(gates_still_open & BIT(tc)))
					continue;

				if (cur->gate_mask & BIT(tc))
					entry->gate_duration[tc] += cur->interval;
				else
					gates_still_open &= ~BIT(tc);
			}

			cur = list_next_entry_circular(cur, &sched->entries, list);
		} while (cur != entry);

		/* Keep track of the maximum gate duration for each traffic
		 * class, taking care to not confuse a traffic class which is
		 * temporarily closed with one that is always closed.
		 */
		for (tc = 0; tc < num_tc; tc++)
			if (entry->gate_duration[tc] &&
			    sched->max_open_gate_duration[tc] < entry->gate_duration[tc])
				sched->max_open_gate_duration[tc] = entry->gate_duration[tc];
	}
}

static bool taprio_entry_allows_tx(ktime_t skb_end_time,
				   struct sched_entry *entry, int tc)
{
	return ktime_before(skb_end_time, entry->gate_close_time[tc]);
}

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
	/* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
	enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

	switch (tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, tk_offset);
	}
}

static ktime_t taprio_get_time(const struct taprio_sched *q)
{
	return taprio_mono_to_any(q, ktime_get());
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has been already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}

static int duration_to_length(struct taprio_sched *q, u64 duration)
{
	return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte));
}

/* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the
 * q->max_sdu[] requested by the user and the max_sdu dynamically determined by
 * the maximum open gate durations at the given link speed.
 */
static void taprio_update_queue_max_sdu(struct taprio_sched *q,
					struct sched_gate_list *sched,
					struct qdisc_size_table *stab)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	u32 max_sdu_from_user;
	u32 max_sdu_dynamic;
	u32 max_sdu;
	int tc;

	for (tc = 0; tc < num_tc; tc++) {
		max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX;

		/* TC gate never closes => keep the queueMaxSDU
		 * selected by the user
		 */
		if (sched->max_open_gate_duration[tc] == sched->cycle_time) {
			max_sdu_dynamic = U32_MAX;
		} else {
			u32 max_frm_len;

			max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]);
			/* Compensate for L1 overhead from size table,
			 * but don't let the frame size go negative
			 */
			if (stab) {
				max_frm_len -= stab->szopts.overhead;
				max_frm_len = max_t(int, max_frm_len,
						    dev->hard_header_len + 1);
			}
			max_sdu_dynamic = max_frm_len - dev->hard_header_len;
			if (max_sdu_dynamic > dev->max_mtu)
				max_sdu_dynamic = U32_MAX;
		}

		max_sdu = min(max_sdu_dynamic, max_sdu_from_user);

		if (max_sdu != U32_MAX) {
			sched->max_frm_len[tc] = max_sdu + dev->hard_header_len;
			sched->max_sdu[tc] = max_sdu;
		} else {
			sched->max_frm_len[tc] = U32_MAX; /* never oversized */
			sched->max_sdu[tc] = 0;
		}
	}
}

/* Returns the entry corresponding to next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of current entry to the next time it's
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}

/* Devices with full offload are expected to honor this in hardware */
static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
					     struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *sched;
	int prio = skb->priority;
	bool exceeds = false;
	u8 tc;

	tc = netdev_get_prio_tc_map(dev, prio);

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	if (sched && skb->len > sched->max_frm_len[tc])
		exceeds = true;
	rcu_read_unlock();

	return exceeds;
}

static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
			      struct Qdisc *child, struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);

	/* sk_flags are only safe to use on full sockets. */
	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
				    struct Qdisc *child,
				    struct sk_buff **to_free)
{
	unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
	netdev_features_t features = netif_skb_features(skb);
	struct sk_buff *segs, *nskb;
	int ret;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	skb_list_walk_safe(segs, segs, nskb) {
		skb_mark_not_on_list(segs);
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		slen += segs->len;

		/* FIXME: we should be segmenting to a smaller size
		 * rather than dropping these
		 */
		if (taprio_skb_exceeds_queue_max_sdu(sch, segs))
			ret = qdisc_drop(segs, sch, to_free);
		else
			ret = taprio_enqueue_one(segs, sch, child, to_free);

		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			numsegs++;
		}
	}

	if (numsegs > 1)
		qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
	consume_skb(skb);

	return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) {
		/* Large packets might not be transmitted when the transmission
		 * duration exceeds any configured interval. Therefore, segment
		 * the skb into smaller chunks. Drivers with full offload are
		 * expected to handle this in hardware.
		 */
		if (skb_is_gso(skb))
			return taprio_enqueue_segmented(skb, sch, child,
							to_free);

		return qdisc_drop(skb, sch, to_free);
	}

	return taprio_enqueue_one(skb, sch, child, to_free);
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented");
	return NULL;
}

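/* Refill each traffic class' transmission budget for @entry. The budget is
 * expressed in bytes: the time the gate stays open, divided by the
 * transmission time per byte at the current link speed.
 */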
static void taprio_set_budgets(struct taprio_sched *q,
			       struct sched_gate_list *sched,
			       struct sched_entry *entry)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	int tc, budget;

	for (tc = 0; tc < num_tc; tc++) {
		/* Traffic classes which never close have infinite budget */
		if (entry->gate_duration[tc] == sched->cycle_time)
			budget = INT_MAX;
		else
			budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC,
					   atomic64_read(&q->picos_per_byte));

		atomic_set(&entry->budget[tc], budget);
	}
}

/* When an skb is sent, it consumes from the budget of all traffic classes */
static int taprio_update_budgets(struct sched_entry *entry, size_t len,
				 int tc_consumed, int num_tc)
{
	int tc, budget, new_budget = 0;

	for (tc = 0; tc < num_tc; tc++) {
		budget = atomic_read(&entry->budget[tc]);
		/* Don't consume from infinite budget */
		if (budget == INT_MAX) {
			if (tc == tc_consumed)
				new_budget = budget;
			continue;
		}

		if (tc == tc_consumed)
			new_budget = atomic_sub_return(len, &entry->budget[tc]);
		else
			atomic_sub(len, &entry->budget[tc]);
	}

	return new_budget;
}

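/* Dequeue one skb from the child qdisc attached to TXQ @txq, but only if the
 * gate for its traffic class is open, the transmission fits before the gate
 * closes (guard band) and there is budget left for it. All of these checks
 * are skipped in txtime-assist mode, where the transmit time has already been
 * stamped on the skb at enqueue.
 */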
static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
					       struct sched_entry *entry,
					       u32 gate_mask)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *child = q->qdiscs[txq];
	int num_tc = netdev_get_num_tc(dev);
	struct sk_buff *skb;
	ktime_t guard;
	int prio;
	int len;
	u8 tc;

	if (unlikely(!child))
		return NULL;

	if (TXTIME_ASSIST_IS_ENABLED(q->flags))
		goto skip_peek_checks;

	skb = child->ops->peek(child);
	if (!skb)
		return NULL;

	prio = skb->priority;
	tc = netdev_get_prio_tc_map(dev, prio);

	if (!(gate_mask & BIT(tc)))
		return NULL;

	len = qdisc_pkt_len(skb);
	guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len));

	/* In the case that there's no gate entry, there's no
	 * guard band ...
	 */
	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
	    !taprio_entry_allows_tx(guard, entry, tc))
		return NULL;

	/* ... and no budget. */
	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
	    taprio_update_budgets(entry, len, tc, num_tc) < 0)
		return NULL;

skip_peek_checks:
	skb = child->ops->dequeue(child);
	if (unlikely(!skb))
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}

static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
{
	int offset = dev->tc_to_txq[tc].offset;
	int count = dev->tc_to_txq[tc].count;

	(*txq)++;
	if (*txq == offset + count)
		*txq = offset;
}

/* Prioritize higher traffic classes, and select among TXQs belonging to the
 * same TC using round robin
 */
static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
						  struct sched_entry *entry,
						  u32 gate_mask)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int num_tc = netdev_get_num_tc(dev);
	struct sk_buff *skb;
	int tc;

	for (tc = num_tc - 1; tc >= 0; tc--) {
		int first_txq = q->cur_txq[tc];

		if (!(gate_mask & BIT(tc)))
			continue;

		do {
			skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc],
						      entry, gate_mask);

			taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);

			if (q->cur_txq[tc] >= dev->num_tx_queues)
				q->cur_txq[tc] = first_txq;

			if (skb)
				return skb;
		} while (q->cur_txq[tc] != first_txq);
	}

	return NULL;
}

/* Broken way of prioritizing smaller TXQ indices and ignoring the traffic
 * class other than to determine whether the gate is open or not
 */
static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch,
						   struct sched_entry *entry,
						   u32 gate_mask)
{
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask);
		if (skb)
			return skb;
	}

	return NULL;
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	if (!gate_mask)
		goto done;

	if (static_branch_unlikely(&taprio_have_broken_mqprio) &&
	    !static_branch_likely(&taprio_have_working_mqprio)) {
		/* Single NIC kind which is broken */
		skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
	} else if (static_branch_likely(&taprio_have_working_mqprio) &&
		   !static_branch_unlikely(&taprio_have_broken_mqprio)) {
		/* Single NIC kind which prioritizes properly */
		skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
	} else {
		/* Mixed NIC kinds present in system, need dynamic testing */
		if (q->broken_mqprio)
			skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
		else
			skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
	}

done:
	rcu_read_unlock();

	return skb;
}

static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t end_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the end_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, end_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the end_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

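/* hrtimer callback that advances the schedule: it publishes the next gate
 * control list entry (or switches to the admin schedule), refills its budgets
 * and gate close times, and rearms itself for that entry's end time.
 */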
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct net_device *dev = qdisc_dev(q->root);
	struct sched_gate_list *oper, *admin;
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t end_time;
	int tc;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules are pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->end_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		end_time = next->end_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time,
						    oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	end_time = ktime_add_ns(entry->end_time, next->interval);
	end_time = min_t(ktime_t, end_time, oper->cycle_end_time);

	for (tc = 0; tc < num_tc; tc++) {
		if (next->gate_duration[tc] == oper->cycle_time)
			next->gate_close_time[tc] = KTIME_MAX;
		else
			next->gate_close_time[tc] = ktime_add_ns(entry->end_time,
								 next->gate_duration[tc]);
	}

	if (should_change_schedules(admin, oper, end_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		end_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->end_time = end_time;
	taprio_set_budgets(q, oper, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, end_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_TC_ENTRY_INDEX]	= NLA_POLICY_MAX(NLA_U32,
							 TC_QOPT_MAX_QUEUE),
	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	= { .type = NLA_U32 },
	[TCA_TAPRIO_TC_ENTRY_FP]	= NLA_POLICY_RANGE(NLA_U32,
							   TC_FP_EXPRESS,
							   TC_FP_PREEMPTIBLE),
};

static const struct netlink_range_validation_signed taprio_cycle_time_range = {
	.min = 0,
	.max = INT_MAX,
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP] = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           =
		NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]                      =
		NLA_POLICY_MASK(NLA_U32, TAPRIO_SUPPORTED_FLAGS),
	[TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TC_ENTRY]                   = { .type = NLA_NESTED },
};

static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	int min_duration = length_to_duration(q, ETH_ZLEN);
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum ethernet
	 * frame to go out.
	 */
	if (interval < min_duration) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(q, tb, entry, extack);
}

static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(q, n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
				       new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);

		if (cycle < 0 || cycle > INT_MAX) {
			NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
			return -EINVAL;
		}

		new->cycle_time = cycle;
	}

	if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) {
		NL_SET_ERR_MSG(extack, "'cycle_time' is too small");
		return -EINVAL;
	}

	taprio_calculate_gate_durations(q, new);

	return 0;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);

	if (!qopt) {
		if (!dev->num_tc) {
			NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
			return -EINVAL;
		}
		return 0;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* For some reason, in txtime-assist mode, we allow TXQ ranges for
	 * different TCs to overlap, and just validate the TXQ ranges.
	 */
	return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs,
				    extack);
}

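/* Compute the absolute time at which the schedule should (re)start: the
 * base_time itself if it is still in the future, otherwise the beginning of
 * the next cycle after 'now'.
 */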
static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}

static void setup_first_end_time(struct taprio_sched *q,
				 struct sched_gate_list *sched, ktime_t base)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *first;
	ktime_t cycle;
	int tc;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_end_time = ktime_add_ns(base, cycle);

	first->end_time = ktime_add_ns(base, first->interval);
	taprio_set_budgets(q, sched, first);

	for (tc = 0; tc < num_tc; tc++) {
		if (first->gate_duration[tc] == sched->cycle_time)
			first->gate_close_time[tc] = KTIME_MAX;
		else
			first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]);
	}

	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}

static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct sched_gate_list *oper, *admin;
	struct qdisc_size_table *stab;
	struct taprio_sched *q;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	list_for_each_entry(q, &taprio_list, taprio_list) {
		if (dev != qdisc_dev(q->root))
			continue;

		taprio_set_picos_per_byte(dev, q);

		stab = rtnl_dereference(q->root->stab);

		oper = rtnl_dereference(q->oper_sched);
		if (oper)
			taprio_update_queue_max_sdu(q, oper, stab);

		admin = rtnl_dereference(q->admin_sched);
		if (admin)
			taprio_update_queue_max_sdu(q, admin, stab);

		break;
	}

	return NOTIFY_DONE;
}

static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u64 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}

static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
			    GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}

struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);

/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump() the
 * users looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	switch_schedules(q, &admin, &oper);
}

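/* Convert a bitmask of traffic classes into a bitmask of the TX queues that
 * serve them, according to the device's tc_to_txq map.
 */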
static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
	u32 i, queue_mask = 0;

	for (i = 0; i < dev->num_tc; i++) {
		u32 offset, count;

		if (!(tc_mask & BIT(i)))
			continue;

		offset = dev->tc_to_txq[i].offset;
		count = dev->tc_to_txq[i].count;

		queue_mask |= GENMASK(offset + count - 1, offset);
	}

	return queue_mask;
}

static void taprio_sched_to_offload(struct net_device *dev,
				    struct sched_gate_list *sched,
				    struct tc_taprio_qopt_offload *offload,
				    const struct tc_taprio_caps *caps)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		if (caps->gate_mask_per_txq)
			e->gate_mask = tc_map_to_queue_mask(dev,
							    entry->gate_mask);
		else
			e->gate_mask = entry->gate_mask;

		i++;
	}

	offload->num_entries = i;
}

static void taprio_detect_broken_mqprio(struct taprio_sched *q)
{
	struct net_device *dev = qdisc_dev(q->root);
	struct tc_taprio_caps caps;

	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
				 &caps, sizeof(caps));

	q->broken_mqprio = caps.broken_mqprio;
	if (q->broken_mqprio)
		static_branch_inc(&taprio_have_broken_mqprio);
	else
		static_branch_inc(&taprio_have_working_mqprio);

	q->detected_mqprio = true;
}

static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
{
	if (!q->detected_mqprio)
		return;

	if (q->broken_mqprio)
		static_branch_dec(&taprio_have_broken_mqprio);
	else
		static_branch_dec(&taprio_have_working_mqprio);
}

static int taprio_enable_offload(struct net_device *dev,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	struct tc_taprio_caps caps;
	int tc, err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
				 &caps, sizeof(caps));

	if (!caps.supports_queue_max_sdu) {
		for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
			if (q->max_sdu[tc]) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Device does not handle queueMaxSDU");
				return -EOPNOTSUPP;
			}
		}
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->cmd = TAPRIO_CMD_REPLACE;
	offload->extack = extack;
	mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt);
	offload->mqprio.extack = extack;
	taprio_sched_to_offload(dev, sched, offload, &caps);
	mqprio_fp_to_offload(q->fp, &offload->mqprio);

	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
		offload->max_sdu[tc] = q->max_sdu[tc];

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG_WEAK(extack,
				    "Device failed to setup taprio offload");
		goto done;
	}

	q->offloaded = true;

done:
	/* The offload structure may linger around via a reference taken by the
	 * device driver, so clear up the netlink extack pointer so that the
	 * driver isn't tempted to dereference data which stopped being valid
	 */
	offload->extack = NULL;
	offload->mqprio.extack = NULL;
	taprio_offload_free(offload);

	return err;
}

static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!q->offloaded)
		return 0;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->cmd = TAPRIO_CMD_DESTROY;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

	q->offloaded = false;

out:
	taprio_offload_free(offload);

	return err;
}

/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct kernel_ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
		enum tk_offsets tk_offset;

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}
		/* This pairs with READ_ONCE() in taprio_mono_to_any */
		WRITE_ONCE(q->tk_offset, tk_offset);

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}

	/* Everything went ok, return success. */
	err = 0;

out:
	return err;
}

static int taprio_parse_tc_entry(struct Qdisc *sch,
				 struct nlattr *opt,
				 u32 max_sdu[TC_QOPT_MAX_QUEUE],
				 u32 fp[TC_QOPT_MAX_QUEUE],
				 unsigned long *seen_tcs,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
	struct net_device *dev = qdisc_dev(sch);
	int err, tc;
	u32 val;

	err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
			       taprio_tc_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
		NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
		return -EINVAL;
	}

	tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
	if (tc >= TC_QOPT_MAX_QUEUE) {
		NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
		return -ERANGE;
	}

	if (*seen_tcs & BIT(tc)) {
		NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
		return -EINVAL;
	}

	*seen_tcs |= BIT(tc);

	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) {
		val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
		if (val > dev->max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
			return -ERANGE;
		}

		max_sdu[tc] = val;
	}

	if (tb[TCA_TAPRIO_TC_ENTRY_FP])
		fp[tc] = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_FP]);

	return 0;
}

static int taprio_parse_tc_entries(struct Qdisc *sch,
				   struct nlattr *opt,
				   struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	u32 max_sdu[TC_QOPT_MAX_QUEUE];
	bool have_preemption = false;
	unsigned long seen_tcs = 0;
	u32 fp[TC_QOPT_MAX_QUEUE];
	struct nlattr *n;
	int tc, rem;
	int err = 0;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		max_sdu[tc] = q->max_sdu[tc];
		fp[tc] = q->fp[tc];
	}

	nla_for_each_nested_type(n, TCA_TAPRIO_ATTR_TC_ENTRY, opt, rem) {
		err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs,
					    extack);
		if (err)
			return err;
	}

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		q->max_sdu[tc] = max_sdu[tc];
		q->fp[tc] = fp[tc];
		if (fp[tc] != TC_FP_EXPRESS)
			have_preemption = true;
	}

	if (have_preemption) {
		if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG(extack,
				       "Preemption only supported with full offload");
			return -EOPNOTSUPP;
		}

		if (!ethtool_dev_mm_supported(dev)) {
			NL_SET_ERR_MSG(extack,
				       "Device does not support preemption");
			return -EOPNOTSUPP;
		}
	}

	return err;
}

static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{
	int i;

	if (!mqprio || mqprio->num_tc != dev->num_tc)
		return -1;

	for (i = 0; i < mqprio->num_tc; i++)
		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
		    dev->tc_to_txq[i].offset != mqprio->offset[i])
			return -1;

	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
			return -1;

	return 0;
}

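/* Parse a netlink schedule change request: build the new admin schedule,
 * configure offload or the advance timer as appropriate, and make the new
 * schedule visible to the datapath under the qdisc lock.
 */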
taprio_sched *q = qdisc_priv(sch); 1810 struct net_device *dev = qdisc_dev(sch); 1811 struct tc_mqprio_qopt *mqprio = NULL; 1812 unsigned long flags; 1813 u32 taprio_flags; 1814 ktime_t start; 1815 int i, err; 1816 1817 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt, 1818 taprio_policy, extack); 1819 if (err < 0) 1820 return err; 1821 1822 if (tb[TCA_TAPRIO_ATTR_PRIOMAP]) 1823 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]); 1824 1825 /* The semantics of the 'flags' argument in relation to 'change()' 1826 * requests, are interpreted following two rules (which are applied in 1827 * this order): (1) an omitted 'flags' argument is interpreted as 1828 * zero; (2) the 'flags' of a "running" taprio instance cannot be 1829 * changed. 1830 */ 1831 taprio_flags = tb[TCA_TAPRIO_ATTR_FLAGS] ? nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]) : 0; 1832 1833 /* txtime-assist and full offload are mutually exclusive */ 1834 if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) && 1835 (taprio_flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) { 1836 NL_SET_ERR_MSG_ATTR(extack, tb[TCA_TAPRIO_ATTR_FLAGS], 1837 "TXTIME_ASSIST and FULL_OFFLOAD are mutually exclusive"); 1838 return -EINVAL; 1839 } 1840 1841 if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) { 1842 NL_SET_ERR_MSG_MOD(extack, 1843 "Changing 'flags' of a running schedule is not supported"); 1844 return -EOPNOTSUPP; 1845 } 1846 q->flags = taprio_flags; 1847 1848 /* Needed for length_to_duration() during netlink attribute parsing */ 1849 taprio_set_picos_per_byte(dev, q); 1850 1851 err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags); 1852 if (err < 0) 1853 return err; 1854 1855 err = taprio_parse_tc_entries(sch, opt, extack); 1856 if (err) 1857 return err; 1858 1859 new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL); 1860 if (!new_admin) { 1861 NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule"); 1862 return -ENOMEM; 1863 } 1864 INIT_LIST_HEAD(&new_admin->entries); 1865 1866 oper = rtnl_dereference(q->oper_sched); 1867 admin = rtnl_dereference(q->admin_sched); 1868 1869 /* no changes - no new mqprio settings */ 1870 if (!taprio_mqprio_cmp(dev, mqprio)) 1871 mqprio = NULL; 1872 1873 if (mqprio && (oper || admin)) { 1874 NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported"); 1875 err = -ENOTSUPP; 1876 goto free_sched; 1877 } 1878 1879 if (mqprio) { 1880 err = netdev_set_num_tc(dev, mqprio->num_tc); 1881 if (err) 1882 goto free_sched; 1883 for (i = 0; i < mqprio->num_tc; i++) { 1884 netdev_set_tc_queue(dev, i, 1885 mqprio->count[i], 1886 mqprio->offset[i]); 1887 q->cur_txq[i] = mqprio->offset[i]; 1888 } 1889 1890 /* Always use supplied priority mappings */ 1891 for (i = 0; i <= TC_BITMASK; i++) 1892 netdev_set_prio_tc_map(dev, i, 1893 mqprio->prio_tc_map[i]); 1894 } 1895 1896 err = parse_taprio_schedule(q, tb, new_admin, extack); 1897 if (err < 0) 1898 goto free_sched; 1899 1900 if (new_admin->num_entries == 0) { 1901 NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule"); 1902 err = -EINVAL; 1903 goto free_sched; 1904 } 1905 1906 err = taprio_parse_clockid(sch, tb, extack); 1907 if (err < 0) 1908 goto free_sched; 1909 1910 taprio_update_queue_max_sdu(q, new_admin, stab); 1911 1912 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) 1913 err = taprio_enable_offload(dev, q, new_admin, extack); 1914 else 1915 err = taprio_disable_offload(dev, q, extack); 1916 if (err) 1917 goto free_sched; 1918 1919 /* Protects against enqueue()/dequeue() */ 1920 
	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		err = taprio_enable_offload(dev, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
			err = -EINVAL;
			goto unlock;
		}

		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
	}

	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	setup_txtime(q, new_admin, start);

	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		/* Not going to race against advance_sched(), but still */
		admin = rcu_replace_pointer(q->admin_sched, new_admin,
					    lockdep_rtnl_is_held());
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_end_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		admin = rcu_replace_pointer(q->admin_sched, new_admin,
					    lockdep_rtnl_is_held());
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);

		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
			taprio_offload_config_changed(q);
	}

	new_admin = NULL;
	err = 0;

	if (!stab)
		NL_SET_ERR_MSG_MOD(extack,
				   "Size table not specified, frame length estimations may be inaccurate");

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}

static void taprio_reset(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	hrtimer_cancel(&q->advance_timer);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			if (q->qdiscs[i])
				qdisc_reset(q->qdiscs[i]);
	}
}
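
/* Tear down the qdisc: cancel the advance timer, remove any hardware
 * offload, drop the per-queue child qdiscs and free both the operational
 * and the admin schedules.
 */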
static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	unsigned int i;

	list_del(&q->taprio_list);

	/* Note that taprio_reset() might not be called if an error
	 * happens in qdisc_create(), after taprio_init() has been called.
	 */
	hrtimer_cancel(&q->advance_timer);
	qdisc_synchronize(sch);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_reset_tc(dev);

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	if (oper)
		call_rcu(&oper->rcu, taprio_free_sched_cb);

	if (admin)
		call_rcu(&admin->rcu, taprio_free_sched_cb);

	taprio_cleanup_broken_mqprio(q);
}

static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i, tc;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;
	q->flags = TAPRIO_FLAGS_INVALID;

	list_add(&q->taprio_list, &taprio_list);

	if (sch->parent != TC_H_ROOT) {
		NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc");
		return -EOPNOTSUPP;
	}

	if (!netif_is_multiqueue(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required");
		return -EOPNOTSUPP;
	}

	q->qdiscs = kcalloc(dev->num_tx_queues, sizeof(q->qdiscs[0]),
			    GFP_KERNEL);
	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
		q->fp[tc] = TC_FP_EXPRESS;

	taprio_detect_broken_mqprio(q);

	return taprio_change(sch, opt, extack);
}
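
/* Invoked once taprio has been installed as the device's root qdisc:
 * decide what each netdev TX queue sees as its root. In full offload mode
 * that is the per-queue child qdisc, in software mode it is taprio itself.
 */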
static void taprio_attach(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *old, *dev_queue_qdisc;

		if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
			struct Qdisc *qdisc = q->qdiscs[ntx];

			/* In offload mode, the root taprio qdisc is bypassed
			 * and the netdev TX queues see the children directly
			 */
			qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
			dev_queue_qdisc = qdisc;
		} else {
			/* In software mode, attach the root taprio qdisc
			 * to all netdev TX queues, so that dev_qdisc_enqueue()
			 * goes through taprio_enqueue().
			 */
			dev_queue_qdisc = sch;
		}
		old = dev_graft_qdisc(dev_queue, dev_queue_qdisc);
		/* The qdisc's refcount needs to be elevated once
		 * for each netdev TX queue it is grafted onto
		 */
		qdisc_refcount_inc(dev_queue_qdisc);
		if (old)
			qdisc_put(old);
	}
}

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	/* In offload mode, the child Qdisc is directly attached to the netdev
	 * TX queue, and thus, we need to keep its refcount elevated in order
	 * to counteract qdisc_graft()'s call to qdisc_put() once per TX queue.
	 * However, save the reference to the new qdisc in the private array in
	 * both software and offload cases, to have an up-to-date reference to
	 * our children.
	 */
	*old = q->qdiscs[cl - 1];
	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		WARN_ON_ONCE(dev_graft_qdisc(dev_queue, new) != *old);
		if (new)
			qdisc_refcount_inc(new);
		if (*old)
			qdisc_put(*old);
	}

	q->qdiscs[cl - 1] = new;
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}
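
/* Serialize one gate control list (base time, cycle time, cycle time
 * extension and all schedule entries) into the netlink message.
 */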
static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}

static int taprio_dump_tc_entries(struct sk_buff *skb,
				  struct taprio_sched *q,
				  struct sched_gate_list *sched)
{
	struct nlattr *n;
	int tc;

	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
		n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
		if (!n)
			return -EMSGSIZE;

		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
				sched->max_sdu[tc]))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc]))
			goto nla_put_failure;

		nla_nest_end(skb, n);
	}

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, n);
	return -EMSGSIZE;
}

static int taprio_put_stat(struct sk_buff *skb, u64 val, u16 attrtype)
{
	if (val == TAPRIO_STAT_NOT_SET)
		return 0;
	if (nla_put_u64_64bit(skb, attrtype, val, TCA_TAPRIO_OFFLOAD_STATS_PAD))
		return -EMSGSIZE;
	return 0;
}

static int taprio_dump_xstats(struct Qdisc *sch, struct gnet_dump *d,
			      struct tc_taprio_qopt_offload *offload,
			      struct tc_taprio_qopt_stats *stats)
{
	struct net_device *dev = qdisc_dev(sch);
	const struct net_device_ops *ops;
	struct sk_buff *skb = d->skb;
	struct nlattr *xstats;
	int err;

	ops = qdisc_dev(sch)->netdev_ops;

	/* FIXME I could use qdisc_offload_dump_helper(), but that messes
	 * with sch->flags depending on whether the device reports taprio
	 * stats, and I'm not sure whether that's a good idea, considering
	 * that stats are optional to the offload itself
	 */
	if (!ops->ndo_setup_tc)
		return 0;

	memset(stats, 0xff, sizeof(*stats));

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err == -EOPNOTSUPP)
		return 0;
	if (err)
		return err;

	xstats = nla_nest_start(skb, TCA_STATS_APP);
	if (!xstats)
		goto err;

	if (taprio_put_stat(skb, stats->window_drops,
			    TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS) ||
	    taprio_put_stat(skb, stats->tx_overruns,
			    TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS))
		goto err_cancel;

	nla_nest_end(skb, xstats);

	return 0;

err_cancel:
	nla_nest_cancel(skb, xstats);
err:
	return -EMSGSIZE;
}

static int taprio_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct tc_taprio_qopt_offload offload = {
		.cmd = TAPRIO_CMD_STATS,
	};

	return taprio_dump_xstats(sch, d, &offload, &offload.stats);
}
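
/* Dump the full qdisc configuration: priority map, clockid, flags,
 * txtime delay, per-TC entries and both the operational and (if present)
 * the admin schedule.
 */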
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;

	mqprio_qopt_reconstruct(dev, &opt);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	rcu_read_lock();

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	if (oper && taprio_dump_tc_entries(skb, q, oper))
		goto options_error_rcu;

	if (oper && dump_schedule(skb, oper))
		goto options_error_rcu;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error_rcu;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	rcu_read_unlock();
	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error_rcu:
	rcu_read_unlock();

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	return -ENOSPC;
}

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return q->qdiscs[ntx];
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct Qdisc *child = taprio_leaf(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = child->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct Qdisc *child = taprio_leaf(sch, cl);
	struct tc_taprio_qopt_offload offload = {
		.cmd = TAPRIO_CMD_QUEUE_STATS,
		.queue_stats = {
			.queue = cl - 1,
		},
	};

	if (gnet_stats_copy_basic(d, NULL, &child->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, child) < 0)
		return -1;

	return taprio_dump_xstats(sch, d, &offload, &offload.queue_stats.stats);
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
			break;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};
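
/* Hook taprio into the qdisc core. The enqueue/dequeue/peek handlers
 * referenced here are defined earlier in this file.
 */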
static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.reset		= taprio_reset,
	.attach		= taprio_attach,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.dump_stats	= taprio_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("taprio");

static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Time Aware Priority qdisc");