/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
	__QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
	/* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
	 * Use qdisc_run_begin/end() or qdisc_is_running() instead.
	 */
	__QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED	BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING	BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | \
				 QDISC_STATE_DRAINING)

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or for
				      * a non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	unsigned long		state2; /* must be written under qdisc spinlock */
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	struct rcu_head		rcu;

	/* private data */
	long privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}
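/* Illustrative sketch (not part of this header): a lockless reader would
 * typically pair qdisc_refcount_inc_nz() with RCU, bailing out when the
 * qdisc is concurrently being released. The surrounding lookup and the
 * use(q) consumer are hypothetical; only the RCU/refcount pattern matters.
 *
 *	rcu_read_lock();
 *	q = rcu_dereference(dev_queue->qdisc);
 *	if (q)
 *		q = qdisc_refcount_inc_nz(q);	// NULL if already released
 *	rcu_read_unlock();
 *	if (q) {
 *		use(q);				// hypothetical consumer
 *		qdisc_put_unlocked(q);		// drop our reference
 *	}
 */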
/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
 * root_lock section, or provide their own memory barriers -- ordering
 * against qdisc_run_begin/end() atomic bit operations.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return nolock_qdisc_is_empty(qdisc);
	return !READ_ONCE(qdisc->q.qlen);
}

/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* Paired with smp_mb__after_atomic() to make sure
		 * STATE_MISSED checking is synchronized with clearing
		 * in pfifo_fast_dequeue().
		 */
		smp_mb__before_atomic();

		/* If the MISSED flag is set, it means another thread has
		 * set the MISSED flag before the second spin_trylock(), so
		 * we can return false here to avoid multiple CPUs doing
		 * the set_bit() and second spin_trylock() concurrently.
		 */
		if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Set the MISSED flag before the second spin_trylock();
		 * if the second spin_trylock() returns false, it means
		 * another CPU holding the lock will do the dequeuing for us,
		 * or it will see the MISSED flag set after releasing the
		 * lock and reschedule the net_tx_action() to do the
		 * dequeuing.
		 */
		set_bit(__QDISC_STATE_MISSED, &qdisc->state);

		/* spin_trylock() only has load-acquire semantics, so use
		 * smp_mb__after_atomic() to ensure STATE_MISSED is set
		 * before doing the second spin_trylock().
		 */
		smp_mb__after_atomic();

		/* Retry in case the other CPU did not see the new flag
		 * before it released the lock at the end of qdisc_run_end().
		 */
		return spin_trylock(&qdisc->seqlock);
	}
	return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		__clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
	}
}
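/* Illustrative sketch (not part of this header): the canonical caller-side
 * shape of the run guard, as used by the transmit path around qdisc_run().
 * The loop body is hypothetical; the point is that only one CPU "runs" a
 * given qdisc at a time, and the guard must always be released with
 * qdisc_run_end().
 *
 *	if (qdisc_run_begin(q)) {
 *		while (dequeue_and_xmit_one(q))	// hypothetical helper
 *			;
 *		qdisc_run_end(q);
 *	}
 */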
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
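/* Illustrative sketch (not part of this header): how a dequeue path might
 * bound bulk dequeueing by the BQL budget. Names other than qdisc_may_bulk()
 * and qdisc_avail_bulklimit() are hypothetical.
 *
 *	int budget = qdisc_avail_bulklimit(txq);
 *
 *	if (qdisc_may_bulk(q)) {
 *		while (budget > 0 && (skb = try_dequeue(q)) != NULL) {
 *			budget -= qdisc_pkt_len(skb);
 *			add_to_burst(skb);	// hypothetical batching
 *		}
 *	}
 */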
struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long,
					  struct netlink_ext_ack *);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void			(*change_real_num_tx)(struct Qdisc *sch,
						      unsigned int new_real_tx);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};
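/* Illustrative sketch (not part of this header): the minimal shape of a
 * Qdisc_ops definition for a hypothetical "toyfifo" qdisc, registered with
 * register_qdisc(). The toyfifo_* callbacks are assumptions for the example;
 * the qdisc_dequeue_head/qdisc_peek_head/qdisc_reset_queue helpers below in
 * this header are commonly plugged in directly, as sch_fifo does.
 *
 *	static struct Qdisc_ops toyfifo_qdisc_ops __read_mostly = {
 *		.id		= "toyfifo",
 *		.priv_size	= sizeof(struct toyfifo_sched_data),
 *		.enqueue	= toyfifo_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.init		= toyfifo_init,
 *		.reset		= qdisc_reset_queue,
 *		.change		= toyfifo_change,
 *		.dump		= toyfifo_dump,
 *		.owner		= THIS_MODULE,
 *	};
 */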
struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;

		/* used in the skb_tc_reinsert function */
		struct {
			bool		ingress;
			struct gnet_stats_queue *qstats;
		};
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void*			(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto*, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, u32,
					  struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
	u16			mru;
	bool			post_ct;
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain)					\
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp)					\
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
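/* Illustrative sketch (not part of this header): a qdisc that needs private
 * per-skb state typically overlays it on qdisc_skb_cb::data and validates the
 * size once at compile time. The toy_skb_cb type and accessor are
 * hypothetical; netem uses this exact pattern for its own cb.
 *
 *	struct toy_skb_cb {
 *		u64 time_to_send;
 *	};
 *
 *	static inline struct toy_skb_cb *toy_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct toy_skb_cb));
 *		return (struct toy_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */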
static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_lock_bh(qdisc_lock(q));
	else
		spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_unlock_bh(qdisc_lock(q));
	else
		spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
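/* Illustrative sketch (not part of this header): a ->change() handler
 * applying a new configuration under the tree lock, so the datapath never
 * sees a half-updated qdisc. The new_limit value and its parsing are
 * hypothetical; RTNL is already held on this path.
 *
 *	sch_tree_lock(sch);
 *	sch->limit = new_limit;		// hypothetical parsed attribute
 *	sch_tree_unlock(sch);
 */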
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
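/* Illustrative sketch (not part of this header): a classful qdisc usually
 * embeds Qdisc_class_common at the start of its class struct, so ->find()
 * reduces to a hash lookup plus container_of(); HTB follows this pattern.
 * The toy_* names are hypothetical, and qdisc_priv() is the usual accessor
 * for Qdisc::privdata.
 *
 *	struct toy_class {
 *		struct Qdisc_class_common common;
 *		// scheduler-specific state follows
 *	};
 *
 *	static unsigned long toy_find(struct Qdisc *sch, u32 classid)
 *	{
 *		struct toy_sched_data *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		if (!clc)
 *			return 0;
 *		return (unsigned long)container_of(clc, struct toy_class, common);
 *	}
 */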
static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device whose queue index is greater than or
 * equal to i.
 */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
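/* Illustrative sketch (not part of this header): a classful parent enqueueing
 * into a child qdisc. Only NET_XMIT_SUCCESS means the skb was accepted; for
 * other codes, net_xmit_drop_count() tells the parent whether to account a
 * drop (a "stolen" skb is not counted). TBF's enqueue looks like this; the
 * child lookup itself is assumed.
 *
 *	ret = qdisc_enqueue(skb, child, to_free);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 *	sch->qstats.backlog += qdisc_pkt_len(skb);
 *	sch->q.qlen++;
 *	return NET_XMIT_SUCCESS;
 */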
static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
				  __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	u64_stats_add(&bstats->bytes, bytes);
	u64_stats_add(&bstats->packets, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
					      __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };

	gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
	*qlen = qstats.qlen + qdisc_qlen(sch);
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}
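/* Illustrative sketch (not part of this header): a bounded FIFO enqueue built
 * from these helpers, in the style of sch_fifo's pfifo_enqueue(). Everything
 * used is real API from this header; only the function itself is hypothetical.
 *
 *	static int toyfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_drop(skb, sch, to_free);	// NET_XMIT_DROP
 *	}
 */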
static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
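/* Illustrative sketch (not part of this header): the shaper dequeue pattern
 * behind qdisc_peek_dequeued()/qdisc_dequeue_peeked(). A rate-limiting qdisc
 * peeks first and only commits the dequeue once the packet may actually be
 * sent; TBF works this way. The budget check is hypothetical pseudocode.
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);	// often qdisc_peek_dequeued
 *	if (skb) {
 *		if (packet_fits_in_budget(skb)) {	// hypothetical
 *			skb = qdisc_dequeue_peeked(q->qdisc);
 *			return skb;
 *		}
 *		// otherwise leave the skb queued and arm a watchdog
 *	}
 */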
static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}
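/* Illustrative sketch (not part of this header): a classful qdisc's ->graft()
 * usually reduces to qdisc_replace(), which swaps the child under the tree
 * lock, purges the old child's queue, and propagates the qlen/backlog change
 * up the tree. The q->child pointer is a hypothetical private field.
 *
 *	static int toy_graft(struct Qdisc *sch, unsigned long arg,
 *			     struct Qdisc *new, struct Qdisc **old,
 *			     struct netlink_ext_ack *extack)
 *	{
 *		if (new == NULL)
 *			new = &noop_qdisc;
 *
 *		*old = qdisc_replace(sch, new, &q->child);
 *		return 0;
 *	}
 */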
static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
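/* Worked example (not part of this header): assume a psched_ratecfg
 * precomputed for 12.5 MB/s (100 Mbit/s), so that mult/shift approximate
 * ns = len * 10^9 / rate_bytes_ps. A 1500-byte packet with no overhead then
 * costs about 1500 * 1e9 / 12.5e6 = 120000 ns (120 us) on the wire.
 *
 * For TC_LINKLAYER_ATM, the length is first rounded up to whole 48-byte
 * payload cells and billed at 53 bytes per cell:
 *	DIV_ROUND_UP(1500, 48) = 32 cells -> 32 * 53 = 1696 bytes,
 * so the same packet costs 1696 * 1e9 / 12.5e6 = 135680 ns.
 */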
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
	u64	rate_pkts_ps; /* packets per second */
	u32	mult;
	u8	shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
				  unsigned int pkt_num)
{
	return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

#endif