/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
	__QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
	/* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
	 * Use qdisc_run_begin/end() or qdisc_is_running() instead.
	 */
	__QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED	BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING	BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | \
				 QDISC_STATE_DRAINING)

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It's true for MQ/MQPRIO slaves, or non
				      * multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	unsigned long		state2; /* must be written under qdisc spinlock */
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	struct rcu_head		rcu;
	netdevice_tracker	dev_tracker;
	/* private data */
	long			privdata[] ____cacheline_aligned;
};

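/* Illustrative sketch (assumption, not part of this excerpt): per-qdisc
 * private state lives in privdata[], sized by Qdisc_ops->priv_size and
 * reached through a qdisc_priv()-style accessor. A hypothetical qdisc
 * could set it up in its init() callback:
 *
 *	struct my_sched_data {			// hypothetical private state
 *		u32 quantum;
 *	};
 *
 *	static int my_init(struct Qdisc *sch, struct nlattr *opt,
 *			   struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		q->quantum = psched_mtu(qdisc_dev(sch));
 *		return 0;
 *	}
 */
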
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}

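/* Illustrative sketch (assumption): an unlocked reader that only holds
 * rcu_read_lock() must use qdisc_refcount_inc_nz() and cope with a
 * concurrent release:
 *
 *	rcu_read_lock();
 *	q = rcu_dereference(txq->qdisc);
 *	if (q)
 *		q = qdisc_refcount_inc_nz(q);
 *	rcu_read_unlock();
 *	if (q) {
 *		// ... use the qdisc ...
 *		qdisc_put(q);		// declared later in this header
 *	}
 */
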
/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
 * root_lock section, or provide their own memory barriers -- ordering
 * against qdisc_run_begin/end() atomic bit operations.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return nolock_qdisc_is_empty(qdisc);
	return !READ_ONCE(qdisc->q.qlen);
}

/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* No need to insist if the MISSED flag was already set.
		 * Note that test_and_set_bit() also gives us memory ordering
		 * guarantees wrt potential earlier enqueue() and below
		 * spin_trylock(), both of which are necessary to prevent races.
		 */
		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Try to take the lock again to make sure that we will either
		 * grab it or the CPU that still has it will see MISSED set
		 * when testing it in qdisc_run_end()
		 */
		return spin_trylock(&qdisc->seqlock);
	}
	return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		/* spin_unlock() only has store-release semantics. The unlock
		 * and test_bit() ordering is a store-load ordering, so a full
		 * memory barrier is needed here.
		 */
		smp_mb();

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		__clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
	}
}

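/* Illustrative sketch (assumption): the transmit path brackets dequeueing
 * with qdisc_run_begin()/qdisc_run_end() so that only one CPU runs a given
 * qdisc; for !TCQ_F_NOLOCK qdiscs the root lock must already be held:
 *
 *	if (qdisc_run_begin(q)) {
 *		// dequeue packets and hand them to the driver
 *		qdisc_run_end(q);
 *	}
 *
 * A false return means another CPU is already running the qdisc; in the
 * NOLOCK case that owner is guaranteed to observe __QDISC_STATE_MISSED in
 * qdisc_run_end() and reschedule the qdisc.
 */
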
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long,
					  struct netlink_ext_ack *);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void			(*change_real_num_tx)(struct Qdisc *sch,
						      unsigned int new_real_tx);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};

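/* Illustrative sketch (assumption, simplified from sch_fifo-like logic):
 * a minimal qdisc supplies enqueue/dequeue/peek and registers its ops,
 * e.g. via register_qdisc() from <net/pkt_sched.h>:
 *
 *	static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *			      struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);	// helper below
 *		return qdisc_drop(skb, sch, to_free);		// helper below
 *	}
 *
 *	static struct Qdisc_ops my_fifo_qdisc_ops __read_mostly = {
 *		.id		= "my_fifo",			// hypothetical
 *		.enqueue	= my_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 */
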
struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void*			(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto*, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, u32,
					  struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

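/* Illustrative sketch (assumption): a qdisc stashes per-packet state in the
 * data[] area of qdisc_skb_cb, validating the size at compile time with
 * qdisc_cb_private_validate() (defined later in this header):
 *
 *	struct my_skb_cb {			// hypothetical cb layout
 *		u32 enqueue_time;
 *	};
 *
 *	static struct my_skb_cb *my_skb_cb(const struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */
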
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain) \
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp) \
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

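/* Illustrative sketch (assumption): a walker holding filter_chain_lock uses
 * tcf_chain_dereference() so lockdep can verify the lock is really held:
 *
 *	mutex_lock(&chain->filter_chain_lock);
 *	for (tp = tcf_chain_dereference(chain->filter_chain, chain); tp;
 *	     tp = tcf_chain_dereference(tp->next, chain)) {
 *		// ... inspect each tcf_proto ...
 *	}
 *	mutex_unlock(&chain->filter_chain_lock);
 */
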
static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_lock_bh(qdisc_lock(q));
	else
		spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_unlock_bh(qdisc_lock(q));
	else
		spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
		default_qdisc_ops : &pfifo_fast_ops;
}

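/* Illustrative sketch (assumption): a qdisc updating parameters that its
 * dequeue path reads concurrently takes the tree lock around the change,
 * typically from its ->change() callback:
 *
 *	sch_tree_lock(sch);
 *	sch->limit = nla_get_u32(tb[TCA_MYQDISC_LIMIT]);  // hypothetical attr
 *	sch_tree_unlock(sch);
 */
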
struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

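/* Illustrative sketch (assumption): a classful qdisc embedding
 * Qdisc_class_common as the *first* member of its class struct can
 * implement ->find() as a thin wrapper around qdisc_class_find():
 *
 *	struct my_class {				// hypothetical
 *		struct Qdisc_class_common common;	// must stay first
 *	};
 *
 *	static unsigned long my_find(struct Qdisc *sch, u32 classid)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);	// hypothetical
 *
 *		return (unsigned long)qdisc_class_find(&q->clhash, classid);
 *	}
 */
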
int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
void qdisc_offload_query_caps(struct net_device *dev,
			      enum tc_setup_type type,
			      void *caps, size_t caps_len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device, at or above queue index @i. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
				  __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	u64_stats_add(&bstats->bytes, bytes);
	u64_stats_add(&bstats->packets, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
					     __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };

	gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
	*qlen = qstats.qlen + qdisc_qlen(sch);
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

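/* Illustrative sketch (assumption, patterned after classful qdiscs): a
 * parent forwards to a child qdisc with qdisc_enqueue() and accounts the
 * packet against itself only when the child accepted it:
 *
 *	ret = qdisc_enqueue(skb, child, to_free);
 *	if (likely(ret == NET_XMIT_SUCCESS)) {
 *		qdisc_qstats_backlog_inc(sch, skb);
 *		sch->q.qlen++;
 *	} else if (net_xmit_drop_count(ret)) {
 *		qdisc_qstats_drop(sch);
 *	}
 *	return ret;
 */
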
/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

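/* Illustrative sketch (assumption): a shaper peeks its child's head packet
 * and only collects it with qdisc_dequeue_peeked() once it may actually be
 * released, keeping the peeked skb accounted in the child meanwhile:
 *
 *	struct sk_buff *skb = child->ops->peek(child);
 *
 *	if (skb && tokens_available(skb))	// hypothetical admission test
 *		skb = qdisc_dequeue_peeked(child);
 *	else
 *		skb = NULL;
 */
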
static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u16	mpu;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (len < r->mpu)
		len = r->mpu;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

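/* Illustrative sketch (assumption): shapers precompute a psched_ratecfg
 * once (see psched_ratecfg_precompute() below) and then convert packet
 * sizes to transmit times on the fast path:
 *
 *	psched_ratecfg_precompute(&rate, &conf, rate64);
 *	ns = psched_l2t_ns(&rate, qdisc_pkt_len(skb));
 *
 * For example, at 125000000 bytes/s (1 Gbit/s) a 1500 byte packet yields
 * roughly 12000 ns.
 */
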
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->mpu = r->mpu;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
	u64	rate_pkts_ps; /* packets per second */
	u32	mult;
	u8	shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
				  unsigned int pkt_num)
{
	return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

#endif