/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head rcu;
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};

/* Similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff *head;
	struct sk_buff *tail;
	__u32 qlen;
	spinlock_t lock;
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are
				      * destined for q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeuing the next packet.
				      * This is true for MQ/MQPRIO slaves, or
				      * for a non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table __rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put the frequently modified
	 * fields at the end.
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	/* for NOLOCK qdisc, true if there are no enqueued skbs */
	bool			empty;
	struct rcu_head		rcu;

	/* private data */
	long privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */
static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}

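/* Illustrative sketch (not part of this header): an unlocked reader that
 * found a qdisc under rcu_read_lock() can use qdisc_refcount_inc_nz() to
 * take a reference only if the qdisc is not already being released. All
 * names other than the helpers above and qdisc_put_unlocked() (declared
 * further down in this file) are hypothetical.
 *
 *	rcu_read_lock();
 *	q = rcu_dereference(dev_queue->qdisc);
 *	if (q)
 *		q = qdisc_refcount_inc_nz(q);
 *	rcu_read_unlock();
 *	if (q) {
 *		...
 *		qdisc_put_unlocked(q);
 *	}
 */
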
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return READ_ONCE(qdisc->empty);
	return !READ_ONCE(qdisc->q.qlen);
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (!spin_trylock(&qdisc->seqlock))
			return false;
		WRITE_ONCE(qdisc->empty, false);
	} else if (qdisc_is_running(qdisc)) {
		return false;
	}
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
	if (qdisc->flags & TCQ_F_NOLOCK)
		spin_unlock(&qdisc->seqlock);
}

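/* Illustrative sketch (not from this file): the canonical caller pattern.
 * qdisc_run_begin() is a trylock: a false return means another CPU already
 * owns this qdisc's run, so the caller simply backs off instead of
 * spinning. __qdisc_run() is declared in net/pkt_sched.h.
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);
 *		qdisc_run_end(q);
 *	}
 */
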
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};

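/* Illustrative sketch (not from this file): a module providing a new qdisc
 * fills in a struct Qdisc_ops like the one above and registers it with
 * register_qdisc(), which is declared in net/pkt_sched.h. All "example_"
 * names are hypothetical.
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= sizeof(struct example_sched_data),
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_dequeued,
 *		.init		= example_init,
 *		.reset		= example_reset,
 *		.destroy	= example_destroy,
 *		.dump		= example_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_qdisc(&example_qdisc_ops);
 *	}
 */
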
struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;

		/* used in the skb_tc_reinsert function */
		struct {
			bool		ingress;
			struct gnet_stats_queue *qstats;
		};
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void*			(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto*, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, bool, bool,
					  struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
	u16			mru;
	bool			post_ct;
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

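/* Illustrative sketch (not from this file): a qdisc that supports filters
 * typically obtains a tcf_block in its ->init() and releases it in
 * ->destroy(). tcf_block_get() and tcf_block_put() are declared in
 * net/pkt_cls.h, not here; q->block and q->filter_list are hypothetical
 * private-data members.
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 */
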
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain)					\
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp)					\
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

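/* Illustrative sketch (not from this file, compare what fq_codel does): a
 * qdisc that needs per-packet scratch space layers its own cb struct on
 * top of qdisc_skb_cb::data and validates the size at build time. All
 * "example_" names are hypothetical.
 *
 *	struct example_skb_cb {
 *		u64 enqueue_time;
 *	};
 *
 *	static struct example_skb_cb *example_skb_cb(const struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
 *		return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */
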
static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

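/* Illustrative sketch (not from this file): classful qdiscs embed a
 * struct Qdisc_class_common in their per-class structure, insert it into
 * a Qdisc_class_hash, and resolve classids with qdisc_class_find() plus
 * container_of(). All "example_" names are hypothetical; qdisc_priv() is
 * declared in net/pkt_sched.h.
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	static struct example_class *example_find(struct Qdisc *sch, u32 classid)
 *	{
 *		struct example_sched *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		return clc ? container_of(clc, struct example_class, common) : NULL;
 *	}
 */
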
static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

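/* Illustrative sketch (not from this file): classful qdiscs commonly use
 * qdisc_create_dflt() in their ->init() to give each class a default
 * leaf. The pfifo_fast_ops choice and the classid arithmetic below are
 * only examples.
 *
 *	child = qdisc_create_dflt(sch->dev_queue, &pfifo_fast_ops,
 *				  TC_H_MAKE(sch->handle, 1), extack);
 *	if (!child)
 *		return -ENOMEM;
 */
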
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device, starting at the given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

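/* Illustrative sketch (not from this file): the core xmit path calls
 * qdisc_enqueue() with a local to_free list and frees whatever the qdisc
 * dropped only after the qdisc lock has been released (compare
 * __dev_xmit_skb() in net/core/dev.c).
 *
 *	struct sk_buff *to_free = NULL;
 *	int rc;
 *
 *	rc = qdisc_enqueue(skb, q, &to_free);
 *	...
 *	if (unlikely(to_free))
 *		kfree_skb_list(to_free);
 */
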
static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
					      __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };
	__u32 len = qdisc_qlen_sum(sch);

	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
	*qlen = qstats.qlen;
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

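/* Illustrative sketch (not from this file): a shaping qdisc (compare TBF)
 * uses qdisc_peek_dequeued() as its child's ->peek() so it can look at the
 * next packet, decide whether it may be sent yet, and only then pull it
 * off with qdisc_dequeue_peeked(), defined below. q->qdisc is a
 * hypothetical child qdisc and time_to_send() a hypothetical helper.
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);
 *	if (skb && time_to_send(skb) <= now)
 *		skb = qdisc_dequeue_peeked(q->qdisc);
 */
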
static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_tree_flush_backlog(old);
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

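/* Illustrative sketch (not from this file): a minimal tail-drop enqueue
 * built from the helpers above, in the spirit of pfifo. The "example_"
 * name is hypothetical.
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 */
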
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * a qdisc using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

#endif /* __NET_SCHED_GENERIC_H */