/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>
#include <linux/xarray.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
	__QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
	/* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
	 * Use qdisc_run_begin/end() or qdisc_is_running() instead.
	 */
	__QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED	BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING	BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | \
				 QDISC_STATE_DRAINING)
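/* Illustrative sketch (assumption, simplified from __netif_schedule() in
 * net/core/dev.c): the state bits above are manipulated with the generic
 * bitops, e.g. the scheduler core queues a qdisc for later execution only
 * once, no matter how many times it is kicked:
 *
 *	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
 *		__netif_reschedule(q);
 */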
struct qdisc_size_table {
	struct rcu_head rcu;
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff *head;
	struct sk_buff *tail;
	__u32 qlen;
	spinlock_t lock;
};

struct Qdisc {
	int (*enqueue)(struct sk_buff *skb,
		       struct Qdisc *sch,
		       struct sk_buff **to_free);
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	unsigned int flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or for a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32 limit;
	const struct Qdisc_ops *ops;
	struct qdisc_size_table __rcu *stab;
	struct hlist_node hash;
	u32 handle;
	u32 parent;

	struct netdev_queue *dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	int pad;
	refcount_t refcnt;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end.
	 */
	struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head q;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
	int owner;
	unsigned long state;
	unsigned long state2; /* must be written under qdisc spinlock */
	struct Qdisc *next_sched;
	struct sk_buff_head skb_bad_txq;

	spinlock_t busylock ____cacheline_aligned_in_smp;
	spinlock_t seqlock;

	struct rcu_head rcu;
	netdevice_tracker dev_tracker;
	struct lock_class_key root_lock_key;
	/* private data */
	long privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return true;
	return refcount_dec_if_one(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */
static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}

/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
 * root_lock section, or provide their own memory barriers -- ordering
 * against qdisc_run_begin/end() atomic bit operations.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return nolock_qdisc_is_empty(qdisc);
	return !READ_ONCE(qdisc->q.qlen);
}
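/* Illustrative sketch (assumption, modeled on unlocked dump paths): an
 * unlocked reader grabs a reference under RCU and backs off gracefully if
 * the qdisc is concurrently being released:
 *
 *	rcu_read_lock();
 *	q = qdisc_refcount_inc_nz(rcu_dereference(dev_queue->qdisc));
 *	rcu_read_unlock();
 *	if (q) {
 *		... use q ...
 *		qdisc_put_unlocked(q);
 *	}
 */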
/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* No need to insist if the MISSED flag was already set.
		 * Note that test_and_set_bit() also gives us memory ordering
		 * guarantees wrt potential earlier enqueue() and below
		 * spin_trylock(), both of which are necessary to prevent races.
		 */
		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Try to take the lock again to make sure that we will either
		 * grab it or the CPU that still has it will see MISSED set
		 * when testing it in qdisc_run_end().
		 */
		return spin_trylock(&qdisc->seqlock);
	}
	return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		/* spin_unlock() only has store-release semantics. The unlock
		 * and test_bit() ordering is a store-load ordering, so a full
		 * memory barrier is needed here.
		 */
		smp_mb();

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		__clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
	}
}
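/* Illustrative usage (simplified from qdisc_run(); sketch, not verbatim):
 * only one CPU at a time enters the dispatch loop, and the protection is
 * dropped again afterwards:
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);
 *		qdisc_run_end(q);
 *	}
 */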
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
	return netdev_queue_dql_avail(txq);
}

struct Qdisc_class_ops {
	unsigned int flags;
	/* Child qdisc manipulation */
	struct netdev_queue *(*select_queue)(struct Qdisc *, struct tcmsg *);
	int (*graft)(struct Qdisc *, unsigned long cl,
		     struct Qdisc *, struct Qdisc **,
		     struct netlink_ext_ack *extack);
	struct Qdisc *(*leaf)(struct Qdisc *, unsigned long cl);
	void (*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long (*find)(struct Qdisc *, u32 classid);
	int (*change)(struct Qdisc *, u32, u32,
		      struct nlattr **, unsigned long *,
		      struct netlink_ext_ack *);
	int (*delete)(struct Qdisc *, unsigned long,
		      struct netlink_ext_ack *);
	void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *(*tcf_block)(struct Qdisc *sch,
				       unsigned long arg,
				       struct netlink_ext_ack *extack);
	unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
				  u32 classid);
	void (*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int (*dump)(struct Qdisc *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);
	int (*dump_stats)(struct Qdisc *, unsigned long,
			  struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops *next;
	const struct Qdisc_class_ops *cl_ops;
	char id[IFNAMSIZ];
	int priv_size;
	unsigned int static_flags;

	int (*enqueue)(struct sk_buff *skb,
		       struct Qdisc *sch,
		       struct sk_buff **to_free);
	struct sk_buff *(*dequeue)(struct Qdisc *);
	struct sk_buff *(*peek)(struct Qdisc *);

	int (*init)(struct Qdisc *sch, struct nlattr *arg,
		    struct netlink_ext_ack *extack);
	void (*reset)(struct Qdisc *);
	void (*destroy)(struct Qdisc *);
	int (*change)(struct Qdisc *sch,
		      struct nlattr *arg,
		      struct netlink_ext_ack *extack);
	void (*attach)(struct Qdisc *sch);
	int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void (*change_real_num_tx)(struct Qdisc *sch,
				   unsigned int new_real_tx);

	int (*dump)(struct Qdisc *, struct sk_buff *);
	int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void (*ingress_block_set)(struct Qdisc *sch,
				  u32 block_index);
	void (*egress_block_set)(struct Qdisc *sch,
				 u32 block_index);
	u32 (*ingress_block_get)(struct Qdisc *sch);
	u32 (*egress_block_get)(struct Qdisc *sch);

	struct module *owner;
};

struct tcf_result {
	union {
		struct {
			unsigned long class;
			u32 classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head head;
	char kind[IFNAMSIZ];

	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	int (*init)(struct tcf_proto *);
	void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack);

	void *(*get)(struct tcf_proto *, u32 handle);
	void (*put)(struct tcf_proto *tp, void *f);
	int (*change)(struct net *net, struct sk_buff *,
		      struct tcf_proto *, unsigned long,
		      u32 handle, struct nlattr **,
		      void **, u32,
		      struct netlink_ext_ack *);
	int (*delete)(struct tcf_proto *tp, void *arg,
		      bool *last, bool rtnl_held,
		      struct netlink_ext_ack *);
	bool (*delete_empty)(struct tcf_proto *tp);
	void (*walk)(struct tcf_proto *tp,
		     struct tcf_walker *arg, bool rtnl_held);
	int (*reoffload)(struct tcf_proto *tp, bool add,
			 flow_setup_cb_t *cb, void *cb_priv,
			 struct netlink_ext_ack *extack);
	void (*hw_add)(struct tcf_proto *tp, void *type_data);
	void (*hw_del)(struct tcf_proto *tp, void *type_data);
	void (*bind_class)(void *, u32, unsigned long,
			   void *, unsigned long);
	void *(*tmplt_create)(struct net *net,
			      struct tcf_chain *chain,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack);
	void (*tmplt_destroy)(void *tmplt_priv);
	void (*tmplt_reoffload)(struct tcf_chain *chain,
				bool add,
				flow_setup_cb_t *cb,
				void *cb_priv);
	struct tcf_exts *(*get_exts)(const struct tcf_proto *tp,
				     u32 handle);

	/* rtnetlink specific */
	int (*dump)(struct net *, struct tcf_proto *, void *,
		    struct sk_buff *skb, struct tcmsg *,
		    bool);
	int (*terse_dump)(struct net *net,
			  struct tcf_proto *tp, void *fh,
			  struct sk_buff *skb,
			  struct tcmsg *t, bool rtnl_held);
	int (*tmplt_dump)(struct sk_buff *skb,
			  struct net *net,
			  void *tmplt_priv);

	struct module *owner;
	int flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};
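/* Illustrative sketch (assumption; the "my_*" names are hypothetical): ops
 * tables are filled in statically and registered from module init --
 * register_qdisc() is declared in <net/pkt_sched.h>, and the classifier
 * counterpart register_tcf_proto_ops() in <net/pkt_cls.h>:
 *
 *	static struct Qdisc_ops my_qdisc_ops __read_mostly = {
 *		.id		= "myqdisc",
 *		.priv_size	= sizeof(struct my_sched_data),
 *		.enqueue	= my_enqueue,
 *		.dequeue	= my_dequeue,
 *		.peek		= qdisc_peek_dequeued,
 *		.init		= my_init,
 *		.reset		= qdisc_reset_queue,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return register_qdisc(&my_qdisc_ops);
 *	}
 */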
struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu *next;
	void __rcu *root;

	/* called under RCU BH lock */
	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	__be16 protocol;

	/* All the rest */
	u32 prio;
	void *data;
	const struct tcf_proto_ops *ops;
	struct tcf_chain *chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t lock;
	bool deleting;
	bool counted;
	bool usesw;
	refcount_t refcnt;
	struct rcu_head rcu;
	struct hlist_node destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int pkt_len;
		u16 slave_dev_queue_mapping;
		u16 tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char data[QDISC_CB_PRIV_LEN];
};
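/* Illustrative sketch (assumption; "my_skb_cb" is hypothetical, the pattern
 * follows e.g. netem_skb_cb() in net/sched/sch_netem.c): a qdisc keeps
 * per-packet private state in the cb[] area after validating that it fits:
 *
 *	struct my_skb_cb {
 *		u64 enqueue_time;
 *	};
 *
 *	static struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */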
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	struct xarray ports; /* datapath accessible */
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t useswcnt;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index);

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain) \
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp) \
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_lock_bh(qdisc_lock(q));
	else
		spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_unlock_bh(qdisc_lock(q));
	else
		spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
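/* Illustrative sketch (assumption, following the common shape of ->change()
 * handlers in net/sched/sch_*.c): configuration updates are applied under
 * the tree lock so the dispatch path never observes a half-updated qdisc:
 *
 *	sch_tree_lock(sch);
 *	q->limit = new_limit;
 *	...
 *	sch_tree_unlock(sch);
 */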
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern const u8 sch_default_prio2band[TC_PRIO_MAX + 1];
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;

static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
		default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32 classid;
	unsigned int filter_cnt;
	struct hlist_node hnode;
};

struct Qdisc_class_hash {
	struct hlist_head *hash;
	unsigned int hashsize;
	unsigned int hashmask;
	unsigned int hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

static inline bool qdisc_class_in_use(const struct Qdisc_class_common *cl)
{
	return cl->filter_cnt > 0;
}

static inline void qdisc_class_get(struct Qdisc_class_common *cl)
{
	unsigned int res;

	if (check_add_overflow(cl->filter_cnt, 1, &res))
		WARN(1, "Qdisc class overflow");

	cl->filter_cnt = res;
}

static inline void qdisc_class_put(struct Qdisc_class_common *cl)
{
	unsigned int res;

	if (check_sub_overflow(cl->filter_cnt, 1, &res))
		WARN(1, "Qdisc class underflow");

	cl->filter_cnt = res;
}

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
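/* Illustrative sketch (assumption; "my_*" names are hypothetical, the shape
 * follows classful qdiscs such as net/sched/sch_htb.c): a ->find() class
 * operation is typically a thin wrapper around qdisc_class_find(), with the
 * class embedding struct Qdisc_class_common as its first member:
 *
 *	static unsigned long my_find(struct Qdisc *sch, u32 classid)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		return (unsigned long)qdisc_class_find(&q->clhash, classid);
 *	}
 */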
int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);

#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif

void qdisc_offload_query_caps(struct net_device *dev,
			      enum tc_setup_type type,
			      void *caps, size_t caps_len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_XGRESS
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) !=
		    rcu_access_pointer(txq->qdisc_sleeping))
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
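/* Illustrative sketch (assumption, following the common pattern in classful
 * qdiscs when a child enqueue fails): a "stolen" packet must not be counted
 * as a drop of the parent:
 *
 *	ret = qdisc_enqueue(skb, child, to_free);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 */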
static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	return sch->enqueue(skb, sch, to_free);
}

static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
				  __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	u64_stats_add(&bstats->bytes, bytes);
	u64_stats_add(&bstats->packets, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}
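/* Illustrative sketch (assumption; "my_dump_stats" is hypothetical): a
 * ->dump_stats() implementation can forward its queue statistics -- for
 * both the per-CPU and the locked case -- with qdisc_qstats_copy():
 *
 *	static int my_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 *	{
 *		return qdisc_qstats_copy(d, sch);
 *	}
 */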
static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
					     __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };

	gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
	*qlen = qstats.qlen + qdisc_qlen(sch);
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

struct tc_skb_cb {
	struct qdisc_skb_cb qdisc_cb;
	u32 drop_reason;

	u16 zone; /* Only valid if post_ct = true */
	u16 mru;
	u8 post_ct:1;
	u8 post_ct_snat:1;
	u8 post_ct_dnat:1;
};

static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
{
	struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
	return cb;
}

static inline enum skb_drop_reason
tcf_get_drop_reason(const struct sk_buff *skb)
{
	return tc_skb_cb(skb)->drop_reason;
}

static inline void tcf_set_drop_reason(const struct sk_buff *skb,
				       enum skb_drop_reason reason)
{
	tc_skb_cb(skb)->drop_reason = reason;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb().
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}
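/* Illustrative sketch (assumption, modeled on pfifo_enqueue() in
 * net/sched/sch_fifo.c): a minimal tail-drop enqueue built from the
 * helpers above:
 *
 *	static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *			      struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 */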
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it.
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
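/* Illustrative sketch (assumption; the token check is hypothetical, the
 * peek-then-commit shape follows shapers such as net/sched/sch_tbf.c): a
 * non-work-conserving qdisc peeks at the head packet and only commits the
 * dequeue once the packet may actually be sent:
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);
 *	if (skb && my_tokens_available(q, qdisc_pkt_len(skb)))
 *		skb = qdisc_dequeue_peeked(q->qdisc);
 */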
static inline int qdisc_drop_reason(struct sk_buff *skb, struct Qdisc *sch,
				    struct sk_buff **to_free,
				    enum skb_drop_reason reason)
{
	tcf_set_drop_reason(skb, reason);
	return qdisc_drop(skb, sch, to_free);
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

struct psched_ratecfg {
	u64 rate_bytes_ps; /* bytes per second */
	u32 mult;
	u16 overhead;
	u16 mpu;
	u8 linklayer;
	u8 shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (len < r->mpu)
		len = r->mpu;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->mpu = r->mpu;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
	u64 rate_pkts_ps; /* packets per second */
	u32 mult;
	u8 shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
				  unsigned int pkt_num)
{
	return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

/* Make sure qdisc is no longer in SCHED state. */
static inline void qdisc_synchronize(const struct Qdisc *q)
{
	while (test_bit(__QDISC_STATE_SCHED, &q->state))
		msleep(1);
}

#endif