/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>
#include <linux/xarray.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
	__QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
	/* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
	 * Use qdisc_run_begin/end() or qdisc_is_running() instead.
	 */
	__QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED	BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING	BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | \
				 QDISC_STATE_DRAINING)

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * This is true for MQ/MQPRIO slaves, or
				      * for a non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue	qstats;
	int			owner;
	unsigned long		state;
	unsigned long		state2; /* must be written under qdisc spinlock */
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	struct rcu_head		rcu;
	netdevice_tracker	dev_tracker;
	struct lock_class_key	root_lock_key;
	/* private data */
	long			privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return true;
	return refcount_dec_if_one(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}
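
/* Illustrative sketch (not part of this header): an unlocked reader takes a
 * temporary reference on a qdisc found via RCU. The "txq" variable and the
 * surrounding error handling are hypothetical; qdisc_put_unlocked() is
 * declared further below in this file.
 *
 *	rcu_read_lock();
 *	q = qdisc_refcount_inc_nz(rcu_dereference(txq->qdisc));
 *	rcu_read_unlock();
 *	if (!q)
 *		return;			// qdisc is being released, give up
 *	...				// safe to use q here
 *	qdisc_put_unlocked(q);		// drop the reference when done
 */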

/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
 * root_lock section, or provide their own memory barriers -- ordering
 * against qdisc_run_begin/end() atomic bit operations.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return nolock_qdisc_is_empty(qdisc);
	return !READ_ONCE(qdisc->q.qlen);
}

/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* No need to insist if the MISSED flag was already set.
		 * Note that test_and_set_bit() also gives us memory ordering
		 * guarantees wrt potential earlier enqueue() and below
		 * spin_trylock(), both of which are necessary to prevent races.
		 */
		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Try to take the lock again to make sure that we will either
		 * grab it or the CPU that still has it will see MISSED set
		 * when testing it in qdisc_run_end().
		 */
		return spin_trylock(&qdisc->seqlock);
	}
	return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		/* spin_unlock() only has store-release semantics. The unlock
		 * and test_bit() ordering is a store-load ordering, so a full
		 * memory barrier is needed here.
		 */
		smp_mb();

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		__clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
	}
}
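
/* Illustrative sketch (not part of this header): the canonical pattern for
 * entering and leaving the dequeue path, modeled on qdisc_run() in
 * net/pkt_sched.h; __qdisc_run() is declared there as well.
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);		// dequeue packets while we own the run state
 *		qdisc_run_end(q);	// may reschedule if MISSED was set meanwhile
 *	}
 */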

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
	return netdev_queue_dql_avail(txq);
}

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long,
					  struct netlink_ext_ack *);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void			(*change_real_num_tx)(struct Qdisc *sch,
						      unsigned int new_real_tx);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};
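
/* Illustrative sketch (not part of this header): the skeleton of a minimal
 * classless qdisc built on helpers defined later in this file. The "myfifo"
 * names are hypothetical; register_qdisc() is declared in net/pkt_sched.h.
 *
 *	static struct Qdisc_ops myfifo_qdisc_ops __read_mostly = {
 *		.id		= "myfifo",
 *		.priv_size	= 0,
 *		.enqueue	= myfifo_enqueue,	// see the enqueue sketch below
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	// module init would then call: register_qdisc(&myfifo_qdisc_ops);
 */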

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};
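
/* Illustrative sketch (not part of this header): how a classful qdisc
 * typically consumes a tcf_result. tcf_classify() is declared in
 * net/pkt_cls.h; the filter list "fl" and the class lookup are hypothetical.
 *
 *	struct tcf_result res;
 *
 *	switch (tcf_classify(skb, NULL, fl, &res, false)) {
 *	case TC_ACT_SHOT:
 *		return NULL;			// filter asked us to drop
 *	default:
 *		cl = my_find_class(sch, res.classid);
 *	}
 */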

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void *			(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, u32,
					  struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);
	void			(*tmplt_reoffload)(struct tcf_chain *chain,
						   bool add,
						   flow_setup_cb_t *cb,
						   void *cb_priv);
	struct tcf_exts *	(*get_exts)(const struct tcf_proto *tp,
					    u32 handle);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, void *,
					struct sk_buff *skb, struct tcmsg *,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	bool			counted;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	struct xarray ports; /* datapath accessible */
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	bool bypass_wanted;
	atomic_t filtercnt; /* Number of filters */
	atomic_t skipswcnt; /* Number of skip_sw filters */
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index);

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain)					\
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp)					\
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
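
/* Illustrative sketch (not part of this header): a qdisc stashing private
 * per-packet state in qdisc_skb_cb::data, guarded at build time by
 * qdisc_cb_private_validate(). Modeled on the pattern used by sch_netem;
 * the "my_skb_cb" names are hypothetical. qdisc_skb_cb() is defined just
 * below.
 *
 *	struct my_skb_cb {
 *		u64 time_to_send;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */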

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_lock_bh(qdisc_lock(q));
	else
		spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_unlock_bh(qdisc_lock(q));
	else
		spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
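
/* Illustrative sketch (not part of this header): a qdisc's ->change()
 * callback typically updates its parameters under the tree lock so the
 * dequeue path never sees a half-applied configuration. "new_limit" and the
 * surrounding netlink parsing are hypothetical and omitted.
 *
 *	sch_tree_lock(sch);
 *	sch->limit = new_limit;		// atomically visible to the dequeue path
 *	sch_tree_unlock(sch);
 */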

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern const u8 sch_default_prio2band[TC_PRIO_MAX + 1];
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	unsigned int		filter_cnt;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

static inline bool qdisc_class_in_use(const struct Qdisc_class_common *cl)
{
	return cl->filter_cnt > 0;
}

static inline void qdisc_class_get(struct Qdisc_class_common *cl)
{
	unsigned int res;

	if (check_add_overflow(cl->filter_cnt, 1, &res))
		WARN(1, "Qdisc class overflow");

	cl->filter_cnt = res;
}

static inline void qdisc_class_put(struct Qdisc_class_common *cl)
{
	unsigned int res;

	if (check_sub_overflow(cl->filter_cnt, 1, &res))
		WARN(1, "Qdisc class underflow");

	cl->filter_cnt = res;
}
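
/* Illustrative sketch (not part of this header): a classful qdisc embeds
 * Qdisc_class_common in its per-class state and resolves classids through
 * qdisc_class_find(), in the style of sch_htb. The "my_*" names and the
 * "clhash" parameter are hypothetical.
 *
 *	struct my_class {
 *		struct Qdisc_class_common common;
 *		// ... per-class scheduling state ...
 *	};
 *
 *	static struct my_class *my_class_find(struct Qdisc_class_hash *clhash,
 *					      u32 classid)
 *	{
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(clhash, classid);
 *		return clc ? container_of(clc, struct my_class, common) : NULL;
 *	}
 */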

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
void qdisc_offload_query_caps(struct net_device *dev,
			      enum tc_setup_type type,
			      void *caps, size_t caps_len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_XGRESS
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) !=
		    rcu_access_pointer(txq->qdisc_sleeping))
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
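
/* Illustrative sketch (not part of this header): a parent qdisc enqueueing
 * into a child and accounting drops, following the pattern used by e.g.
 * sch_tbf. net_xmit_drop_count() keeps "stolen" packets out of the parent's
 * drop counters; "q->qdisc" is the hypothetical child, and
 * qdisc_qstats_drop() is defined further below.
 *
 *	ret = qdisc_enqueue(skb, q->qdisc, to_free);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 *	sch->q.qlen++;
 *	return NET_XMIT_SUCCESS;
 */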

static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
				  __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	u64_stats_add(&bstats->bytes, bytes);
	u64_stats_add(&bstats->packets, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}
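
/* Illustrative sketch (not part of this header): a minimal ->dump_stats()
 * implementation. The helper picks per-CPU or plain queue counters for us;
 * "my_dump_stats" is hypothetical.
 *
 *	static int my_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 *	{
 *		return qdisc_qstats_copy(d, sch);
 *	}
 */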

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
					     __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };

	gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
	*qlen = qstats.qlen + qdisc_qlen(sch);
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}
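
/* Illustrative sketch (not part of this header): a packet-count-limited FIFO
 * enqueue built from the helpers above, in the style of sch_fifo. The
 * "myfifo" name is hypothetical (cf. the ops sketch earlier); qdisc_drop()
 * is defined further below.
 *
 *	static int myfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				  struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 */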

struct tc_skb_cb {
	struct qdisc_skb_cb qdisc_cb;
	u32 drop_reason;

	u16 zone; /* Only valid if post_ct = true */
	u16 mru;
	u8 post_ct:1;
	u8 post_ct_snat:1;
	u8 post_ct_dnat:1;
};

static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
{
	struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
	return cb;
}

static inline enum skb_drop_reason
tcf_get_drop_reason(const struct sk_buff *skb)
{
	return tc_skb_cb(skb)->drop_reason;
}

static inline void tcf_set_drop_reason(const struct sk_buff *skb,
				       enum skb_drop_reason reason)
{
	tc_skb_cb(skb)->drop_reason = reason;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
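
/* Illustrative sketch (not part of this header): a rate-limiting
 * (non-work-conserving) qdisc peeks at its child and only commits the
 * dequeue once the packet is allowed to leave, loosely modeled on sch_tbf.
 * "child", "now" and time_to_send() are hypothetical.
 *
 *	skb = child->ops->peek(child);		// e.g. qdisc_peek_dequeued()
 *	if (!skb || ktime_before(now, time_to_send(skb)))
 *		return NULL;			// nothing eligible to send yet
 *	skb = qdisc_dequeue_peeked(child);	// really remove it this time
 *	if (likely(skb)) {
 *		qdisc_qstats_backlog_dec(sch, skb);
 *		sch->q.qlen--;
 *		qdisc_bstats_update(sch, skb);
 *	}
 *	return skb;
 */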

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u16	mpu;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (len < r->mpu)
		len = r->mpu;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->mpu = r->mpu;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
	u64	rate_pkts_ps; /* packets per second */
	u32	mult;
	u8	shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
				  unsigned int pkt_num)
{
	return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

/* Make sure qdisc is no longer in SCHED state. */
static inline void qdisc_synchronize(const struct Qdisc *q)
{
	while (test_bit(__QDISC_STATE_SCHED, &q->state))
		msleep(1);
}

#endif