/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>
#include <linux/xarray.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
	__QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
	/* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
	 * Use qdisc_run_begin/end() or qdisc_is_running() instead.
	 */
	__QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED	BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING	BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | \
				 QDISC_STATE_DRAINING)

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};
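
/* Illustrative sketch, not part of this header: because skb->prev is
 * undefined in a qdisc_skb_head, any walker must follow ->next only.
 * example_qdisc_skb_head_count() is a hypothetical helper; it assumes the
 * caller already holds whatever lock protects @qh.
 */
static inline unsigned int example_qdisc_skb_head_count(const struct qdisc_skb_head *qh)
{
	const struct sk_buff *skb;
	unsigned int n = 0;

	for (skb = qh->head; skb; skb = skb->next)
		n++;
	return n;	/* expected to match qh->qlen */
}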

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It's true for MQ/MQPRIO slaves, or for a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For the sake of performance on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	unsigned long		state2; /* must be written under qdisc spinlock */
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	struct rcu_head		rcu;
	netdevice_tracker	dev_tracker;
	/* private data */
	long			privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return true;
	return refcount_dec_if_one(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}

/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
 * root_lock section, or provide their own memory barriers -- ordering
 * against qdisc_run_begin/end() atomic bit operations.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return nolock_qdisc_is_empty(qdisc);
	return !READ_ONCE(qdisc->q.qlen);
}

/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* No need to insist if the MISSED flag was already set.
		 * Note that test_and_set_bit() also gives us memory ordering
		 * guarantees wrt potential earlier enqueue() and below
		 * spin_trylock(), both of which are necessary to prevent races.
		 */
		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Try to take the lock again to make sure that we will either
		 * grab it or the CPU that still has it will see MISSED set
		 * when testing it in qdisc_run_end().
		 */
		return spin_trylock(&qdisc->seqlock);
	}
	return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		/* spin_unlock() only has store-release semantics. The unlock
		 * and test_bit() ordering is a store-load ordering, so a full
		 * memory barrier is needed here.
		 */
		smp_mb();

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		__clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
	}
}
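
/* Illustrative sketch, not part of this header: the rough shape of what
 * qdisc_run()/__qdisc_run() do with the two helpers above -- allow only one
 * dequeuing "runner" per qdisc and let qdisc_run_end() reschedule when the
 * MISSED bit was set in the meantime. example_xmit_one() is a hypothetical
 * stand-in for the real dequeue-and-transmit step.
 */
static inline void example_qdisc_run(struct Qdisc *q,
				     bool (*example_xmit_one)(struct Qdisc *q))
{
	if (!qdisc_run_begin(q))
		return;		/* another CPU is already running this qdisc */

	while (example_xmit_one(q))
		;		/* drain until nothing is left to send */

	qdisc_run_end(q);	/* may __netif_schedule() if MISSED was set */
}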

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
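
/* Illustrative sketch, not part of this header: how a dequeue path can
 * combine qdisc_may_bulk() with the BQL budget from qdisc_avail_bulklimit()
 * before pulling several packets at once (in the spirit of the bulk dequeue
 * done in sch_generic.c). example_bulk_budget() is a hypothetical helper.
 */
static inline int example_bulk_budget(const struct Qdisc *q,
				      const struct netdev_queue *txq)
{
	if (!qdisc_may_bulk(q))
		return 0;	/* skbs may be destined to different txqs */

	/* Bytes still allowed by BQL before the queue would be stopped */
	return qdisc_avail_bulklimit(txq);
}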

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long,
					  struct netlink_ext_ack *);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void			(*change_real_num_tx)(struct Qdisc *sch,
						      unsigned int new_real_tx);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void*			(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto*, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, u32,
					  struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);
	void			(*tmplt_reoffload)(struct tcf_chain *chain,
						   bool add,
						   flow_setup_cb_t *cb,
						   void *cb_priv);
	struct tcf_exts *	(*get_exts)(const struct tcf_proto *tp,
					    u32 handle);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};
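
/* Illustrative sketch, not part of this header: how an enqueue path might
 * interpret a classifier verdict together with the tcf_result filled in by
 * ->classify() (classification itself goes through tcf_classify(), declared
 * in net/pkt_cls.h). example_verdict_to_classid() is hypothetical and only
 * shows the result handling.
 */
static inline u32 example_verdict_to_classid(int verdict,
					     const struct tcf_result *res,
					     bool *drop)
{
	*drop = false;

	switch (verdict) {
	case TC_ACT_SHOT:
		*drop = true;		/* packet must be freed by the caller */
		return 0;
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
		return 0;		/* consumed by an action, nothing to enqueue */
	default:
		return res->classid;	/* enqueue to the matched class */
	}
}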

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};
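
/* Illustrative sketch, not part of this header: qdiscs keep per-packet
 * private state in the qdisc_skb_cb::data scratch area, normally after
 * checking the size with qdisc_cb_private_validate() (defined later in this
 * header). struct example_cb and example_cb_of() are hypothetical.
 */
struct example_cb {
	u64 enqueue_time_ns;
};

static inline struct example_cb *example_cb_of(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct example_cb) > QDISC_CB_PRIV_LEN);
	return (struct example_cb *)((struct qdisc_skb_cb *)skb->cb)->data;
}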

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	struct xarray ports; /* datapath accessible */
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index);

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain)					\
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp)					\
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_lock_bh(qdisc_lock(q));
	else
		spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_unlock_bh(qdisc_lock(q));
	else
		spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
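
/* Illustrative sketch, not part of this header: the usual pattern for a
 * qdisc ->change() handler -- take sch_tree_lock() around updates that the
 * enqueue/dequeue path may observe concurrently. example_update_limit() is
 * hypothetical.
 */
static inline void example_update_limit(struct Qdisc *sch, u32 new_limit)
{
	sch_tree_lock(sch);
	WRITE_ONCE(sch->limit, new_limit);
	sch_tree_unlock(sch);
}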

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern const u8 sch_default_prio2band[TC_PRIO_MAX + 1];
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	unsigned int		filter_cnt;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

static inline bool qdisc_class_in_use(const struct Qdisc_class_common *cl)
{
	return cl->filter_cnt > 0;
}

static inline void qdisc_class_get(struct Qdisc_class_common *cl)
{
	unsigned int res;

	if (check_add_overflow(cl->filter_cnt, 1, &res))
		WARN(1, "Qdisc class overflow");

	cl->filter_cnt = res;
}

static inline void qdisc_class_put(struct Qdisc_class_common *cl)
{
	unsigned int res;

	if (check_sub_overflow(cl->filter_cnt, 1, &res))
		WARN(1, "Qdisc class underflow");

	cl->filter_cnt = res;
}

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}
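
/* Illustrative sketch, not part of this header: how a classful qdisc's
 * ->find() typically maps a classid to its own class structure with
 * qdisc_class_find(). struct example_class and example_find_class() are
 * hypothetical; real qdiscs embed Qdisc_class_common the same way.
 */
struct example_class {
	struct Qdisc_class_common common;	/* embedded, keyed by classid */
	/* ... per-class scheduling state would follow here ... */
};

static inline struct example_class *
example_find_class(struct Qdisc_class_hash *clhash, u32 classid)
{
	struct Qdisc_class_common *cl = qdisc_class_find(clhash, classid);

	return cl ? container_of(cl, struct example_class, common) : NULL;
}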

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
void qdisc_offload_query_caps(struct net_device *dev,
			      enum tc_setup_type type,
			      void *caps, size_t caps_len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_XGRESS
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) !=
		    rcu_access_pointer(txq->qdisc_sleeping))
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}
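
/* Illustrative sketch, not part of this header: attaching a default
 * per-queue child qdisc, roughly what the mq qdisc does for each TX queue
 * in its ->init(), built from qdisc_create_dflt() and
 * get_default_qdisc_ops() above. example_make_default_child() is
 * hypothetical.
 */
static inline struct Qdisc *
example_make_default_child(struct Qdisc *sch, struct netdev_queue *dev_queue,
			   unsigned int ntx, struct netlink_ext_ack *extack)
{
	return qdisc_create_dflt(dev_queue,
				 get_default_qdisc_ops(qdisc_dev(sch), ntx),
				 TC_H_MAKE(TC_H_MAJ(sch->handle),
					   TC_H_MIN(ntx + 1)),
				 extack);
}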

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
				  __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	u64_stats_add(&bstats->bytes, bytes);
	u64_stats_add(&bstats->packets, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}
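
/* Illustrative sketch, not part of this header: how a classful parent
 * typically hands a packet to a child qdisc and accounts the outcome --
 * qdisc_pkt_len() is sampled before the child may free the skb, and
 * net_xmit_drop_count() distinguishes real drops from "stolen" packets.
 * example_enqueue_to_child() is hypothetical.
 */
static inline int example_enqueue_to_child(struct sk_buff *skb,
					   struct Qdisc *parent,
					   struct Qdisc *child,
					   struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	int ret = qdisc_enqueue(skb, child, to_free);

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(parent);	/* a real drop, not stolen */
		return ret;
	}

	parent->qstats.backlog += len;
	parent->q.qlen++;
	return NET_XMIT_SUCCESS;
}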

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
					     __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };

	gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
	*qlen = qstats.qlen + qdisc_qlen(sch);
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}
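
/* Illustrative sketch, not part of this header: a trivial FIFO ->dequeue()
 * can simply return qdisc_dequeue_head(), since that helper already handles
 * the backlog and bstats accounting shown above. example_fifo_dequeue() is
 * hypothetical (this is roughly what the fifo qdiscs do).
 */
static inline struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
{
	return qdisc_dequeue_head(sch);
}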

struct tc_skb_cb {
	struct qdisc_skb_cb qdisc_cb;
	u32 drop_reason;

	u16 zone; /* Only valid if post_ct = true */
	u16 mru;
	u8 post_ct:1;
	u8 post_ct_snat:1;
	u8 post_ct_dnat:1;
};

static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
{
	struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
	return cb;
}

static inline enum skb_drop_reason
tcf_get_drop_reason(const struct sk_buff *skb)
{
	return tc_skb_cb(skb)->drop_reason;
}

static inline void tcf_set_drop_reason(const struct sk_buff *skb,
				       enum skb_drop_reason reason)
{
	tc_skb_cb(skb)->drop_reason = reason;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
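
/* Illustrative sketch, not part of this header: the classic tail-drop
 * enqueue shape -- roughly what pfifo does -- combining the limit check
 * with qdisc_enqueue_tail() and qdisc_drop() from above.
 * example_pfifo_enqueue() is hypothetical.
 */
static inline int example_pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
					struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);
}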

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u16	mpu;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (len < r->mpu)
		len = r->mpu;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->mpu = r->mpu;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
	u64	rate_pkts_ps; /* packets per second */
	u32	mult;
	u8	shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
				  unsigned int pkt_num)
{
	return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

/* Make sure qdisc is no longer in SCHED state. */
static inline void qdisc_synchronize(const struct Qdisc *q)
{
	while (test_bit(__QDISC_STATE_SCHED, &q->state))
		msleep(1);
}

#endif