/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeuing the next packet.
				      * It is true for MQ/MQPRIO slaves, or for
				      * a non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;
	int			padded;
	refcount_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}
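
/*
 * Usage sketch (illustrative only, not part of this header): callers such
 * as the core transmit path treat qdisc_run_begin() as a trylock and must
 * pair a successful acquisition with qdisc_run_end():
 *
 *	if (qdisc_run_begin(q)) {
 *		... dequeue packets and hand them to the driver ...
 *		qdisc_run_end(q);
 *	}
 *
 * A false return means another CPU is already running this qdisc.
 */
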
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
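
/*
 * Sketch (illustrative, with hypothetical "my_fifo_*" names): a minimal
 * classless qdisc fills in only the operations it needs and wires simple
 * queues to the generic helpers defined later in this header, in the style
 * of sch_fifo:
 *
 *	static struct Qdisc_ops my_fifo_qdisc_ops __read_mostly = {
 *		.id		= "myfifo",
 *		.priv_size	= 0,
 *		.enqueue	= my_fifo_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.init		= my_fifo_init,
 *		.reset		= qdisc_reset_queue,
 *		.change		= my_fifo_init,
 *		.dump		= my_fifo_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * The ops are then registered with register_qdisc() (declared in
 * net/pkt_sched.h) from the module's init function.
 */
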
struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	void *			(*get)(struct tcf_proto *, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, bool);
	int			(*delete)(struct tcf_proto *, void *, bool *);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);
	void			(*bind_class)(void *, u32, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, void *,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
};

struct tcf_block {
	struct list_head chain_list;
	struct net *net;
	struct Qdisc *q;
	struct list_head cb_list;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
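
/*
 * Sketch (illustrative, hypothetical "my_skb_cb" names): qdiscs that keep
 * private per-packet state in skb->cb layer their own structure over the
 * qdisc_skb_cb data[] area and validate the size in their access helper,
 * in the style of sch_netem:
 *
 *	struct my_skb_cb {
 *		u64	time_to_send;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 *
 * qdisc_skb_cb() itself is defined just below.
 */
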
static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = 0;
	int i;

	if (q->flags & TCQ_F_NOLOCK) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen = q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
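
/*
 * Sketch (illustrative, hypothetical "my_class" names): classful qdiscs
 * embed Qdisc_class_common in their per-class state and resolve a classid
 * with qdisc_class_find() plus container_of(), as HTB and friends do:
 *
 *	struct my_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	static struct my_class *my_class_find(struct Qdisc_class_hash *clhash,
 *					      u32 classid)
 *	{
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(clhash, classid);
 *		return clc ? container_of(clc, struct my_class, common) : NULL;
 *	}
 */
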
static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);
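
/*
 * Sketch (illustrative): a classful qdisc typically creates its default
 * child queues with qdisc_create_dflt(), passing the handle the child
 * should report as its parent; roughly what sch_mq does per TX queue:
 *
 *	child = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
 *				  TC_H_MAKE(TC_H_MAJ(sch->handle),
 *					    TC_H_MIN(ntx + 1)), extack);
 *	if (!child)
 *		return -ENOMEM;
 *
 * The returned qdisc starts with a reference held by the caller and is
 * later attached or destroyed under RTNL.
 */
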
static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device, starting at the given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
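
/*
 * Sketch (illustrative): a classful qdisc forwarding to a child interprets
 * the child's return value with net_xmit_drop_count() so that packets
 * "stolen" by actions are not counted as drops, in the style of sch_tbf:
 *
 *	ret = qdisc_enqueue(skb, q->child, to_free);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 *	qdisc_qstats_backlog_inc(sch, skb);
 *	sch->q.qlen++;
 *	return NET_XMIT_SUCCESS;
 *
 * The qdisc_qstats_*() helpers used here are defined below.
 */
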
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while the root qdisc lock is held,
 * queue the skb for future freeing at the end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
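
/*
 * Sketch (illustrative): a non-work-conserving qdisc pairs the two helpers
 * above: it peeks first and commits the dequeue only once it decides the
 * packet may be sent now. This is roughly the shape of sch_tbf's dequeue
 * path; rate_allows_sending() is a hypothetical stand-in for the real
 * token-bucket check:
 *
 *	skb = qdisc_peek_dequeued(sch);
 *	if (skb && rate_allows_sending(sch, skb)) {
 *		skb = qdisc_dequeue_peeked(sch);
 *		qdisc_bstats_update(sch, skb);
 *		return skb;
 *	}
 *	return NULL;
 */
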
static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		unsigned int qlen = old->q.qlen;
		unsigned int backlog = old->qstats.backlog;

		qdisc_reset(old);
		qdisc_tree_reduce_backlog(old, qlen, backlog);
	}
	sch_tree_unlock(sch);

	return old;
}
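
/*
 * Sketch (illustrative, hypothetical "my_*" names): a classful qdisc's
 * ->graft() callback is the typical caller of qdisc_replace();
 * substituting noop_qdisc when no new child is given is the common idiom:
 *
 *	static int my_graft(struct Qdisc *sch, unsigned long arg,
 *			    struct Qdisc *new, struct Qdisc **old,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		if (!new)
 *			new = &noop_qdisc;
 *		*old = qdisc_replace(sch, new, &q->child);
 *		return 0;
 *	}
 *
 * The displaced child is returned to the caller, which drops its reference.
 */
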
static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * Qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

/* Mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);

#endif