#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head rcu;
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff *head;
	struct sk_buff *tail;
	__u32 qlen;
	spinlock_t lock;
};

struct Qdisc {
	int (*enqueue)(struct sk_buff *skb,
		       struct Qdisc *sch,
		       struct sk_buff **to_free);
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	unsigned int flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It's true for MQ/MQPRIO slaves, or non
				      * multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
	u32 limit;
	const struct Qdisc_ops *ops;
	struct qdisc_size_table __rcu *stab;
	struct hlist_node hash;
	u32 handle;
	u32 parent;
	void *u32_node;

	struct netdev_queue *dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;

	/*
	 * For performance sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t running;
	struct gnet_stats_queue qstats;
	unsigned long state;
	struct Qdisc *next_sched;
	struct sk_buff *skb_bad_txq;
	struct rcu_head rcu_head;
	int padded;
	refcount_t refcnt;

	spinlock_t busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}
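/* Sketch of the caller pattern these helpers assume (the in-tree callers
 * are qdisc_run() in net/pkt_sched.h and __dev_xmit_skb() in net/core/dev.c);
 * the seqcount acts as a trylock, so only one CPU runs the dequeue loop of
 * a given qdisc at a time:
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);
 *		qdisc_run_end(q);
 *	}
 */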
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *(*select_queue)(struct Qdisc *, struct tcmsg *);
	int (*graft)(struct Qdisc *, unsigned long cl,
		     struct Qdisc *, struct Qdisc **);
	struct Qdisc *(*leaf)(struct Qdisc *, unsigned long cl);
	void (*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long (*get)(struct Qdisc *, u32 classid);
	void (*put)(struct Qdisc *, unsigned long);
	int (*change)(struct Qdisc *, u32, u32,
		      struct nlattr **, unsigned long *);
	int (*delete)(struct Qdisc *, unsigned long);
	void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *(*tcf_block)(struct Qdisc *, unsigned long);
	bool (*tcf_cl_offload)(u32 classid);
	unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
				  u32 classid);
	void (*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int (*dump)(struct Qdisc *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);
	int (*dump_stats)(struct Qdisc *, unsigned long,
			  struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops *next;
	const struct Qdisc_class_ops *cl_ops;
	char id[IFNAMSIZ];
	int priv_size;

	int (*enqueue)(struct sk_buff *skb,
		       struct Qdisc *sch,
		       struct sk_buff **to_free);
	struct sk_buff *(*dequeue)(struct Qdisc *);
	struct sk_buff *(*peek)(struct Qdisc *);

	int (*init)(struct Qdisc *, struct nlattr *arg);
	void (*reset)(struct Qdisc *);
	void (*destroy)(struct Qdisc *);
	int (*change)(struct Qdisc *, struct nlattr *arg);
	void (*attach)(struct Qdisc *);

	int (*dump)(struct Qdisc *, struct sk_buff *);
	int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module *owner;
};


struct tcf_result {
	union {
		struct {
			unsigned long class;
			u32 classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_proto_ops {
	struct list_head head;
	char kind[IFNAMSIZ];

	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	int (*init)(struct tcf_proto *);
	void (*destroy)(struct tcf_proto *);

	unsigned long (*get)(struct tcf_proto *, u32 handle);
	int (*change)(struct net *net, struct sk_buff *,
		      struct tcf_proto *, unsigned long,
		      u32 handle, struct nlattr **,
		      unsigned long *, bool);
	int (*delete)(struct tcf_proto *, unsigned long, bool *);
	void (*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int (*dump)(struct net *, struct tcf_proto *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);

	struct module *owner;
};
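/* Note on ->classify(): on a match the filter fills in the tcf_result and
 * returns a TC_ACT_* verdict from linux/pkt_cls.h (TC_ACT_OK, TC_ACT_SHOT,
 * ...); TC_ACT_UNSPEC (-1) means "no opinion", in which case the caller
 * (tcf_classify() in cls_api.c, tc_classify() in older trees) moves on to
 * the next filter in the chain.
 */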
struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu *next;
	void __rcu *root;
	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	__be16 protocol;

	/* All the rest */
	u32 prio;
	u32 classid;
	struct Qdisc *q;
	void *data;
	const struct tcf_proto_ops *ops;
	struct tcf_chain *chain;
	struct rcu_head rcu;
};

struct qdisc_skb_cb {
	unsigned int pkt_len;
	u16 slave_dev_queue_mapping;
	u16 tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char data[QDISC_CB_PRIV_LEN];
};

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	struct tcf_proto __rcu **p_filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
};

struct tcf_block {
	struct list_head chain_list;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}
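/* Each netdev_queue carries two qdisc pointers: ->qdisc is what the
 * transmit path dereferences, while ->qdisc_sleeping is the configured
 * qdisc that survives dev_deactivate() (which temporarily points ->qdisc
 * at &noop_qdisc). Configuration-time code should therefore use the
 * "sleeping" accessors below.
 */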
/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32 classid;
	struct hlist_node hnode;
};

struct Qdisc_class_hash {
	struct hlist_head *hash;
	unsigned int hashsize;
	unsigned int hashmask;
	unsigned int hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
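/* Hypothetical sketch of how a classful qdisc wires up a default child
 * with the helpers above (error handling elided; pfifo_qdisc_ops and
 * TC_H_MAKE() come from net/pkt_sched.h and linux/pkt_sched.h, and
 * q->qdisc stands for whatever private slot holds the child):
 *
 *	struct Qdisc *child, *old;
 *
 *	child = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
 *				  TC_H_MAKE(sch->handle, 1));
 *	if (child)
 *		old = qdisc_replace(sch, child, &q->qdisc);
 *
 * qdisc_replace() (defined later in this file) swaps the slot under
 * sch_tree_lock(), resets the old qdisc and returns it so the caller can
 * qdisc_destroy() it when done.
 */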
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};
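/* __NET_XMIT_STOLEN signals that a child qdisc consumed the skb even
 * though the enqueue did not succeed in the usual sense, so the parent
 * must neither free the packet nor count a drop. Both flags live above
 * NET_XMIT_MASK and are stripped before the return value leaves the
 * scheduler core.
 */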
#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}
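/* The helpers above are enough to express a minimal tail-drop FIFO
 * ->enqueue(), in the style of sch_fifo.c (illustrative sketch only;
 * qdisc_enqueue_tail() and qdisc_drop() are defined below):
 *
 *	static int fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 */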
static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}
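/* Of the drop helpers: __qdisc_drop() only defers the kfree_skb() via the
 * to_free list, qdisc_drop() below also bumps qstats.drops and reports
 * NET_XMIT_DROP to the caller, while rtnl_qdisc_drop() above is for
 * RTNL-protected control paths that have no to_free list.
 */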
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64 rate_bytes_ps; /* bytes per second */
	u32 mult;
	u16 overhead;
	u8 linklayer;
	u8 shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif