#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_THROTTLED,
};

/*
 * The following bits are only changed while the qdisc lock is held
 */
enum qdisc___state_t {
	__QDISC___STATE_RUNNING = 1,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_WARN_NONWC	(1 << 16)
	int			padded;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct gnet_stats_rate_est	rate_est;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;

	struct sk_buff		*gso_skb;
	/*
	 * For performance reasons on SMP, we put highly modified fields at the end
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	unsigned int		__state;
	struct gnet_stats_queue	qstats;
	struct rcu_head		rcu_head;
	spinlock_t		busylock;
	u32			limit;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	qdisc->__state |= __QDISC___STATE_RUNNING;
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	qdisc->__state &= ~__QDISC___STATE_RUNNING;
}

static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
}
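
/*
 * Usage sketch (illustration only): qdisc_run_begin() and qdisc_run_end()
 * bracket the transmit loop so that only one CPU runs a given qdisc at a
 * time.  A caller following the qdisc_run() pattern does roughly:
 *
 *	if (qdisc_run_begin(q)) {
 *		... dequeue and transmit packets ...
 *		qdisc_run_end(q);
 *	}
 *
 * Because __state is only changed with the qdisc lock held, the plain
 * (non-atomic) read-modify-write in these helpers is sufficient.
 */
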
static inline void qdisc_throttled(struct Qdisc *qdisc)
{
	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
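
/*
 * Illustration (a hypothetical minimal scheduler, not part of this header):
 * a trivial work-conserving qdisc can be built almost entirely from the
 * generic helpers defined further below, along the lines of:
 *
 *	static struct Qdisc_ops example_fifo_qdisc_ops __read_mostly = {
 *		.id		= "example_fifo",
 *		.priv_size	= 0,
 *		.enqueue	= qdisc_enqueue_tail,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.drop		= qdisc_queue_drop,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * registered at module init with register_qdisc(&example_fifo_qdisc_ops).
 * The "example_fifo" name and variable are made up for this sketch.
 */
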
struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					const struct tcf_proto *,
					struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *,
					const struct tcf_proto *,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int	pkt_len;
	long		data[];
};

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
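
/*
 * Usage sketch (hypothetical classful qdisc; all "example_*" names are
 * made up): classes embed a Qdisc_class_common and resolve classids
 * through the hash, typically like so:
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	static struct example_class *example_find(u32 classid, struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		if (clc == NULL)
 *			return NULL;
 *		return container_of(clc, struct example_class, common);
 *	}
 */
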
extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void dev_deactivate_many(struct list_head *head);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
				      const struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs greater than index of a device.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
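
/*
 * Usage sketch: when a child ->enqueue() fails, the parent must decide
 * whether the failure counts as a drop.  With CONFIG_NET_CLS_ACT a packet
 * "stolen" by an action was consumed, not dropped, so classful schedulers
 * commonly do (illustrative pattern, not tied to any one scheduler):
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 */
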
static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
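
/*
 * Usage sketch (the pattern shaping qdiscs apply to a child qdisc; the
 * "child" variable and can_send_now() check are made up for this
 * illustration): peek first, and only commit to removing the packet once
 * it is allowed to go out:
 *
 *	skb = child->ops->peek(child);
 *	if (skb && can_send_now(sch, skb)) {
 *		skb = qdisc_dequeue_peeked(child);
 *		...
 *	}
 *
 * Calling child->dequeue() directly after ->peek() could return a
 * different packet, leaving the peeked skb stranded in ->gso_skb and the
 * qlen accounting inconsistent.
 */
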
static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.  The table maps a
 * size slot, computed below as (pktlen + cell_align + overhead) >>
 * cell_log, to the transmission time for packets of that size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
					    int action)
{
	struct sk_buff *n;

	n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif