1 #ifndef __NET_SCHED_GENERIC_H 2 #define __NET_SCHED_GENERIC_H 3 4 #include <linux/netdevice.h> 5 #include <linux/types.h> 6 #include <linux/rcupdate.h> 7 #include <linux/module.h> 8 #include <linux/pkt_sched.h> 9 #include <linux/pkt_cls.h> 10 #include <net/gen_stats.h> 11 #include <net/rtnetlink.h> 12 13 struct Qdisc_ops; 14 struct qdisc_walker; 15 struct tcf_walker; 16 struct module; 17 18 struct qdisc_rate_table { 19 struct tc_ratespec rate; 20 u32 data[256]; 21 struct qdisc_rate_table *next; 22 int refcnt; 23 }; 24 25 enum qdisc_state_t { 26 __QDISC_STATE_SCHED, 27 __QDISC_STATE_DEACTIVATED, 28 }; 29 30 /* 31 * following bits are only changed while qdisc lock is held 32 */ 33 enum qdisc___state_t { 34 __QDISC___STATE_RUNNING, 35 }; 36 37 struct qdisc_size_table { 38 struct list_head list; 39 struct tc_sizespec szopts; 40 int refcnt; 41 u16 data[]; 42 }; 43 44 struct Qdisc { 45 int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); 46 struct sk_buff * (*dequeue)(struct Qdisc *dev); 47 unsigned flags; 48 #define TCQ_F_BUILTIN 1 49 #define TCQ_F_THROTTLED 2 50 #define TCQ_F_INGRESS 4 51 #define TCQ_F_CAN_BYPASS 8 52 #define TCQ_F_MQROOT 16 53 #define TCQ_F_WARN_NONWC (1 << 16) 54 int padded; 55 struct Qdisc_ops *ops; 56 struct qdisc_size_table *stab; 57 struct list_head list; 58 u32 handle; 59 u32 parent; 60 atomic_t refcnt; 61 struct gnet_stats_rate_est rate_est; 62 int (*reshape_fail)(struct sk_buff *skb, 63 struct Qdisc *q); 64 65 void *u32_node; 66 67 /* This field is deprecated, but it is still used by CBQ 68 * and it will live until better solution will be invented. 69 */ 70 struct Qdisc *__parent; 71 struct netdev_queue *dev_queue; 72 struct Qdisc *next_sched; 73 74 struct sk_buff *gso_skb; 75 /* 76 * For performance sake on SMP, we put highly modified fields at the end 77 */ 78 unsigned long state; 79 struct sk_buff_head q; 80 struct gnet_stats_basic_packed bstats; 81 unsigned long __state; 82 struct gnet_stats_queue qstats; 83 struct rcu_head rcu_head; 84 spinlock_t busylock; 85 }; 86 87 static inline bool qdisc_is_running(struct Qdisc *qdisc) 88 { 89 return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state); 90 } 91 92 static inline bool qdisc_run_begin(struct Qdisc *qdisc) 93 { 94 return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state); 95 } 96 97 static inline void qdisc_run_end(struct Qdisc *qdisc) 98 { 99 __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state); 100 } 101 102 struct Qdisc_class_ops { 103 /* Child qdisc manipulation */ 104 struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); 105 int (*graft)(struct Qdisc *, unsigned long cl, 106 struct Qdisc *, struct Qdisc **); 107 struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl); 108 void (*qlen_notify)(struct Qdisc *, unsigned long); 109 110 /* Class manipulation routines */ 111 unsigned long (*get)(struct Qdisc *, u32 classid); 112 void (*put)(struct Qdisc *, unsigned long); 113 int (*change)(struct Qdisc *, u32, u32, 114 struct nlattr **, unsigned long *); 115 int (*delete)(struct Qdisc *, unsigned long); 116 void (*walk)(struct Qdisc *, struct qdisc_walker * arg); 117 118 /* Filter manipulation */ 119 struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); 120 unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, 121 u32 classid); 122 void (*unbind_tcf)(struct Qdisc *, unsigned long); 123 124 /* rtnetlink specific */ 125 int (*dump)(struct Qdisc *, unsigned long, 126 struct sk_buff *skb, struct tcmsg*); 127 int (*dump_stats)(struct Qdisc *, unsigned long, 128 struct gnet_dump *); 129 }; 130 131 struct Qdisc_ops { 132 struct Qdisc_ops *next; 133 const struct Qdisc_class_ops *cl_ops; 134 char id[IFNAMSIZ]; 135 int priv_size; 136 137 int (*enqueue)(struct sk_buff *, struct Qdisc *); 138 struct sk_buff * (*dequeue)(struct Qdisc *); 139 struct sk_buff * (*peek)(struct Qdisc *); 140 unsigned int (*drop)(struct Qdisc *); 141 142 int (*init)(struct Qdisc *, struct nlattr *arg); 143 void (*reset)(struct Qdisc *); 144 void (*destroy)(struct Qdisc *); 145 int (*change)(struct Qdisc *, struct nlattr *arg); 146 void (*attach)(struct Qdisc *); 147 148 int (*dump)(struct Qdisc *, struct sk_buff *); 149 int (*dump_stats)(struct Qdisc *, struct gnet_dump *); 150 151 struct module *owner; 152 }; 153 154 155 struct tcf_result { 156 unsigned long class; 157 u32 classid; 158 }; 159 160 struct tcf_proto_ops { 161 struct tcf_proto_ops *next; 162 char kind[IFNAMSIZ]; 163 164 int (*classify)(struct sk_buff*, struct tcf_proto*, 165 struct tcf_result *); 166 int (*init)(struct tcf_proto*); 167 void (*destroy)(struct tcf_proto*); 168 169 unsigned long (*get)(struct tcf_proto*, u32 handle); 170 void (*put)(struct tcf_proto*, unsigned long); 171 int (*change)(struct tcf_proto*, unsigned long, 172 u32 handle, struct nlattr **, 173 unsigned long *); 174 int (*delete)(struct tcf_proto*, unsigned long); 175 void (*walk)(struct tcf_proto*, struct tcf_walker *arg); 176 177 /* rtnetlink specific */ 178 int (*dump)(struct tcf_proto*, unsigned long, 179 struct sk_buff *skb, struct tcmsg*); 180 181 struct module *owner; 182 }; 183 184 struct tcf_proto { 185 /* Fast access part */ 186 struct tcf_proto *next; 187 void *root; 188 int (*classify)(struct sk_buff*, struct tcf_proto*, 189 struct tcf_result *); 190 __be16 protocol; 191 192 /* All the rest */ 193 u32 prio; 194 u32 classid; 195 struct Qdisc *q; 196 void *data; 197 struct tcf_proto_ops *ops; 198 }; 199 200 struct qdisc_skb_cb { 201 unsigned int pkt_len; 202 char data[]; 203 }; 204 205 static inline int qdisc_qlen(struct Qdisc *q) 206 { 207 return q->q.qlen; 208 } 209 210 static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb) 211 { 212 return (struct qdisc_skb_cb *)skb->cb; 213 } 214 215 static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc) 216 { 217 return &qdisc->q.lock; 218 } 219 220 static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc) 221 { 222 return qdisc->dev_queue->qdisc; 223 } 224 225 static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc) 226 { 227 return qdisc->dev_queue->qdisc_sleeping; 228 } 229 230 /* The qdisc root lock is a mechanism by which to top level 231 * of a qdisc tree can be locked from any qdisc node in the 232 * forest. This allows changing the configuration of some 233 * aspect of the qdisc tree while blocking out asynchronous 234 * qdisc access in the packet processing paths. 235 * 236 * It is only legal to do this when the root will not change 237 * on us. Otherwise we'll potentially lock the wrong qdisc 238 * root. This is enforced by holding the RTNL semaphore, which 239 * all users of this lock accessor must do. 240 */ 241 static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc) 242 { 243 struct Qdisc *root = qdisc_root(qdisc); 244 245 ASSERT_RTNL(); 246 return qdisc_lock(root); 247 } 248 249 static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc) 250 { 251 struct Qdisc *root = qdisc_root_sleeping(qdisc); 252 253 ASSERT_RTNL(); 254 return qdisc_lock(root); 255 } 256 257 static inline struct net_device *qdisc_dev(struct Qdisc *qdisc) 258 { 259 return qdisc->dev_queue->dev; 260 } 261 262 static inline void sch_tree_lock(struct Qdisc *q) 263 { 264 spin_lock_bh(qdisc_root_sleeping_lock(q)); 265 } 266 267 static inline void sch_tree_unlock(struct Qdisc *q) 268 { 269 spin_unlock_bh(qdisc_root_sleeping_lock(q)); 270 } 271 272 #define tcf_tree_lock(tp) sch_tree_lock((tp)->q) 273 #define tcf_tree_unlock(tp) sch_tree_unlock((tp)->q) 274 275 extern struct Qdisc noop_qdisc; 276 extern struct Qdisc_ops noop_qdisc_ops; 277 extern struct Qdisc_ops pfifo_fast_ops; 278 extern struct Qdisc_ops mq_qdisc_ops; 279 280 struct Qdisc_class_common { 281 u32 classid; 282 struct hlist_node hnode; 283 }; 284 285 struct Qdisc_class_hash { 286 struct hlist_head *hash; 287 unsigned int hashsize; 288 unsigned int hashmask; 289 unsigned int hashelems; 290 }; 291 292 static inline unsigned int qdisc_class_hash(u32 id, u32 mask) 293 { 294 id ^= id >> 8; 295 id ^= id >> 4; 296 return id & mask; 297 } 298 299 static inline struct Qdisc_class_common * 300 qdisc_class_find(struct Qdisc_class_hash *hash, u32 id) 301 { 302 struct Qdisc_class_common *cl; 303 struct hlist_node *n; 304 unsigned int h; 305 306 h = qdisc_class_hash(id, hash->hashmask); 307 hlist_for_each_entry(cl, n, &hash->hash[h], hnode) { 308 if (cl->classid == id) 309 return cl; 310 } 311 return NULL; 312 } 313 314 extern int qdisc_class_hash_init(struct Qdisc_class_hash *); 315 extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *); 316 extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *); 317 extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *); 318 extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *); 319 320 extern void dev_init_scheduler(struct net_device *dev); 321 extern void dev_shutdown(struct net_device *dev); 322 extern void dev_activate(struct net_device *dev); 323 extern void dev_deactivate(struct net_device *dev); 324 extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, 325 struct Qdisc *qdisc); 326 extern void qdisc_reset(struct Qdisc *qdisc); 327 extern void qdisc_destroy(struct Qdisc *qdisc); 328 extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); 329 extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 330 struct Qdisc_ops *ops); 331 extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, 332 struct Qdisc_ops *ops, u32 parentid); 333 extern void qdisc_calculate_pkt_len(struct sk_buff *skb, 334 struct qdisc_size_table *stab); 335 extern void tcf_destroy(struct tcf_proto *tp); 336 extern void tcf_destroy_chain(struct tcf_proto **fl); 337 338 /* Reset all TX qdiscs greater then index of a device. */ 339 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) 340 { 341 struct Qdisc *qdisc; 342 343 for (; i < dev->num_tx_queues; i++) { 344 qdisc = netdev_get_tx_queue(dev, i)->qdisc; 345 if (qdisc) { 346 spin_lock_bh(qdisc_lock(qdisc)); 347 qdisc_reset(qdisc); 348 spin_unlock_bh(qdisc_lock(qdisc)); 349 } 350 } 351 } 352 353 static inline void qdisc_reset_all_tx(struct net_device *dev) 354 { 355 qdisc_reset_all_tx_gt(dev, 0); 356 } 357 358 /* Are all TX queues of the device empty? */ 359 static inline bool qdisc_all_tx_empty(const struct net_device *dev) 360 { 361 unsigned int i; 362 for (i = 0; i < dev->num_tx_queues; i++) { 363 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 364 const struct Qdisc *q = txq->qdisc; 365 366 if (q->q.qlen) 367 return false; 368 } 369 return true; 370 } 371 372 /* Are any of the TX qdiscs changing? */ 373 static inline bool qdisc_tx_changing(struct net_device *dev) 374 { 375 unsigned int i; 376 for (i = 0; i < dev->num_tx_queues; i++) { 377 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 378 if (txq->qdisc != txq->qdisc_sleeping) 379 return true; 380 } 381 return false; 382 } 383 384 /* Is the device using the noop qdisc on all queues? */ 385 static inline bool qdisc_tx_is_noop(const struct net_device *dev) 386 { 387 unsigned int i; 388 for (i = 0; i < dev->num_tx_queues; i++) { 389 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 390 if (txq->qdisc != &noop_qdisc) 391 return false; 392 } 393 return true; 394 } 395 396 static inline unsigned int qdisc_pkt_len(struct sk_buff *skb) 397 { 398 return qdisc_skb_cb(skb)->pkt_len; 399 } 400 401 /* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */ 402 enum net_xmit_qdisc_t { 403 __NET_XMIT_STOLEN = 0x00010000, 404 __NET_XMIT_BYPASS = 0x00020000, 405 }; 406 407 #ifdef CONFIG_NET_CLS_ACT 408 #define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1) 409 #else 410 #define net_xmit_drop_count(e) (1) 411 #endif 412 413 static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) 414 { 415 #ifdef CONFIG_NET_SCHED 416 if (sch->stab) 417 qdisc_calculate_pkt_len(skb, sch->stab); 418 #endif 419 return sch->enqueue(skb, sch); 420 } 421 422 static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch) 423 { 424 qdisc_skb_cb(skb)->pkt_len = skb->len; 425 return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; 426 } 427 428 static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len) 429 { 430 sch->bstats.bytes += len; 431 sch->bstats.packets++; 432 } 433 434 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, 435 struct sk_buff_head *list) 436 { 437 __skb_queue_tail(list, skb); 438 sch->qstats.backlog += qdisc_pkt_len(skb); 439 __qdisc_update_bstats(sch, qdisc_pkt_len(skb)); 440 441 return NET_XMIT_SUCCESS; 442 } 443 444 static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch) 445 { 446 return __qdisc_enqueue_tail(skb, sch, &sch->q); 447 } 448 449 static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch, 450 struct sk_buff_head *list) 451 { 452 struct sk_buff *skb = __skb_dequeue(list); 453 454 if (likely(skb != NULL)) 455 sch->qstats.backlog -= qdisc_pkt_len(skb); 456 457 return skb; 458 } 459 460 static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) 461 { 462 return __qdisc_dequeue_head(sch, &sch->q); 463 } 464 465 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, 466 struct sk_buff_head *list) 467 { 468 struct sk_buff *skb = __qdisc_dequeue_head(sch, list); 469 470 if (likely(skb != NULL)) { 471 unsigned int len = qdisc_pkt_len(skb); 472 kfree_skb(skb); 473 return len; 474 } 475 476 return 0; 477 } 478 479 static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch) 480 { 481 return __qdisc_queue_drop_head(sch, &sch->q); 482 } 483 484 static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch, 485 struct sk_buff_head *list) 486 { 487 struct sk_buff *skb = __skb_dequeue_tail(list); 488 489 if (likely(skb != NULL)) 490 sch->qstats.backlog -= qdisc_pkt_len(skb); 491 492 return skb; 493 } 494 495 static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch) 496 { 497 return __qdisc_dequeue_tail(sch, &sch->q); 498 } 499 500 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) 501 { 502 return skb_peek(&sch->q); 503 } 504 505 /* generic pseudo peek method for non-work-conserving qdisc */ 506 static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch) 507 { 508 /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ 509 if (!sch->gso_skb) { 510 sch->gso_skb = sch->dequeue(sch); 511 if (sch->gso_skb) 512 /* it's still part of the queue */ 513 sch->q.qlen++; 514 } 515 516 return sch->gso_skb; 517 } 518 519 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */ 520 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) 521 { 522 struct sk_buff *skb = sch->gso_skb; 523 524 if (skb) { 525 sch->gso_skb = NULL; 526 sch->q.qlen--; 527 } else { 528 skb = sch->dequeue(sch); 529 } 530 531 return skb; 532 } 533 534 static inline void __qdisc_reset_queue(struct Qdisc *sch, 535 struct sk_buff_head *list) 536 { 537 /* 538 * We do not know the backlog in bytes of this list, it 539 * is up to the caller to correct it 540 */ 541 __skb_queue_purge(list); 542 } 543 544 static inline void qdisc_reset_queue(struct Qdisc *sch) 545 { 546 __qdisc_reset_queue(sch, &sch->q); 547 sch->qstats.backlog = 0; 548 } 549 550 static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch, 551 struct sk_buff_head *list) 552 { 553 struct sk_buff *skb = __qdisc_dequeue_tail(sch, list); 554 555 if (likely(skb != NULL)) { 556 unsigned int len = qdisc_pkt_len(skb); 557 kfree_skb(skb); 558 return len; 559 } 560 561 return 0; 562 } 563 564 static inline unsigned int qdisc_queue_drop(struct Qdisc *sch) 565 { 566 return __qdisc_queue_drop(sch, &sch->q); 567 } 568 569 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) 570 { 571 kfree_skb(skb); 572 sch->qstats.drops++; 573 574 return NET_XMIT_DROP; 575 } 576 577 static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch) 578 { 579 sch->qstats.drops++; 580 581 #ifdef CONFIG_NET_CLS_ACT 582 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) 583 goto drop; 584 585 return NET_XMIT_SUCCESS; 586 587 drop: 588 #endif 589 kfree_skb(skb); 590 return NET_XMIT_DROP; 591 } 592 593 /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how 594 long it will take to send a packet given its size. 595 */ 596 static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen) 597 { 598 int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead; 599 if (slot < 0) 600 slot = 0; 601 slot >>= rtab->rate.cell_log; 602 if (slot > 255) 603 return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]; 604 return rtab->data[slot]; 605 } 606 607 #ifdef CONFIG_NET_CLS_ACT 608 static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask, 609 int action) 610 { 611 struct sk_buff *n; 612 613 if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) && 614 !skb_shared(skb)) 615 n = skb_get(skb); 616 else 617 n = skb_clone(skb, gfp_mask); 618 619 if (n) { 620 n->tc_verd = SET_TC_VERD(n->tc_verd, 0); 621 n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd); 622 n->tc_verd = CLR_TC_MUNGED(n->tc_verd); 623 } 624 return n; 625 } 626 #endif 627 628 #endif 629