#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

struct Qdisc
{
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int			padded;
	struct Qdisc_ops	*ops;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct sk_buff_head	q;
	struct net_device	*dev;
	struct list_head	list;

	struct gnet_stats_basic		bstats;
	struct gnet_stats_queue		qstats;
	struct gnet_stats_rate_est	rate_est;
	spinlock_t		*stats_lock;
	struct rcu_head		q_rcu;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
};

struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct rtattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	int			(*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct rtattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct rtattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
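
/*
 * Illustrative sketch, not part of the original header: one way a
 * scheduler module might fill in a Qdisc_ops table, wiring it directly to
 * the generic queue helpers defined further down in this file (much as
 * sch_fifo does for its dequeue/requeue/drop hooks).  The "example_*"
 * names are hypothetical; register_qdisc() is declared in net/pkt_sched.h.
 * Kept under "#if 0" so it is never compiled.
 */
#if 0
static struct Qdisc_ops example_qdisc_ops = {
	.id		= "example",
	.priv_size	= 0,
	.enqueue	= qdisc_enqueue_tail,	/* unbounded tail enqueue */
	.dequeue	= qdisc_dequeue_head,
	.requeue	= qdisc_requeue,
	.drop		= qdisc_queue_drop,
	.owner		= THIS_MODULE,
};

/* Registration is typically done from the module's init routine. */
static int example_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}
#endif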

struct tcf_result
{
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					u32 handle, struct rtattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};


extern void qdisc_lock_tree(struct net_device *dev);
extern void qdisc_unlock_tree(struct net_device *dev);

#define sch_tree_lock(q)	qdisc_lock_tree((q)->dev)
#define sch_tree_unlock(q)	qdisc_unlock_tree((q)->dev)
#define tcf_tree_lock(tp)	qdisc_lock_tree((tp)->q->dev)
#define tcf_tree_unlock(tp)	qdisc_unlock_tree((tp)->q->dev)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct Qdisc_ops *ops, u32 parentid);

/* Tear down a classifier: run its destroy hook, drop the module ref, free it. */
static inline void
tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

/* Add skb to the tail of list and update backlog and byte/packet stats. */
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += skb->len;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

/* Remove the skb at the head of list and shrink the backlog accordingly. */
static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

/* Remove the skb at the tail of list and shrink the backlog accordingly. */
static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

/* Put skb back at the head of list and account the requeue. */
static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_requeue(skb, sch, &sch->q);
}
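
/*
 * Illustrative sketch, not part of the original header: a length-limited
 * enqueue in the style of sch_fifo, built on the helpers above.  The
 * function name and the use of dev->tx_queue_len as the limit are
 * hypothetical; qdisc_drop() is defined a little further down.  Kept
 * under "#if 0" so it is never compiled.
 */
#if 0
static int example_limit_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Accept while the queue is below the limit, otherwise count a drop. */
	if (likely(skb_queue_len(&sch->q) < sch->dev->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch);
}
#endif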

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

/* Drop the packet at the tail of list and return its length. */
static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = skb->len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

/* Free the skb and account a drop. */
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

/*
 * Count a drop, then either hand the skb to the reshape_fail hook
 * (CONFIG_NET_CLS_POLICE) or free it.
 */
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_POLICE
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

#endif