#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * This is true for MQ/MQPRIO slaves and
				      * non-multiqueue devices.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node       hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For performance's sake on SMP, we put the most heavily modified
	 * fields at the end.
	 */
	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc            *next_sched;
	struct sk_buff		*skb_bad_txq;
	int			padded;
	refcount_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}
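
/* Example (illustrative sketch): callers such as __dev_xmit_skb() treat
 * qdisc_run_begin() as a trylock and always pair a successful begin with
 * qdisc_run_end().  If begin fails, another CPU is already running this
 * qdisc:
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);
 *		qdisc_run_end(q);
 *	}
 */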

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Drivers not yet migrated to BQL will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
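
/* Example (sketch, modeled on dequeue_skb() in net/sched/sch_generic.c):
 * the two helpers above gate bulk dequeue.  Bulking is only safe when all
 * skbs go to one txq, and BQL bounds how many bytes are pulled at once:
 *
 *	if (qdisc_may_bulk(q)) {
 *		int bytelimit = qdisc_avail_bulklimit(txq);
 *
 *		while (bytelimit > 0 && (nskb = q->dequeue(q)) != NULL) {
 *			bytelimit -= nskb->len;
 *			... chain nskb behind skb for a single xmit call ...
 *		}
 *	}
 */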

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
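
/* Example (hypothetical skeleton, not an in-tree qdisc): a minimal
 * classless qdisc fills in the mandatory hooks and is registered with
 * register_qdisc() from module init.  example_enqueue is sketched next to
 * qdisc_drop() further below:
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= 0,
 *		.enqueue	= example_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 */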

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	void*			(*get)(struct tcf_proto*, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					void **, bool);
	int			(*delete)(struct tcf_proto*, void *, bool*);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);
	void			(*bind_class)(void *, u32, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	struct tcf_proto __rcu **p_filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
};

struct tcf_block {
	struct list_head chain_list;
	struct net *net;
	struct Qdisc *q;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
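
/* Example (sketch, modeled on how qdiscs such as netem use the cb area):
 * a qdisc overlays its private per-skb state on qdisc_skb_cb::data and
 * validates the size once via the BUILD_BUG_ON()s above:
 *
 *	struct example_skb_cb {
 *		u64 time_to_send;
 *	};
 *
 *	static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
 *		return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */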

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
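
/* Example (sketch): a qdisc's ->change() typically brackets configuration
 * updates with the tree lock so the datapath never sees a half-updated
 * hierarchy (RTNL is already held by the netlink caller):
 *
 *	sch_tree_lock(sch);
 *	sch->limit = new_limit;
 *	... update other derived state ...
 *	sch_tree_unlock(sch);
 */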

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
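
/* Example (sketch, modeled on classful qdiscs such as htb): classes embed
 * struct Qdisc_class_common and implement ->find() on top of
 * qdisc_class_find().  example_sched and example_class are hypothetical:
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	static unsigned long example_find(struct Qdisc *sch, u32 classid)
 *	{
 *		struct example_sched *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		if (clc == NULL)
 *			return 0;
 *		return (unsigned long)container_of(clc, struct example_class,
 *						   common);
 *	}
 */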

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device whose queue index is >= i.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
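
/* Example (sketch of the caller side, modeled on __dev_xmit_skb() in
 * net/core/dev.c): the to_free list collects skbs dropped while the qdisc
 * lock is held, so they can be freed after the lock is released:
 *
 *	struct sk_buff *to_free = NULL;
 *
 *	rc = qdisc_enqueue(skb, q, &to_free);
 *	...
 *	if (unlikely(to_free))
 *		kfree_skb_list(to_free);
 */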

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while the root qdisc lock is held,
 * queue the skb for future freeing at the end of __dev_xmit_skb().
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
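
/* Example (sketch, modeled on non-work-conserving qdiscs such as tbf):
 * ->dequeue() peeks first and only commits the dequeue once the shaper
 * allows the packet out ("enough tokens" is pseudocode):
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);
 *	if (skb) {
 *		if (enough tokens for qdisc_pkt_len(skb)) {
 *			skb = qdisc_dequeue_peeked(q->qdisc);
 *			...
 *		}
 *	}
 */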

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		unsigned int qlen = old->q.qlen;
		unsigned int backlog = old->qstats.backlog;

		qdisc_reset(old);
		qdisc_tree_reduce_backlog(old, qlen, backlog);
	}
	sch_tree_unlock(sch);

	return old;
}
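
/* Example (sketch, modeled on single-child qdiscs such as tbf): ->graft()
 * is often little more than a qdisc_replace() call; q here stands for the
 * hypothetical qdisc_priv(sch) holding the child pointer:
 *
 *	static int example_graft(struct Qdisc *sch, unsigned long arg,
 *				 struct Qdisc *new, struct Qdisc **old)
 *	{
 *		if (new == NULL)
 *			new = &noop_qdisc;
 *		*old = qdisc_replace(sch, new, &q->qdisc);
 *		return 0;
 *	}
 */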

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
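
/* Example (sketch, modeled on pfifo_enqueue() in net/sched/sch_fifo.c):
 * the example_enqueue referenced in the Qdisc_ops sketch above defers the
 * free of an over-limit skb through the to_free list:
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 */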

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
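
/* Worked example: with rate.cell_log == 3 (8-byte cells) and zero
 * overhead/cell_align, a 1500-byte packet lands in slot 1500 >> 3 == 187,
 * so its transmit time is rtab->data[187].  A slot of 600 overflows the
 * 256-entry table and is approximated as
 * rtab->data[255] * (600 >> 8) + rtab->data[600 & 0xFF],
 * i.e. 2 * data[255] + data[88].
 */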

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
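
/* Worked example: psched_ratecfg_precompute() (below) picks @mult and
 * @shift so that (len * mult) >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps.  At 1 Gbit/s (125,000,000 bytes/s)
 * a 1500-byte packet costs 1500 * 10^9 / (125 * 10^6) = 12,000 ns.  On
 * ATM link layers the length is first rounded up to 48-byte cells carried
 * in 53-byte frames: DIV_ROUND_UP(1500, 48) * 53 = 32 * 53 = 1696 bytes
 * on the wire.
 */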

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* The legacy struct tc_ratespec has a 32bit @rate field;
	 * qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif