/* linux/include/net/pkt_sched.h (revision d67b569f5f620c0fb95d5212642746b7ba9d29e4) */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <net/sch_generic.h>

struct qdisc_walker
{
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};
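
/*
 * Hedged example, not part of the original API: the shape of a walk
 * callback.  A qdisc's ->walk() implementation typically calls
 * walker->fn() once per class (handling the ->skip/->count bookkeeping
 * itself) and sets ->stop when the callback returns a negative value.
 */
static inline int qdisc_walker_example_fn(struct Qdisc *q, unsigned long cl,
					  struct qdisc_walker *w)
{
	/* inspect the class identified by 'cl' here */
	return 0;	/* 0 continues the walk, a negative value stops it */
}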

extern rwlock_t qdisc_tree_lock;

#define QDISC_ALIGNTO		32
#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))

static inline void *qdisc_priv(struct Qdisc *q)
{
	return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
}
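
/*
 * Hedged illustration (the helper below is hypothetical, not kernel API):
 * a qdisc and its private area live in one allocation.  The struct Qdisc
 * part is padded up to a QDISC_ALIGNTO boundary, so the pointer returned
 * by qdisc_priv() above is 32-byte aligned.
 */
static inline unsigned int example_qdisc_alloc_size(unsigned int priv_size)
{
	/* bytes needed: aligned struct Qdisc followed by the private data */
	return QDISC_ALIGN(sizeof(struct Qdisc)) + priv_size;
}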

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth.

   A normal IP packet size is ~ 512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
   10 Mbit ethernet.

   10 msec resolution -> < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because in the most critical places we
   may use an artificial clock evaluated by integration of network data
   flow.

   Note: we do not use fastgettimeofday.
   The reason is that, when it is not the same thing as gettimeofday,
   it returns an invalid timestamp which is not updated while net_bh
   is active.
 */
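
/*
 * Hedged worked example of the 10% rule above (the helper name is
 * illustrative only, and the intermediate product is assumed to fit in
 * an unsigned long).  For 512-byte packets at 1 Mbyte/sec this yields
 * about 51 usec, matching the ~50 usec figure quoted for 10 Mbit ethernet.
 */
static inline unsigned long example_required_resolution_usec(unsigned long pkt_bytes,
							      unsigned long bytes_per_sec)
{
	/* 10% of the time needed to send one minimum schedulable packet */
	return (pkt_bytes * 1000000UL / bytes_per_sec) / 10;
}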

/* General note about the internal clock.

   Any clock source returns time intervals measured in units close to
   1 usec. With the source CONFIG_NET_SCH_CLK_GETTIMEOFDAY it is precisely
   microseconds; otherwise it is something close but different, chosen to
   minimize arithmetic cost. The ratio of usec to internal units, in the
   form numerator/denominator, may be read from /proc/net/psched.
 */
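
/*
 * Hedged sketch (hypothetical helper): converting internal clock units to
 * microseconds using the numerator/denominator pair exported through
 * /proc/net/psched, i.e. usec = units * num / denom.
 */
static inline unsigned long example_units_to_usec(unsigned long units,
						  unsigned long num,
						  unsigned long denom)
{
	return units * num / denom;
}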

#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY

typedef struct timeval	psched_time_t;
typedef long		psched_tdiff_t;

#define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp))
#define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ))
#define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ))
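
/*
 * Worked example (editorial note): with HZ=100 a jiffy is 10000 usec, so
 * PSCHED_US2JIFFIE(25000) rounds up to 3 jiffies and PSCHED_JIFFIE2US(3)
 * converts back to 30000 usec.
 */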

#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

#ifdef CONFIG_NET_SCH_CLK_JIFFIES

#if HZ < 96
#define PSCHED_JSCALE 14
#elif HZ >= 96 && HZ < 192
#define PSCHED_JSCALE 13
#elif HZ >= 192 && HZ < 384
#define PSCHED_JSCALE 12
#elif HZ >= 384 && HZ < 768
#define PSCHED_JSCALE 11
#elif HZ >= 768
#define PSCHED_JSCALE 10
#endif

#define PSCHED_GET_TIME(stamp) ((stamp) = (get_jiffies_64()<<PSCHED_JSCALE))
#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
#define PSCHED_JIFFIE2US(delay) ((delay)<<PSCHED_JSCALE)
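
/*
 * Worked example (editorial note): with HZ=100, PSCHED_JSCALE is 13, so a
 * jiffy of 10000 usec is stretched to 2^13 = 8192 internal units (about
 * 1.2 usec per unit), keeping the unit near a microsecond while both
 * conversions remain cheap shifts.
 */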

#endif /* CONFIG_NET_SCH_CLK_JIFFIES */
#ifdef CONFIG_NET_SCH_CLK_CPU
#include <asm/timex.h>

extern psched_tdiff_t psched_clock_per_hz;
extern int psched_clock_scale;
extern psched_time_t psched_time_base;
extern cycles_t psched_time_mark;

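/*
 * Descriptive note (editorial): on platforms where cycles_t is only 32 bits
 * wide, PSCHED_GET_TIME below extends the cycle counter to 64 bits by adding
 * 2^32 to psched_time_base whenever the raw counter is seen to wrap
 * (cur <= psched_time_mark), then scales the sum down by psched_clock_scale.
 */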
#define PSCHED_GET_TIME(stamp)						\
do {									\
	cycles_t cur = get_cycles();					\
	if (sizeof(cycles_t) == sizeof(u32)) {				\
		if (cur <= psched_time_mark)				\
			psched_time_base += 0x100000000ULL;		\
		psched_time_mark = cur;					\
		(stamp) = (psched_time_base + cur)>>psched_clock_scale;	\
	} else {							\
		(stamp) = cur>>psched_clock_scale;			\
	}								\
} while (0)
#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz)

#endif /* CONFIG_NET_SCH_CLK_CPU */

#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */

#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
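
/*
 * Descriptive note (editorial): the switch statements in PSCHED_TDIFF and
 * PSCHED_TDIFF_SAFE below rely on intentional fall-through: each matched
 * second adds another 1000000 usec, and larger differences are clamped
 * (to 2000000 usec here, or to 'bound' in the _SAFE variant).
 */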
#define PSCHED_TDIFF(tv1, tv2) \
({ \
	   int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
	   int __delta = (tv1).tv_usec - (tv2).tv_usec; \
	   if (__delta_sec) { \
		   switch (__delta_sec) { \
		   default: \
			   __delta = 0; \
		   case 2: \
			   __delta += 1000000; \
		   case 1: \
			   __delta += 1000000; \
		   } \
	   } \
	   __delta; \
})

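/*
 * Descriptive note (editorial): psched_tod_diff() turns a difference in
 * whole seconds into microseconds without overflowing a 32-bit int; any
 * value that cannot be multiplied safely, or that exceeds 'bound', is
 * clamped to 'bound'.
 */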
static inline int
psched_tod_diff(int delta_sec, int bound)
{
	int delta;

	if (bound <= 1000000 || delta_sec > (0x7FFFFFFF/1000000)-1)
		return bound;
	delta = delta_sec * 1000000;
	if (delta > bound || delta < 0)
		delta = bound;
	return delta;
}

#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
({ \
	   int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
	   int __delta = (tv1).tv_usec - (tv2).tv_usec; \
	   switch (__delta_sec) { \
	   default: \
		   __delta = psched_tod_diff(__delta_sec, bound); break; \
	   case 2: \
		   __delta += 1000000; \
	   case 1: \
		   __delta += 1000000; \
	   case 0: \
		   if (__delta > bound || __delta < 0) \
			   __delta = bound; \
	   } \
	   __delta; \
})

#define PSCHED_TLESS(tv1, tv2) (((tv1).tv_usec < (tv2).tv_usec && \
				(tv1).tv_sec <= (tv2).tv_sec) || \
				 (tv1).tv_sec < (tv2).tv_sec)

#define PSCHED_TADD2(tv, delta, tv_res) \
({ \
	   int __delta = (tv).tv_usec + (delta); \
	   (tv_res).tv_sec = (tv).tv_sec; \
	   if (__delta >= 1000000) { (tv_res).tv_sec++; __delta -= 1000000; } \
	   (tv_res).tv_usec = __delta; \
})

#define PSCHED_TADD(tv, delta) \
({ \
	   (tv).tv_usec += (delta); \
	   if ((tv).tv_usec >= 1000000) { (tv).tv_sec++; \
		 (tv).tv_usec -= 1000000; } \
})

/* Set/check that the time is in the "past perfect"; what that means
   depends on the concrete representation of system time.
 */

#define PSCHED_SET_PASTPERFECT(t)	((t).tv_sec = 0)
#define PSCHED_IS_PASTPERFECT(t)	((t).tv_sec == 0)

#define	PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; })

#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */

#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
	min_t(long long, (tv1) - (tv2), bound)

#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2))
#define PSCHED_TADD2(tv, delta, tv_res) ((tv_res) = (tv) + (delta))
#define PSCHED_TADD(tv, delta) ((tv) += (delta))
#define PSCHED_SET_PASTPERFECT(t)	((t) = 0)
#define PSCHED_IS_PASTPERFECT(t)	((t) == 0)
#define	PSCHED_AUDIT_TDIFF(t)

#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;

extern int register_qdisc(struct Qdisc_ops *qops);
extern int unregister_qdisc(struct Qdisc_ops *qops);
extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
		struct rtattr *tab);
extern void qdisc_put_rtab(struct qdisc_rate_table *tab);

extern int qdisc_restart(struct net_device *dev);

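/*
 * Descriptive note (editorial): qdisc_run() below keeps calling
 * qdisc_restart() while the device queue is not stopped and the negative
 * return value indicates that another transmit attempt is worthwhile.
 */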
static inline void qdisc_run(struct net_device *dev)
{
	while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
		/* NOTHING */;
}

extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
	struct tcf_result *res);

/* Calculate the maximal size of a packet seen by the hard_start_xmit
   routine of this device.
 */
static inline unsigned psched_mtu(struct net_device *dev)
{
	unsigned mtu = dev->mtu;
	return dev->hard_header ? mtu + dev->hard_header_len : mtu;
}
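
/*
 * Worked example (editorial note): for a typical ethernet device with an
 * mtu of 1500 and a hard_header_len of 14, psched_mtu() reports 1514.
 */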

#endif