xref: /linux/include/net/dst.h (revision af901ca181d92aac3a7dc265144a9081a86d8f39)
/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

/*
 * 0 - no debugging messages
 * 1 - rare events and bugs (default)
 * 2 - trace mode.
 */
#define RT_CACHE_DEBUG		0

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from its parent list, it is "freed" (dst_free).
 * After this it enters the dead state (dst->obsolete > 0) and, if its refcnt
 * is zero, it can be destroyed immediately; otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */

struct sk_buff;

struct dst_entry
{
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device       *dev;
	short			error;
	short			obsolete;
	int			flags;
#define DST_HOST		1
#define DST_NOXFRM		2
#define DST_NOPOLICY		4
#define DST_NOHASH		8
	unsigned long		expires;

	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */

	unsigned int		rate_tokens;
	unsigned long		rate_last;	/* rate limiting for ICMP */

	struct dst_entry	*path;

	struct neighbour	*neighbour;
	struct hh_cache		*hh;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff*);
	int			(*output)(struct sk_buff*);

	struct  dst_ops	        *ops;

	u32			metrics[RTAX_MAX];

#ifdef CONFIG_NET_CLS_ROUTE
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

	/*
	 * Align __refcnt to a 64-byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[2];
#else
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references	*/
	int			__use;
	unsigned long		lastuse;
	union {
		struct dst_entry *next;
		struct rtable    *rt_next;
		struct rt6_info   *rt6_next;
		struct dn_route  *dn_next;
	};
};
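
/* A minimal sketch of how the tail union above is meant to be used: protocol
 * caches chain their entries through the embedded dst_entry, so generic code
 * can follow ->next while IPv4, IPv6 and DECnet code reads the same pointer
 * as ->rt_next, ->rt6_next or ->dn_next.  "bucket_head" below is a
 * hypothetical chain head, not something defined in this header.
 *
 *	unsigned int len = 0;
 *	struct dst_entry *d;
 *
 *	for (d = bucket_head; d != NULL; d = d->next)
 *		len++;
 */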

#ifdef __KERNEL__

static inline u32
dst_metric(const struct dst_entry *dst, int metric)
{
	return dst->metrics[metric-1];
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	u32 mtu = dst_metric(dst, RTAX_MTU);
	/*
	 * Alexey put it here, so ask him about it :)
	 */
	barrier();
	return mtu;
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
				      unsigned long rtt)
{
	dst->metrics[metric-1] = jiffies_to_msecs(rtt);
}
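
/* A minimal sketch of reading and updating metrics with the helpers above,
 * assuming "dst" is a valid entry returned by a route lookup: dst_mtu()
 * reads RTAX_MTU directly, while the RTT helpers convert between the
 * millisecond user ABI stored in metrics[] and the jiffies value used
 * inside the stack.
 *
 *	u32 mtu = dst_mtu(dst);
 *	unsigned long srtt = dst_metric_rtt(dst, RTAX_RTT);
 *
 *	if (mtu > 0 && srtt > 0)
 *		set_dst_metric_rtt(dst, RTAX_RTT, srtt + msecs_to_jiffies(1));
 */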

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_metric(dst, RTAX_FEATURES) & RTAX_FEATURE_ALLFRAG;
	/* Yes, _exactly_. This is paranoia. */
	barrier();
	return ret;
}

static inline int
dst_metric_locked(struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}
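
/* A minimal sketch of honouring the per-metric lock bits: a hypothetical
 * PMTU update helper (not the real protocol code) refuses to touch RTAX_MTU
 * when the administrator has locked it via RTAX_LOCK.
 *
 *	static void example_update_pmtu(struct dst_entry *dst, u32 mtu)
 *	{
 *		if (dst_metric_locked(dst, RTAX_MTU))
 *			return;
 *		if (mtu < dst_mtu(dst))
 *			dst->metrics[RTAX_MTU - 1] = mtu;
 *	}
 */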

static inline void dst_hold(struct dst_entry * dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline
struct dst_entry * dst_clone(struct dst_entry * dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

extern void dst_release(struct dst_entry *dst);
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_dst)
		dst_release(skb_dst(skb));
	skb->_skb_dst = 0UL;
}
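
/* A minimal sketch of the reference counting contract around the helpers
 * above: a caller that wants to keep using an skb's route after the skb may
 * be freed takes its own reference with dst_clone() and drops it later with
 * dst_release(); skb_dst_drop() is the per-skb counterpart used when the
 * skb gives up its reference.
 *
 *	struct dst_entry *dst = dst_clone(skb_dst(skb));
 *
 *	...
 *	dst_release(dst);
 */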

/* Children define the path of the packet through the
 * Linux networking stack.  Thus, destinations are stackable.
 */

static inline struct dst_entry *dst_pop(struct dst_entry *dst)
{
	struct dst_entry *child = dst_clone(dst->child);

	dst_release(dst);
	return child;
}
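
/* A minimal sketch of walking a stacked destination (for example an xfrm
 * bundle), assuming the caller starts out holding a reference on "top":
 * dst_pop() hands back a reference on the child while dropping the one on
 * the parent, so a single reference is carried down the stack.
 *
 *	struct dst_entry *d = dst_clone(top);
 *
 *	while (d->child != NULL)
 *		d = dst_pop(d);
 *	dst_release(d);
 */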

extern int dst_discard(struct sk_buff *skb);
extern void * dst_alloc(struct dst_ops * ops);
extern void __dst_free(struct dst_entry * dst);
extern struct dst_entry *dst_destroy(struct dst_entry * dst);

static inline void dst_free(struct dst_entry * dst)
{
	if (dst->obsolete > 1)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}
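
/* A minimal sketch of the usual teardown once an entry has been unlinked
 * from its parent list; "cache_unlink" is a hypothetical protocol helper.
 * The RCU callback defers the actual dst_free() past a grace period so
 * lockless readers of the chain stay safe.
 *
 *	cache_unlink(dst);
 *	call_rcu(&dst->rcu_head, dst_rcu_free);
 */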

static inline void dst_confirm(struct dst_entry *dst)
{
	if (dst)
		neigh_confirm(dst->neighbour);
}

static inline void dst_negative_advice(struct dst_entry **dst_p)
{
	struct dst_entry * dst = *dst_p;
	if (dst && dst->ops->negative_advice)
		*dst_p = dst->ops->negative_advice(dst);
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport.  */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}

/* Input packet from network to transport.  */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}
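
/* A minimal sketch of the dispatch performed by dst_input()/dst_output():
 * once the routing decision has attached a dst to the skb (the lookup call
 * below is hypothetical), delivery is simply an indirect call through the
 * input or output hook filled in by the protocol.
 *
 *	err = example_route_and_attach_dst(skb);
 *	if (err == 0)
 *		err = dst_input(skb);
 */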

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
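
/* A minimal sketch of revalidating a cached route with dst_check(), e.g.
 * for a per-socket cache; "cookie" comes from the owner of the dst and
 * example_relookup() is hypothetical.  ops->check() may return NULL for an
 * obsolete entry, in which case the old reference is dropped and a fresh
 * lookup is needed.
 *
 *	if (dst_check(dst, cookie) == NULL) {
 *		dst_release(dst);
 *		dst = example_relookup(sk);
 *	}
 */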

extern void		dst_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_WAIT = 1 << 0,
	XFRM_LOOKUP_ICMP = 1 << 1,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			      struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
				struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
#else
extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
		       struct flowi *fl, struct sock *sk, int flags);
extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			 struct flowi *fl, struct sock *sk, int flags);
#endif
#endif

#endif /* _NET_DST_H */