1 /* 2 * net/dst.h Protocol independent destination cache definitions. 3 * 4 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 5 * 6 */ 7 8 #ifndef _NET_DST_H 9 #define _NET_DST_H 10 11 #include <linux/netdevice.h> 12 #include <linux/rtnetlink.h> 13 #include <linux/rcupdate.h> 14 #include <linux/jiffies.h> 15 #include <net/neighbour.h> 16 #include <asm/processor.h> 17 18 /* 19 * 0 - no debugging messages 20 * 1 - rare events and bugs (default) 21 * 2 - trace mode. 22 */ 23 #define RT_CACHE_DEBUG 0 24 25 #define DST_GC_MIN (HZ/10) 26 #define DST_GC_INC (HZ/2) 27 #define DST_GC_MAX (120*HZ) 28 29 /* Each dst_entry has reference count and sits in some parent list(s). 30 * When it is removed from parent list, it is "freed" (dst_free). 31 * After this it enters dead state (dst->obsolete > 0) and if its refcnt 32 * is zero, it can be destroyed immediately, otherwise it is added 33 * to gc list and garbage collector periodically checks the refcnt. 34 */ 35 36 struct sk_buff; 37 38 struct dst_entry 39 { 40 struct rcu_head rcu_head; 41 struct dst_entry *child; 42 struct net_device *dev; 43 short error; 44 short obsolete; 45 int flags; 46 #define DST_HOST 1 47 #define DST_NOXFRM 2 48 #define DST_NOPOLICY 4 49 #define DST_NOHASH 8 50 unsigned long expires; 51 52 unsigned short header_len; /* more space at head required */ 53 unsigned short trailer_len; /* space to reserve at tail */ 54 55 unsigned int rate_tokens; 56 unsigned long rate_last; /* rate limiting for ICMP */ 57 58 struct dst_entry *path; 59 60 struct neighbour *neighbour; 61 struct hh_cache *hh; 62 #ifdef CONFIG_XFRM 63 struct xfrm_state *xfrm; 64 #else 65 void *__pad1; 66 #endif 67 int (*input)(struct sk_buff*); 68 int (*output)(struct sk_buff*); 69 70 struct dst_ops *ops; 71 72 u32 metrics[RTAX_MAX]; 73 74 #ifdef CONFIG_NET_CLS_ROUTE 75 __u32 tclassid; 76 #else 77 __u32 __pad2; 78 #endif 79 80 81 /* 82 * Align __refcnt to a 64 bytes alignment 83 * (L1_CACHE_SIZE would be too much) 84 */ 85 #ifdef CONFIG_64BIT 86 long __pad_to_align_refcnt[2]; 87 #else 88 long __pad_to_align_refcnt[1]; 89 #endif 90 /* 91 * __refcnt wants to be on a different cache line from 92 * input/output/ops or performance tanks badly 93 */ 94 atomic_t __refcnt; /* client references */ 95 int __use; 96 unsigned long lastuse; 97 union { 98 struct dst_entry *next; 99 struct rtable *rt_next; 100 struct rt6_info *rt6_next; 101 struct dn_route *dn_next; 102 }; 103 }; 104 105 106 struct dst_ops 107 { 108 unsigned short family; 109 __be16 protocol; 110 unsigned gc_thresh; 111 112 int (*gc)(struct dst_ops *ops); 113 struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); 114 void (*destroy)(struct dst_entry *); 115 void (*ifdown)(struct dst_entry *, 116 struct net_device *dev, int how); 117 struct dst_entry * (*negative_advice)(struct dst_entry *); 118 void (*link_failure)(struct sk_buff *); 119 void (*update_pmtu)(struct dst_entry *dst, u32 mtu); 120 int (*local_out)(struct sk_buff *skb); 121 122 atomic_t entries; 123 struct kmem_cache *kmem_cachep; 124 struct net *dst_net; 125 }; 126 127 #ifdef __KERNEL__ 128 129 static inline u32 130 dst_metric(const struct dst_entry *dst, int metric) 131 { 132 return dst->metrics[metric-1]; 133 } 134 135 static inline u32 dst_mtu(const struct dst_entry *dst) 136 { 137 u32 mtu = dst_metric(dst, RTAX_MTU); 138 /* 139 * Alexey put it here, so ask him about it :) 140 */ 141 barrier(); 142 return mtu; 143 } 144 145 /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */ 146 static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric) 147 { 148 return msecs_to_jiffies(dst_metric(dst, metric)); 149 } 150 151 static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric, 152 unsigned long rtt) 153 { 154 dst->metrics[metric-1] = jiffies_to_msecs(rtt); 155 } 156 157 static inline u32 158 dst_allfrag(const struct dst_entry *dst) 159 { 160 int ret = dst_metric(dst, RTAX_FEATURES) & RTAX_FEATURE_ALLFRAG; 161 /* Yes, _exactly_. This is paranoia. */ 162 barrier(); 163 return ret; 164 } 165 166 static inline int 167 dst_metric_locked(struct dst_entry *dst, int metric) 168 { 169 return dst_metric(dst, RTAX_LOCK) & (1<<metric); 170 } 171 172 static inline void dst_hold(struct dst_entry * dst) 173 { 174 /* 175 * If your kernel compilation stops here, please check 176 * __pad_to_align_refcnt declaration in struct dst_entry 177 */ 178 BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63); 179 atomic_inc(&dst->__refcnt); 180 } 181 182 static inline void dst_use(struct dst_entry *dst, unsigned long time) 183 { 184 dst_hold(dst); 185 dst->__use++; 186 dst->lastuse = time; 187 } 188 189 static inline 190 struct dst_entry * dst_clone(struct dst_entry * dst) 191 { 192 if (dst) 193 atomic_inc(&dst->__refcnt); 194 return dst; 195 } 196 197 extern void dst_release(struct dst_entry *dst); 198 static inline void skb_dst_drop(struct sk_buff *skb) 199 { 200 if (skb->_skb_dst) 201 dst_release(skb_dst(skb)); 202 skb->_skb_dst = 0UL; 203 } 204 205 /* Children define the path of the packet through the 206 * Linux networking. Thus, destinations are stackable. 207 */ 208 209 static inline struct dst_entry *dst_pop(struct dst_entry *dst) 210 { 211 struct dst_entry *child = dst_clone(dst->child); 212 213 dst_release(dst); 214 return child; 215 } 216 217 extern int dst_discard(struct sk_buff *skb); 218 extern void * dst_alloc(struct dst_ops * ops); 219 extern void __dst_free(struct dst_entry * dst); 220 extern struct dst_entry *dst_destroy(struct dst_entry * dst); 221 222 static inline void dst_free(struct dst_entry * dst) 223 { 224 if (dst->obsolete > 1) 225 return; 226 if (!atomic_read(&dst->__refcnt)) { 227 dst = dst_destroy(dst); 228 if (!dst) 229 return; 230 } 231 __dst_free(dst); 232 } 233 234 static inline void dst_rcu_free(struct rcu_head *head) 235 { 236 struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head); 237 dst_free(dst); 238 } 239 240 static inline void dst_confirm(struct dst_entry *dst) 241 { 242 if (dst) 243 neigh_confirm(dst->neighbour); 244 } 245 246 static inline void dst_negative_advice(struct dst_entry **dst_p) 247 { 248 struct dst_entry * dst = *dst_p; 249 if (dst && dst->ops->negative_advice) 250 *dst_p = dst->ops->negative_advice(dst); 251 } 252 253 static inline void dst_link_failure(struct sk_buff *skb) 254 { 255 struct dst_entry *dst = skb_dst(skb); 256 if (dst && dst->ops && dst->ops->link_failure) 257 dst->ops->link_failure(skb); 258 } 259 260 static inline void dst_set_expires(struct dst_entry *dst, int timeout) 261 { 262 unsigned long expires = jiffies + timeout; 263 264 if (expires == 0) 265 expires = 1; 266 267 if (dst->expires == 0 || time_before(expires, dst->expires)) 268 dst->expires = expires; 269 } 270 271 /* Output packet to network from transport. */ 272 static inline int dst_output(struct sk_buff *skb) 273 { 274 return skb_dst(skb)->output(skb); 275 } 276 277 /* Input packet from network to transport. */ 278 static inline int dst_input(struct sk_buff *skb) 279 { 280 return skb_dst(skb)->input(skb); 281 } 282 283 static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie) 284 { 285 if (dst->obsolete) 286 dst = dst->ops->check(dst, cookie); 287 return dst; 288 } 289 290 extern void dst_init(void); 291 292 /* Flags for xfrm_lookup flags argument. */ 293 enum { 294 XFRM_LOOKUP_WAIT = 1 << 0, 295 XFRM_LOOKUP_ICMP = 1 << 1, 296 }; 297 298 struct flowi; 299 #ifndef CONFIG_XFRM 300 static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p, 301 struct flowi *fl, struct sock *sk, int flags) 302 { 303 return 0; 304 } 305 static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, 306 struct flowi *fl, struct sock *sk, int flags) 307 { 308 return 0; 309 } 310 #else 311 extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p, 312 struct flowi *fl, struct sock *sk, int flags); 313 extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, 314 struct flowi *fl, struct sock *sk, int flags); 315 #endif 316 #endif 317 318 #endif /* _NET_DST_H */ 319