/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

/*
 * 0 - no debugging messages
 * 1 - rare events and bugs (default)
 * 2 - trace mode.
 */
#define RT_CACHE_DEBUG		0

/* Garbage collector interval bounds for the dst cache, in jiffies. */
#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has reference count and sits in some parent list(s).
 * When it is removed from parent list, it is "freed" (dst_free).
 * After this it enters dead state (dst->obsolete > 0) and if its refcnt
 * is zero, it can be destroyed immediately, otherwise it is added
 * to gc list and garbage collector periodically checks the refcnt.
 */

struct sk_buff;

/*
 * Protocol-independent destination cache entry: the resolved route state
 * attached to an sk_buff (output device, neighbour, per-route metrics,
 * and the input/output packet handlers).
 */
struct dst_entry {
	struct rcu_head		rcu_head;	/* for deferred free (see dst_rcu_free) */
	struct dst_entry	*child;		/* next dst in a stacked path (see skb_dst_pop) */
	struct net_device       *dev;
	short			error;
	short			obsolete;	/* > 0 once the entry is dead (see dst_free) */
	int			flags;
#define DST_HOST		1
#define DST_NOXFRM		2
#define DST_NOPOLICY		4
#define DST_NOHASH		8
	unsigned long		expires;	/* jiffies; 0 means "never" (see dst_set_expires) */

	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */

	unsigned int		rate_tokens;
	unsigned long		rate_last;	/* rate limiting for ICMP */

	struct dst_entry	*path;

	struct neighbour	*neighbour;
	struct hh_cache		*hh;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;	/* keep layout identical without CONFIG_XFRM */
#endif
	int			(*input)(struct sk_buff*);
	int			(*output)(struct sk_buff*);

	struct  dst_ops	        *ops;

	u32			metrics[RTAX_MAX];	/* indexed by RTAX_* - 1 (see dst_metric) */

#ifdef CONFIG_NET_CLS_ROUTE
	__u32			tclassid;
#else
	__u32			__pad2;
#endif


	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references	*/
	int			__use;
	unsigned long		lastuse;
	union {
		struct dst_entry *next;
		struct rtable    *rt_next;
		struct rt6_info  *rt6_next;
		struct dn_route  *dn_next;
	};
};

#ifdef __KERNEL__

/* Read metric @metric (an RTAX_* constant, 1-based) from @dst. */
static inline u32
dst_metric(const struct dst_entry *dst, int metric)
{
	return dst->metrics[metric-1];
}

/* Test @feature (an RTAX_FEATURE_* bit) against the RTAX_FEATURES metric. */
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

/* Path MTU for this route, from the RTAX_MTU metric. */
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	u32 mtu = dst_metric(dst, RTAX_MTU);
	/*
	 * Alexey put it here, so ask him about it :)
	 * (Compiler barrier; presumably pins the single read of the
	 * metric against reordering/refetch -- TODO confirm intent.)
	 */
	barrier();
	return mtu;
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

/* Store an RTT @rtt given in jiffies as milliseconds (user ABI units). */
static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
				      unsigned long rtt)
{
	dst->metrics[metric-1] = jiffies_to_msecs(rtt);
}

/* Nonzero when the ALLFRAG feature bit is set for this route. */
static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	/* Yes, _exactly_. This is paranoia. */
	barrier();
	return ret;
}

/* Nonzero when metric @metric is locked (bit set in RTAX_LOCK). */
static inline int
dst_metric_locked(struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

/* Take a reference on @dst. */
static inline void dst_hold(struct dst_entry * dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

/* Take a reference and record a use at @time (jiffies). */
static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

/* Record a use without taking a reference (for noref/RCU holders). */
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

/* Take a reference on @dst if non-NULL; returns @dst for chaining. */
static inline
struct dst_entry * dst_clone(struct dst_entry * dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

extern void dst_release(struct dst_entry *dst);

/* Release the dst encoded in an skb->_skb_refdst word, unless the
 * SKB_DST_NOREF bit says no reference was actually taken.
 */
static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

/* Copy @oskb's dst word (including its NOREF marking) to @nskb, taking
 * a new reference only when the source held a real one.
 */
static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 * (noref dsts are only valid under RCU, hence the WARN_ON).
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}


/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	skb->rxhash = 0;
	skb_dst_drop(skb);
	nf_reset(skb);
}

/* Children define the path of the packet through the
 * Linux networking. Thus, destinations are stackable.
 */

/* Detach and return the child of skb's dst, dropping the skb's own dst
 * reference.  NOTE(review): no reference is taken on the returned child
 * here -- presumably callers own one via the parent; confirm against
 * call sites before relying on it.
 */
static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = skb_dst(skb)->child;

	skb_dst_drop(skb);
	return child;
}

extern int dst_discard(struct sk_buff *skb);
extern void * dst_alloc(struct dst_ops * ops);
extern void __dst_free(struct dst_entry * dst);
extern struct dst_entry *dst_destroy(struct dst_entry * dst);

/* Mark @dst dead: destroy it immediately when unreferenced, otherwise
 * hand it to __dst_free() for deferred destruction.  Entries already
 * dead (obsolete > 1) are left alone.
 */
static inline void dst_free(struct dst_entry * dst)
{
	if (dst->obsolete > 1)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

/* RCU callback wrapper around dst_free() (paired with rcu_head above). */
static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}

/* Confirm reachability of the dst's neighbour entry. */
static inline void dst_confirm(struct dst_entry *dst)
{
	if (dst)
		neigh_confirm(dst->neighbour);
}

/* Report a link failure to the owning protocol, if it has a handler. */
static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

/* Arm (or shorten) the expiry of @dst; @timeout is in jiffies.
 * expires == 0 is the "never expires" sentinel, so a computed expiry
 * landing exactly on 0 is nudged to 1.
 */
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

/* Revalidate a possibly-obsolete dst through its ops->check hook.
 * NOTE(review): the return contract (NULL vs replacement entry) is
 * defined by each protocol's dst_ops -- confirm against implementers.
 */
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

extern void dst_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_WAIT = 1 << 0,
	XFRM_LOOKUP_ICMP = 1 << 1,
};

struct flowi;
#ifndef CONFIG_XFRM
/* With xfrm compiled out, lookups are no-ops that always succeed. */
static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			      struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
				struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
#else
extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
		       struct flowi *fl, struct sock *sk, int flags);
extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			 struct flowi *fl, struct sock *sk, int flags);
#endif
#endif

#endif /* _NET_DST_H */