/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#ifndef _NET_INETPEER_H
#define _NET_INETPEER_H

#include <linux/types.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <linux/atomic.h>

struct inetpeer_addr_base {
	union {
		__be32		a4;
		__be32		a6[4];
	};
};

struct inetpeer_addr {
	struct inetpeer_addr_base	addr;
	__u16				family;
};

struct inet_peer {
	/* group together avl_left,avl_right,v4daddr to speedup lookups */
	struct inet_peer __rcu	*avl_left, *avl_right;
	struct inetpeer_addr	daddr;
	__u32			avl_height;

	u32			metrics[RTAX_MAX];
	u32			rate_tokens;	/* rate limiting for ICMP */
	unsigned long		rate_last;
	unsigned long		pmtu_expires;
	u32			pmtu_orig;
	u32			pmtu_learned;
	struct inetpeer_addr_base redirect_learned;
	union {
		struct list_head	gc_list;
		struct rcu_head		gc_rcu;
	};
	/*
	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t	rid;		/* Frag reception counter */
			atomic_t	ip_id_count;	/* IP ID for the next packet */
			__u32		tcp_ts;
			__u32		tcp_ts_stamp;
		};
		struct rcu_head		rcu;
		struct inet_peer	*gc_next;
	};

	/* following fields might be frequently dirtied */
	__u32			dtime;	/* the time of last use of not referenced entries */
	atomic_t		refcnt;
};

void			inet_initpeers(void) __init;

#define INETPEER_METRICS_NEW	(~(u32) 0)

static inline bool inet_metrics_new(const struct inet_peer *p)
{
	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
}

/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create);

static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
{
	struct inetpeer_addr daddr;

	daddr.addr.a4 = v4daddr;
	daddr.family = AF_INET;
	return inet_getpeer(&daddr, create);
}

static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr, int create)
{
	struct inetpeer_addr daddr;

	*(struct in6_addr *)daddr.addr.a6 = *v6daddr;
	daddr.family = AF_INET6;
	return inet_getpeer(&daddr, create);
}

/* can be called from BH context or outside */
extern void inet_putpeer(struct inet_peer *p);
extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

extern void inetpeer_invalidate_tree(int family);

/*
 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
 * tcp_ts_stamp if no refcount is taken on inet_peer
 */
static inline void inet_peer_refcheck(const struct inet_peer *p)
{
	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
}


/* can be called with or without local BH being disabled */
static inline int inet_getid(struct inet_peer *p, int more)
{
	int old, new;
	more++;
	inet_peer_refcheck(p);
	do {
		old = atomic_read(&p->ip_id_count);
		new = old + more;
		if (!new)
			new = 1;
	} while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
	return new;
}

#endif /* _NET_INETPEER_H */
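
/*
 * Illustrative usage sketch (not part of this header in the kernel tree):
 * a caller that wants per-destination rate limiting, ICMP-style, could
 * look up the peer entry for the destination, consult the token-bucket
 * limiter and drop its reference again.  The variable names (iph, allow)
 * and the send_reply() helper below are hypothetical, invented only for
 * this example; the timeout is in jiffies, so HZ means roughly one
 * token per second.
 *
 *	struct inet_peer *peer;
 *	bool allow = false;
 *
 *	peer = inet_getpeer_v4(iph->daddr, 1);		// create entry if missing
 *	if (peer) {
 *		allow = inet_peer_xrlim_allow(peer, HZ);	// consume a token?
 *		inet_putpeer(peer);				// release the refcount
 *	}
 *	if (allow)
 *		send_reply();
 */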