/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H

#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but it really holds all
 * sockets except those in LISTEN state.
 */
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The result is that tcp_v[46]_verify_bind() can just check this flag
 * bit: if it is set and the socket trying to bind has sk->sk_reuse set,
 * we don't even have to walk the owners list at all, we simply report
 * that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
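/* An illustrative sketch (not part of the in-tree API): the test described
 * above, applied when a new socket asks to bind to a port whose bind bucket
 * already exists.  The helper name is hypothetical; in the kernel this logic
 * lives in the bind/get-port paths.
 *
 *	static bool example_can_fast_reuse(const struct inet_bind_bucket *tb,
 *					   const struct sock *newsk)
 *	{
 *		// tb->fastreuse stays set only while every owner so far has
 *		// passed the (sk_reuse && !TCP_LISTEN) test; if the newcomer
 *		// passes it too, the owners list need not be walked at all.
 *		return tb->fastreuse > 0 &&
 *		       newsk->sk_reuse &&
 *		       newsk->sk_state != TCP_LISTEN;
 *	}
 */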
#define FASTREUSEPORT_ANY	1
#define FASTREUSEPORT_STRICT	2

struct inet_bind_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
	signed char		fastreuse;
	signed char		fastreuseport;
	kuid_t			fastuid;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		fast_v6_rcv_saddr;
#endif
	__be32			fast_rcv_saddr;
	unsigned short		fast_sk_family;
	bool			fast_ipv6_only;
	struct hlist_node	node;
	struct hlist_head	owners;
};

static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
	hlist_for_each_entry(tb, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/*
 * Sockets can be hashed in the established or the listening table.
 */
struct inet_listen_hashbucket {
	spinlock_t		lock;
	unsigned int		count;
	struct hlist_head	head;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 */
	struct inet_ehash_bucket	*ehash;
	spinlock_t			*ehash_locks;
	unsigned int			ehash_mask;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct kmem_cache		*bind_bucket_cachep;
	struct inet_bind_hashbucket	*bhash;
	unsigned int			bhash_size;

	/* The 2nd listener table hashed by local port and address */
	unsigned int			lhash2_mask;
	struct inet_listen_hashbucket	*lhash2;

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * might be often dirty.
	 */
	/* All sockets in TCP_LISTEN state will be in listening_hash.
	 * This is the only table where wildcard'd TCP sockets can
	 * exist.  listening_hash is only hashed by local port number.
	 * If lhash2 is initialized, the same socket will also be hashed
	 * to lhash2 by port and address.
	 */
	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
					____cacheline_aligned_in_smp;
};
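/* An illustrative sketch: how a receive-path lookup might pick its bucket in
 * each of the two listener tables for local address daddr and host-order port
 * hnum.  inet_lhashfn() and inet_lhash2_bucket() are defined further down in
 * this file; ipv4_portaddr_hash() is assumed to come from <net/ip.h>.
 *
 *	// listening_hash: keyed by local port only
 *	struct inet_listen_hashbucket *ilb =
 *		&hashinfo->listening_hash[inet_lhashfn(net, hnum)];
 *
 *	// lhash2: keyed by (local address, local port)
 *	struct inet_listen_hashbucket *ilb2 =
 *		inet_lhash2_bucket(hashinfo,
 *				   ipv4_portaddr_hash(net, daddr, hnum));
 */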
#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
	hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)

static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
	return &h->lhash2[hash & h->lhash2_mask];
}

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	kvfree(hashinfo->ehash_locks);
	hashinfo->ehash_locks = NULL;
}

static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
					int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
			struct inet_bind_hashbucket *head,
			const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
			      struct inet_bind_bucket *tb);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
			       const u32 bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
}
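/* An illustrative sketch: looking up the bind bucket for local port snum in
 * the style of inet_csk_get_port() (net/ipv4/inet_connection_sock.c), using
 * inet_bhashfn() and inet_bind_bucket_for_each() from above.  Conflict
 * checking and bucket creation are omitted.
 *
 *	struct inet_bind_hashbucket *head;
 *	struct inet_bind_bucket *tb;
 *
 *	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
 *	spin_lock_bh(&head->lock);
 *	inet_bind_bucket_for_each(tb, &head->chain)
 *		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
 *		    tb->port == snum)
 *			break;
 *	// tb is NULL after a full walk, i.e. no socket is bound to snum yet
 *	// and a bucket would be created with inet_bind_bucket_create().
 *	spin_unlock_bh(&head->lock);
 */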
/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo_init(struct inet_hashinfo *h);
void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
			 unsigned long numentries, int scale,
			 unsigned long low_limit,
			 unsigned long high_limit);

bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, const __be16 sport,
				    const __be32 daddr,
				    const unsigned short hnum,
				    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		struct sk_buff *skb, int doff,
		__be32 saddr, __be16 sport,
		__be32 daddr, __be16 dport, int dif, int sdif)
{
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
				      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))		&&	\
	 ((__sk)->sk_addrpair == (__cookie))		&&	\
	 (((__sk)->sk_bound_dev_if == (__dif))		||	\
	  ((__sk)->sk_bound_dev_if == (__sdif)))	&&	\
	 net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const int __name __deprecated __attribute__((unused))

#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))		&&	\
	 ((__sk)->sk_daddr == (__saddr))		&&	\
	 ((__sk)->sk_rcv_saddr == (__daddr))		&&	\
	 (((__sk)->sk_bound_dev_if == (__dif))		||	\
	  ((__sk)->sk_bound_dev_if == (__sdif)))	&&	\
	 net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
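/* An illustrative sketch: how the demux macros above are typically combined
 * when walking an established-hash chain, in the style of
 * __inet_lookup_established() (net/ipv4/inet_hashtables.c).  Refcounting and
 * the nulls end-marker re-check are omitted; hash is assumed to have already
 * been computed by the ehash hash function.
 *
 *	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
 *	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 *	INET_ADDR_COOKIE(acookie, saddr, daddr);
 *	struct hlist_nulls_node *node;
 *	struct sock *sk;
 *
 *	sk_nulls_for_each_rcu(sk, node, &head->chain) {
 *		if (sk->sk_hash != hash)
 *			continue;
 *		// one port-pair compare and one addr-pair compare instead of
 *		// four separate field compares
 *		if (likely(INET_MATCH(sk, net, acookie, saddr, daddr,
 *				      ports, dif, sdif)))
 *			return sk;
 *	}
 */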
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif);

static inline struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
			const __be32 saddr, const __be16 sport,
			const __be32 daddr, const __be16 dport,
			const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif, 0);
}

static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 struct sk_buff *skb, int doff,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif, const int sdif,
					 bool *refcounted)
{
	u16 hnum = ntohs(dport);
	struct sock *sk;

	/* On return, *refcounted tells the caller whether it owns a
	 * reference: established lookups take one, listener lookups
	 * are RCU-only.
	 */
	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
				       daddr, hnum, dif, sdif);
	*refcounted = true;
	if (sk)
		return sk;
	*refcounted = false;
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
				      sport, daddr, hnum, dif, sdif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			   dport, dif, 0, &refcounted);

	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
					     struct sk_buff *skb,
					     int doff,
					     const __be16 sport,
					     const __be16 dport,
					     const int sdif,
					     bool *refcounted)
{
	struct sock *sk = skb_steal_sock(skb);
	const struct iphdr *iph = ip_hdr(skb);

	*refcounted = true;
	if (sk)
		return sk;

	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
			     doff, iph->saddr, sport,
			     iph->daddr, dport, inet_iif(skb), sdif,
			     refcounted);
}

u32 inet6_ehashfn(const struct net *net,
		  const struct in6_addr *laddr, const u16 lport,
		  const struct in6_addr *faddr, const __be16 fport);

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			struct sock *sk, u32 port_offset,
			int (*check_established)(struct inet_timewait_death_row *,
						 struct sock *, __u16,
						 struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk);
#endif /* _INET_HASHTABLES_H */