/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but we really hold all sockets
 * except LISTEN ones.
 */
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;
};

/* There are a few simple rules that allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-) -DaveM
 */
#define FASTREUSEPORT_ANY	1
#define FASTREUSEPORT_STRICT	2
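/* Illustrative sketch only, not part of this header: one way the fastreuse
 * flag described above could be maintained as a new socket joins a bind
 * bucket.  The helper name is hypothetical; the real update logic lives in
 * the bind/hash implementation.  The flag stays set only while every owner
 * added so far has passed the (sk_reuse && !LISTEN) test:
 *
 *	static void example_update_fastreuse(struct inet_bind_bucket *tb,
 *					     struct sock *newsk)
 *	{
 *		if (!(newsk->sk_reuse && newsk->sk_state != TCP_LISTEN))
 *			tb->fastreuse = 0;
 *	}
 */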
struct inet_bind_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
	signed char		fastreuse;
	signed char		fastreuseport;
	kuid_t			fastuid;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		fast_v6_rcv_saddr;
#endif
	__be32			fast_rcv_saddr;
	unsigned short		fast_sk_family;
	bool			fast_ipv6_only;
	struct hlist_node	node;
	struct hlist_head	owners;
};

struct inet_bind2_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
	union {
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		v6_rcv_saddr;
#endif
		__be32			rcv_saddr;
	};
	/* Node in the inet2_bind_hashbucket chain */
	struct hlist_node	node;
	/* List of sockets hashed to this bucket */
	struct hlist_head	owners;
};

static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

static inline struct net *ib2_net(struct inet_bind2_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
	hlist_for_each_entry(tb, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* This is synchronized using the inet_bind_hashbucket's spinlock.
 * Instead of having separate spinlocks, the inet_bind2_hashbucket can share
 * the inet_bind_hashbucket's lock, given that in every case where the bhash2
 * table is useful, a lookup in the bhash table also occurs.
 */
struct inet_bind2_hashbucket {
	struct hlist_head	chain;
};

/* Sockets can be hashed in established or listening table.
 * We must use different 'nulls' end-of-chain value for all hash buckets :
 * A socket might transition from ESTABLISHED to LISTEN state without
 * an RCU grace period.  A lookup in ehash table needs to handle this case.
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
	spinlock_t		lock;
	struct hlist_nulls_head	nulls_head;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
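/* Illustrative sketch only: finding the inet_bind_bucket for a given
 * net/port/l3mdev tuple by walking one bhash chain under its lock, using
 * the iterator above and the inet_bhashfn()/check_bind_bucket_match()
 * helpers declared later in this header:
 *
 *	struct inet_bind_hashbucket *head;
 *	struct inet_bind_bucket *tb;
 *
 *	head = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)];
 *	spin_lock_bh(&head->lock);
 *	inet_bind_bucket_for_each(tb, &head->chain)
 *		if (check_bind_bucket_match(tb, net, port, l3mdev))
 *			break;
 *	spin_unlock_bh(&head->lock);
 *
 * On loop exit, tb is the matching bucket, or NULL if none exists.
 */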
struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 */
	struct inet_ehash_bucket	*ehash;
	spinlock_t			*ehash_locks;
	unsigned int			ehash_mask;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct kmem_cache		*bind_bucket_cachep;
	struct inet_bind_hashbucket	*bhash;
	/* The 2nd binding table hashed by port and address.
	 * This is used primarily for expediting the resolution of bind
	 * conflicts.
	 */
	struct kmem_cache		*bind2_bucket_cachep;
	struct inet_bind2_hashbucket	*bhash2;
	unsigned int			bhash_size;

	/* The 2nd listener table hashed by local port and address */
	unsigned int			lhash2_mask;
	struct inet_listen_hashbucket	*lhash2;
};

static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
	return &h->lhash2[hash & h->lhash2_mask];
}

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
{
	kfree(h->lhash2);
	h->lhash2 = NULL;
}

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	kvfree(hashinfo->ehash_locks);
	hashinfo->ehash_locks = NULL;
}

static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
					int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
			struct inet_bind_hashbucket *head,
			const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
			      struct inet_bind_bucket *tb);

static inline bool check_bind_bucket_match(struct inet_bind_bucket *tb,
					   struct net *net,
					   const unsigned short port,
					   int l3mdev)
{
	return net_eq(ib_net(tb), net) && tb->port == port &&
		tb->l3mdev == l3mdev;
}

struct inet_bind2_bucket *
inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
			 struct inet_bind2_hashbucket *head,
			 const unsigned short port, int l3mdev,
			 const struct sock *sk);

void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
			       struct inet_bind2_bucket *tb);

struct inet_bind2_bucket *
inet_bind2_bucket_find(struct inet_hashinfo *hinfo, struct net *net,
		       const unsigned short port, int l3mdev,
		       struct sock *sk,
		       struct inet_bind2_hashbucket **head);

bool check_bind2_bucket_match_nulladdr(struct inet_bind2_bucket *tb,
				       struct net *net,
				       const unsigned short port,
				       int l3mdev,
				       const struct sock *sk);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
			       const u32 bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, const unsigned short snum);
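/* Illustrative sketch only: serializing a modification of the ehash chain
 * a socket hashes to, using the per-bucket accessors above.  This mirrors
 * how the hashing code takes the chain lock before inserting a node:
 *
 *	struct hlist_nulls_head *list;
 *	spinlock_t *lock;
 *
 *	list = &inet_ehash_bucket(hashinfo, sk->sk_hash)->chain;
 *	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 *
 *	spin_lock(lock);
 *	__sk_nulls_add_node_rcu(sk, list);
 *	spin_unlock(lock);
 *
 * Note that ehash_locks_mask is typically smaller than ehash_mask, so one
 * lock covers several buckets.
 */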
/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
			 unsigned long numentries, int scale,
			 unsigned long low_limit,
			 unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
			 bool *found_dup_sk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, const __be16 sport,
				    const __be32 daddr,
				    const unsigned short hnum,
				    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		struct sk_buff *skb, int doff,
		__be32 saddr, __be16 sport,
		__be32 daddr, __be16 dport, int dif, int sdif)
{
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
				      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */

static inline bool inet_match(struct net *net, const struct sock *sk,
			      const __addrpair cookie, const __portpair ports,
			      int dif, int sdif)
{
	int bound_dev_if;

	if (!net_eq(sock_net(sk), net) ||
	    sk->sk_portpair != ports ||
	    sk->sk_addrpair != cookie)
		return false;

	/* Paired with WRITE_ONCE() from sock_bindtoindex_locked() */
	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	return bound_dev_if == dif || bound_dev_if == sdif;
}
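/* Illustrative sketch only: how an established-table lookup packs its keys
 * and tests a candidate socket with inet_match().  saddr/daddr/sport are in
 * network byte order; hnum is the host-order local port:
 *
 *	INET_ADDR_COOKIE(acookie, saddr, daddr);
 *	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 *
 *	if (inet_match(net, sk, acookie, ports, dif, sdif))
 *		return sk;
 *
 * A hit means the full 4-tuple, the netns and the bound device all match,
 * via just two wide compares plus the device check.
 */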
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif);

static inline struct sock *
	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif, 0);
}

static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 struct sk_buff *skb, int doff,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif, const int sdif,
					 bool *refcounted)
{
	u16 hnum = ntohs(dport);
	struct sock *sk;

	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
				       daddr, hnum, dif, sdif);
	*refcounted = true;
	if (sk)
		return sk;
	*refcounted = false;
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
				      sport, daddr, hnum, dif, sdif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			   dport, dif, 0, &refcounted);

	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
					     struct sk_buff *skb,
					     int doff,
					     const __be16 sport,
					     const __be16 dport,
					     const int sdif,
					     bool *refcounted)
{
	struct sock *sk = skb_steal_sock(skb, refcounted);
	const struct iphdr *iph = ip_hdr(skb);

	if (sk)
		return sk;

	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
			     doff, iph->saddr, sport,
			     iph->daddr, dport, inet_iif(skb), sdif,
			     refcounted);
}

u32 inet6_ehashfn(const struct net *net,
		  const struct in6_addr *laddr, const u16 lport,
		  const struct in6_addr *faddr, const __be16 fport);

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			struct sock *sk, u64 port_offset,
			int (*check_established)(struct inet_timewait_death_row *,
						 struct sock *, __u16,
						 struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk);
#endif /* _INET_HASHTABLES_H */