xref: /linux/include/net/inet_hashtables.h (revision bdfa82f5b8998a6311a8ef0cf89ad413f5cd9ea4)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but it really holds all
 * sockets except LISTEN ones.
 */
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit.  If it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
#define FASTREUSEPORT_ANY	1
#define FASTREUSEPORT_STRICT	2
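
/* Illustrative sketch (not part of this header): the fast path described
 * above boils down to a check along these lines when a socket binds to a
 * port whose inet_bind_bucket already exists; see inet_csk_get_port() and
 * inet_csk_update_fastreuse() for the real logic:
 *
 *	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
 *
 *	if (tb->fastreuse > 0 && reuse)
 *		goto success;	// no need to walk the bucket's owners
 *	// otherwise fall back to a full inet_csk_bind_conflict() walk
 */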

struct inet_bind_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
	signed char		fastreuse;
	signed char		fastreuseport;
	kuid_t			fastuid;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		fast_v6_rcv_saddr;
#endif
	__be32			fast_rcv_saddr;
	unsigned short		fast_sk_family;
	bool			fast_ipv6_only;
	struct hlist_node	node;
	struct hlist_head	bhash2;
	struct rcu_head		rcu;
};

struct inet_bind2_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
#if IS_ENABLED(CONFIG_IPV6)
	unsigned short		addr_type;
	struct in6_addr		v6_rcv_saddr;
#define rcv_saddr		v6_rcv_saddr.s6_addr32[3]
#else
	__be32			rcv_saddr;
#endif
	/* Node in the bhash2 inet_bind_hashbucket chain */
	struct hlist_node	node;
	struct hlist_node	bhash_node;
	/* List of sockets hashed to this bucket */
	struct hlist_head	owners;
};

static inline struct net *ib_net(const struct inet_bind_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

static inline struct net *ib2_net(const struct inet_bind2_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
	hlist_for_each_entry(tb, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* Sockets can be hashed in the established or the listening table.
 * We must use different 'nulls' end-of-chain values for all hash buckets:
 * a socket might transition from ESTABLISHED to LISTEN state without an
 * RCU grace period. A lookup in the ehash table needs to handle this case.
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
	spinlock_t		lock;
	struct hlist_nulls_head	nulls_head;
};
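
/* Illustrative sketch of why distinct nulls values matter: an RCU lookup
 * walks a chain without locks and, at the end, re-checks the nulls value it
 * landed on.  If that value does not match the slot it started from, the
 * chain was switched under it and the walk restarts, roughly as
 * __inet_lookup_established() does:
 *
 *	begin:
 *	sk_nulls_for_each_rcu(sk, node, &head->chain) {
 *		if (sk->sk_hash != hash)
 *			continue;
 *		...
 *	}
 *	if (get_nulls_value(node) != slot)
 *		goto begin;
 */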

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 */
	struct inet_ehash_bucket	*ehash;
	spinlock_t			*ehash_locks;
	unsigned int			ehash_mask;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct kmem_cache		*bind_bucket_cachep;
	/* This bind table is hashed by local port */
	struct inet_bind_hashbucket	*bhash;
	struct kmem_cache		*bind2_bucket_cachep;
	/* This bind table is hashed by local port and sk->sk_rcv_saddr (ipv4)
	 * or sk->sk_v6_rcv_saddr (ipv6). This 2nd bind table is used
	 * primarily for expediting bind conflict resolution.
	 */
	struct inet_bind_hashbucket	*bhash2;
	unsigned int			bhash_size;

	/* The 2nd listener table hashed by local port and address */
	unsigned int			lhash2_mask;
	struct inet_listen_hashbucket	*lhash2;

	bool				pernet;
} ____cacheline_aligned_in_smp;

static inline struct inet_hashinfo *tcp_get_hashinfo(const struct sock *sk)
{
	return sock_net(sk)->ipv4.tcp_death_row.hashinfo;
}

static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
	return &h->lhash2[hash & h->lhash2_mask];
}

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
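
/* Illustrative sketch (assumed context): writers take the per-bucket lock
 * returned by inet_ehash_lockp() before touching a chain, roughly as
 * inet_ehash_insert() does:
 *
 *	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, sk->sk_hash);
 *	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 *
 *	spin_lock(lock);
 *	__sk_nulls_add_node_rcu(sk, &head->chain);
 *	spin_unlock(lock);
 */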

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
{
	kfree(h->lhash2);
	h->lhash2 = NULL;
}

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	kvfree(hashinfo->ehash_locks);
	hashinfo->ehash_locks = NULL;
}

struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
						 unsigned int ehash_entries);
void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo);

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
			struct inet_bind_hashbucket *head,
			const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct inet_bind_bucket *tb);

bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
			    const struct net *net, unsigned short port,
			    int l3mdev);

struct inet_bind2_bucket *
inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
			 struct inet_bind_hashbucket *head,
			 struct inet_bind_bucket *tb,
			 const struct sock *sk);

void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
			       struct inet_bind2_bucket *tb);

struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head,
		       const struct net *net,
		       unsigned short port, int l3mdev,
		       const struct sock *sk);

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb,
				      const struct net *net, unsigned short port,
				      int l3mdev, const struct sock *sk);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
			       const u32 bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

static inline struct inet_bind_hashbucket *
inet_bhashfn_portaddr(const struct inet_hashinfo *hinfo, const struct sock *sk,
		      const struct net *net, unsigned short port)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, sk->sk_rcv_saddr, port);
	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}
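
/* Illustrative sketch: during bind, the port-keyed bucket (bhash) and the
 * port+address-keyed bucket (bhash2) for a socket are located along these
 * lines; cf. inet_csk_get_port():
 *
 *	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
 *	struct net *net = sock_net(sk);
 *	struct inet_bind_hashbucket *head, *head2;
 *
 *	head = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)];
 *	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
 */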

struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port);

/* This should be called whenever a socket's sk_rcv_saddr (ipv4) or
 * sk_v6_rcv_saddr (ipv6) changes after it has been bound. The socket's
 * rcv_saddr field should already have been updated when this is called.
 */
int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family);
void inet_bhash2_reset_saddr(struct sock *sk);

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, unsigned short port);

/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
			 unsigned long numentries, int scale,
			 unsigned long low_limit,
			 unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
			 bool *found_dup_sk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(const struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, const __be16 sport,
				    const __be32 daddr,
				    const unsigned short hnum,
				    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		struct sk_buff *skb, int doff,
		__be32 saddr, __be16 sport,
		__be32 daddr, __be16 dport, int dif, int sdif)
{
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
				      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */

static inline bool inet_match(const struct net *net, const struct sock *sk,
			      const __addrpair cookie, const __portpair ports,
			      int dif, int sdif)
{
	if (!net_eq(sock_net(sk), net) ||
	    sk->sk_portpair != ports ||
	    sk->sk_addrpair != cookie)
		return false;

	/* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
	return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
				    sdif);
}
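
/* Illustrative sketch: a lookup builds the address cookie and port pair once,
 * then compares every candidate with a single inet_match() call, roughly as
 * __inet_lookup_established() does:
 *
 *	INET_ADDR_COOKIE(acookie, saddr, daddr);
 *	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 *
 *	sk_nulls_for_each_rcu(sk, node, &head->chain) {
 *		if (sk->sk_hash != hash)
 *			continue;
 *		if (likely(inet_match(net, sk, acookie, ports, dif, sdif)))
 *			return sk;	// (refcount handling omitted)
 *	}
 */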

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(const struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif);

typedef u32 (inet_ehashfn_t)(const struct net *net,
			      const __be32 laddr, const __u16 lport,
			      const __be32 faddr, const __be16 fport);

inet_ehashfn_t inet_ehashfn;

INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn);

struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk,
				   struct sk_buff *skb, int doff,
				   __be32 saddr, __be16 sport,
				   __be32 daddr, unsigned short hnum,
				   inet_ehashfn_t *ehashfn);

struct sock *inet_lookup_run_sk_lookup(const struct net *net,
				       int protocol,
				       struct sk_buff *skb, int doff,
				       __be32 saddr, __be16 sport,
				       __be32 daddr, u16 hnum, const int dif,
				       inet_ehashfn_t *ehashfn);

static inline struct sock *
	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif, 0);
}

static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 struct sk_buff *skb, int doff,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif, const int sdif,
					 bool *refcounted)
{
	u16 hnum = ntohs(dport);
	struct sock *sk;

	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
				       daddr, hnum, dif, sdif);
	*refcounted = true;
	if (sk)
		return sk;
	*refcounted = false;
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
				      sport, daddr, hnum, dif, sdif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			   dport, dif, 0, &refcounted);

	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
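
/* Illustrative usage sketch: inet_lookup() hands back a referenced socket (or
 * NULL), so a caller outside the RX fast path is expected to drop that
 * reference when it is done:
 *
 *	sk = inet_lookup(net, hashinfo, skb, doff, saddr, sport,
 *			 daddr, dport, dif);
 *	if (sk) {
 *		...
 *		sock_put(sk);
 *	}
 */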

static inline
struct sock *inet_steal_sock(struct net *net, struct sk_buff *skb, int doff,
			     const __be32 saddr, const __be16 sport,
			     const __be32 daddr, const __be16 dport,
			     bool *refcounted, inet_ehashfn_t *ehashfn)
{
	struct sock *sk, *reuse_sk;
	bool prefetched;

	sk = skb_steal_sock(skb, refcounted, &prefetched);
	if (!sk)
		return NULL;

	if (!prefetched || !sk_fullsock(sk))
		return sk;

	if (sk->sk_protocol == IPPROTO_TCP) {
		if (sk->sk_state != TCP_LISTEN)
			return sk;
	} else if (sk->sk_protocol == IPPROTO_UDP) {
		if (sk->sk_state != TCP_CLOSE)
			return sk;
	} else {
		return sk;
	}

	reuse_sk = inet_lookup_reuseport(net, sk, skb, doff,
					 saddr, sport, daddr, ntohs(dport),
					 ehashfn);
	if (!reuse_sk)
		return sk;

	/* We've chosen a new reuseport sock which is never refcounted. This
	 * implies that sk also isn't refcounted.
	 */
	WARN_ON_ONCE(*refcounted);

	return reuse_sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
					     struct sk_buff *skb,
					     int doff,
					     const __be16 sport,
					     const __be16 dport,
					     const int sdif,
					     bool *refcounted)
{
	struct net *net = dev_net_rcu(skb_dst(skb)->dev);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *sk;

	sk = inet_steal_sock(net, skb, doff, iph->saddr, sport, iph->daddr, dport,
			     refcounted, inet_ehashfn);
	if (IS_ERR(sk))
		return NULL;
	if (sk)
		return sk;

	return __inet_lookup(net, hashinfo, skb,
			     doff, iph->saddr, sport,
			     iph->daddr, dport, inet_iif(skb), sdif,
			     refcounted);
}

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}
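
/* Illustrative sketch: with IPv6 enabled, the helpers above keep the IPv6
 * view of an IPv4 address in v4-mapped form, e.g.
 *
 *	sk_daddr_set(sk, htonl(0xc0000201));	// 192.0.2.1
 *	// sk->sk_v6_daddr now reads ::ffff:192.0.2.1
 */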

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			struct sock *sk, u64 port_offset,
			u32 hash_port0,
			int (*check_established)(struct inet_timewait_death_row *,
						 struct sock *, __u16,
						 struct inet_timewait_sock **,
						 bool rcu_lookup,
						 u32 hash));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk);
#endif /* _INET_HASHTABLES_H */