xref: /linux/net/ipv4/inet_hashtables.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
#include <net/hotdata.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

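/*
 * Hash the 4-tuple of an established IPv4 connection. The hashing
 * secret is initialized lazily on first use, and net_hash_mix() folds
 * in the network namespace so that identical 4-tuples in different
 * netns do not hash predictably to the same bucket.
 */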
u32 inet_ehashfn(const struct net *net, const __be32 laddr,
		 const __u16 lport, const __be32 faddr,
		 const __be16 fport)
{
	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return lport + __inet_ehashfn(laddr, 0, faddr, fport,
				      inet_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL_GPL(inet_ehashfn);

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum,
						 int l3mdev)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->l3mdev    = l3mdev;
		tb->port      = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->bhash2);
		hlist_add_head_rcu(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->bhash2)) {
		hlist_del_rcu(&tb->node);
		kfree_rcu(tb, rcu);
	}
}

bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
			    unsigned short port, int l3mdev)
{
	return net_eq(ib_net(tb), net) && tb->port == port &&
		tb->l3mdev == l3mdev;
}

static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb2,
				   struct net *net,
				   struct inet_bind_hashbucket *head,
				   struct inet_bind_bucket *tb,
				   const struct sock *sk)
{
	write_pnet(&tb2->ib_net, net);
	tb2->l3mdev = tb->l3mdev;
	tb2->port = tb->port;
#if IS_ENABLED(CONFIG_IPV6)
	BUILD_BUG_ON(USHRT_MAX < (IPV6_ADDR_ANY | IPV6_ADDR_MAPPED));
	if (sk->sk_family == AF_INET6) {
		tb2->addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
		tb2->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
	} else {
		tb2->addr_type = IPV6_ADDR_MAPPED;
		ipv6_addr_set_v4mapped(sk->sk_rcv_saddr, &tb2->v6_rcv_saddr);
	}
#else
	tb2->rcv_saddr = sk->sk_rcv_saddr;
#endif
	INIT_HLIST_HEAD(&tb2->owners);
	hlist_add_head(&tb2->node, &head->chain);
	hlist_add_head(&tb2->bhash_node, &tb->bhash2);
}

struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
						   struct net *net,
						   struct inet_bind_hashbucket *head,
						   struct inet_bind_bucket *tb,
						   const struct sock *sk)
{
	struct inet_bind2_bucket *tb2 = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb2)
		inet_bind2_bucket_init(tb2, net, head, tb, sk);

	return tb2;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		__hlist_del(&tb->bhash_node);
		kmem_cache_free(cachep, tb);
	}
}

static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
					 const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_equal(&tb2->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);

	if (tb2->addr_type != IPV6_ADDR_MAPPED)
		return false;
#endif
	return tb2->rcv_saddr == sk->sk_rcv_saddr;
}

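/* Caller must hold the bhash2 bucket lock for @tb2. */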
164 
inet_bind_hash(struct sock * sk,struct inet_bind_bucket * tb,struct inet_bind2_bucket * tb2,unsigned short port)165 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
166 		    struct inet_bind2_bucket *tb2, unsigned short port)
167 {
168 	inet_sk(sk)->inet_num = port;
169 	inet_csk(sk)->icsk_bind_hash = tb;
170 	inet_csk(sk)->icsk_bind2_hash = tb2;
171 	sk_add_bind_node(sk, &tb2->owners);
172 }
173 
174 /*
175  * Get rid of any references to a local port held by the given sock.
176  */
__inet_put_port(struct sock * sk)177 static void __inet_put_port(struct sock *sk)
178 {
179 	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
180 	struct inet_bind_hashbucket *head, *head2;
181 	struct net *net = sock_net(sk);
182 	struct inet_bind_bucket *tb;
183 	int bhash;
184 
185 	bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size);
186 	head = &hashinfo->bhash[bhash];
187 	head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num);
188 
189 	spin_lock(&head->lock);
190 	tb = inet_csk(sk)->icsk_bind_hash;
191 	inet_csk(sk)->icsk_bind_hash = NULL;
192 	inet_sk(sk)->inet_num = 0;
193 
194 	spin_lock(&head2->lock);
195 	if (inet_csk(sk)->icsk_bind2_hash) {
196 		struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash;
197 
198 		__sk_del_bind_node(sk);
199 		inet_csk(sk)->icsk_bind2_hash = NULL;
200 		inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
201 	}
202 	spin_unlock(&head2->lock);
203 
204 	inet_bind_bucket_destroy(tb);
205 	spin_unlock(&head->lock);
206 }
207 
inet_put_port(struct sock * sk)208 void inet_put_port(struct sock *sk)
209 {
210 	local_bh_disable();
211 	__inet_put_port(sk);
212 	local_bh_enable();
213 }
214 EXPORT_SYMBOL(inet_put_port);
215 
__inet_inherit_port(const struct sock * sk,struct sock * child)216 int __inet_inherit_port(const struct sock *sk, struct sock *child)
217 {
218 	struct inet_hashinfo *table = tcp_or_dccp_get_hashinfo(sk);
219 	unsigned short port = inet_sk(child)->inet_num;
220 	struct inet_bind_hashbucket *head, *head2;
221 	bool created_inet_bind_bucket = false;
222 	struct net *net = sock_net(sk);
223 	bool update_fastreuse = false;
224 	struct inet_bind2_bucket *tb2;
225 	struct inet_bind_bucket *tb;
226 	int bhash, l3mdev;
227 
228 	bhash = inet_bhashfn(net, port, table->bhash_size);
229 	head = &table->bhash[bhash];
230 	head2 = inet_bhashfn_portaddr(table, child, net, port);
231 
232 	spin_lock(&head->lock);
233 	spin_lock(&head2->lock);
234 	tb = inet_csk(sk)->icsk_bind_hash;
235 	tb2 = inet_csk(sk)->icsk_bind2_hash;
236 	if (unlikely(!tb || !tb2)) {
237 		spin_unlock(&head2->lock);
238 		spin_unlock(&head->lock);
239 		return -ENOENT;
240 	}
241 	if (tb->port != port) {
242 		l3mdev = inet_sk_bound_l3mdev(sk);
243 
244 		/* NOTE: using tproxy and redirecting skbs to a proxy
245 		 * on a different listener port breaks the assumption
246 		 * that the listener socket's icsk_bind_hash is the same
247 		 * as that of the child socket. We have to look up or
248 		 * create a new bind bucket for the child here. */
249 		inet_bind_bucket_for_each(tb, &head->chain) {
250 			if (inet_bind_bucket_match(tb, net, port, l3mdev))
251 				break;
252 		}
253 		if (!tb) {
254 			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
255 						     net, head, port, l3mdev);
256 			if (!tb) {
257 				spin_unlock(&head2->lock);
258 				spin_unlock(&head->lock);
259 				return -ENOMEM;
260 			}
261 			created_inet_bind_bucket = true;
262 		}
263 		update_fastreuse = true;
264 
265 		goto bhash2_find;
266 	} else if (!inet_bind2_bucket_addr_match(tb2, child)) {
267 		l3mdev = inet_sk_bound_l3mdev(sk);
268 
269 bhash2_find:
270 		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child);
271 		if (!tb2) {
272 			tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
273 						       net, head2, tb, child);
274 			if (!tb2)
275 				goto error;
276 		}
277 	}
278 	if (update_fastreuse)
279 		inet_csk_update_fastreuse(tb, child);
280 	inet_bind_hash(child, tb, tb2, port);
281 	spin_unlock(&head2->lock);
282 	spin_unlock(&head->lock);
283 
284 	return 0;
285 
286 error:
287 	if (created_inet_bind_bucket)
288 		inet_bind_bucket_destroy(tb);
289 	spin_unlock(&head2->lock);
290 	spin_unlock(&head->lock);
291 	return -ENOMEM;
292 }
293 EXPORT_SYMBOL_GPL(__inet_inherit_port);
294 
295 static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo * h,struct sock * sk)296 inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
297 {
298 	u32 hash;
299 
300 #if IS_ENABLED(CONFIG_IPV6)
301 	if (sk->sk_family == AF_INET6)
302 		hash = ipv6_portaddr_hash(sock_net(sk),
303 					  &sk->sk_v6_rcv_saddr,
304 					  inet_sk(sk)->inet_num);
305 	else
306 #endif
307 		hash = ipv4_portaddr_hash(sock_net(sk),
308 					  inet_sk(sk)->inet_rcv_saddr,
309 					  inet_sk(sk)->inet_num);
310 	return inet_lhash2_bucket(h, hash);
311 }
312 
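/*
 * Score a candidate listening socket against an incoming packet.
 * Returns -1 on mismatch; otherwise a device-bound socket outranks an
 * unbound one, a plain AF_INET socket outranks a v4-mapped AF_INET6
 * one, and affinity with the current CPU adds another point.
 */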
static inline int compute_score(struct sock *sk, const struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
			!ipv6_only_sock(sk)) {
		if (sk->sk_rcv_saddr != daddr)
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;
		score = sk->sk_bound_dev_if ? 2 : 1;

		if (sk->sk_family == PF_INET)
			score++;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}

/**
 * inet_lookup_reuseport() - execute reuseport logic on AF_INET socket if necessary.
 * @net: network namespace.
 * @sk: AF_INET socket, must be in TCP_LISTEN state for TCP or TCP_CLOSE for UDP.
 * @skb: context for a potential SK_REUSEPORT program.
 * @doff: header offset.
 * @saddr: source address.
 * @sport: source port.
 * @daddr: destination address.
 * @hnum: destination port in host byte order.
 * @ehashfn: hash function used to generate the fallback hash.
 *
 * Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
 *         the selected sock or an error.
 */
struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk,
				   struct sk_buff *skb, int doff,
				   __be32 saddr, __be16 sport,
				   __be32 daddr, unsigned short hnum,
				   inet_ehashfn_t *ehashfn)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn,
					net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}
EXPORT_SYMBOL_GPL(inet_lookup_reuseport);
/*
 * There are some nice properties to exploit here. The BSD API
 * does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */

/* called with rcu_read_lock() : No refcount taken on the socket */
static struct sock *inet_lhash2_lookup(const struct net *net,
				struct inet_listen_hashbucket *ilb2,
				struct sk_buff *skb, int doff,
				const __be32 saddr, __be16 sport,
				const __be32 daddr, const unsigned short hnum,
				const int dif, const int sdif)
{
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	int score, hiscore = 0;

	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = inet_lookup_reuseport(net, sk, skb, doff,
						       saddr, sport, daddr, hnum, inet_ehashfn);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

struct sock *inet_lookup_run_sk_lookup(const struct net *net,
				       int protocol,
				       struct sk_buff *skb, int doff,
				       __be32 saddr, __be16 sport,
				       __be32 daddr, u16 hnum, const int dif,
				       inet_ehashfn_t *ehashfn)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	no_reuseport = bpf_sk_lookup_run_v4(net, protocol, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum,
					 ehashfn);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

struct sock *__inet_lookup_listener(const struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    hashinfo == net->ipv4.tcp_death_row.hashinfo) {
		result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff,
						   saddr, sport, daddr, hnum, dif,
						   inet_ehashfn);
		if (result)
			goto done;
	}

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with INADDR_ANY */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, htonl(INADDR_ANY), hnum,
				    dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

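/* Destructor for early-demuxed skbs: releases the reference held on skb->sk. */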
void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

struct sock *__inet_lookup_established(const struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif, const int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!inet_match(net, sk, acookie,
						 ports, dif, sdif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp,
				    bool rcu_lookup,
				    u32 hash)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	int sdif = l3mdev_master_ifindex_by_index(net, dif);
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	struct inet_timewait_sock *tw = NULL;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	spinlock_t *lock;

	if (rcu_lookup) {
		sk_nulls_for_each(sk2, node, &head->chain) {
			if (sk2->sk_hash != hash ||
			    !inet_match(net, sk2, acookie, ports, dif, sdif))
				continue;
			if (sk2->sk_state == TCP_TIME_WAIT)
				break;
			return -EADDRNOTAVAIL;
		}
		return 0;
	}

	lock = inet_ehash_lockp(hinfo, hash);
	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (sk->sk_protocol == IPPROTO_TCP &&
				    tcp_twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * in the hash table a socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

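/* Per-destination secret offset used to randomize ephemeral port selection. */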
static u64 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Searches for an existing socket in the ehash bucket list.
 * Returns true if found, false otherwise.
 */
static bool inet_ehash_lookup_by_sk(struct sock *sk,
				    struct hlist_nulls_head *list)
{
	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
	const int sdif = sk->sk_bound_dev_if;
	const int dif = sk->sk_bound_dev_if;
	const struct hlist_nulls_node *node;
	struct net *net = sock_net(sk);
	struct sock *esk;

	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);

	sk_nulls_for_each_rcu(esk, node, list) {
		if (esk->sk_hash != sk->sk_hash)
			continue;
		if (sk->sk_family == AF_INET) {
			if (unlikely(inet_match(net, esk, acookie,
						ports, dif, sdif))) {
				return true;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			if (unlikely(inet6_match(net, esk,
						 &sk->sk_v6_daddr,
						 &sk->sk_v6_rcv_saddr,
						 ports, dif, sdif))) {
				return true;
			}
		}
#endif
	}
	return false;
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 * If a matching socket already exists, sk is not inserted and
 * the found_dup_sk parameter is set to true.
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_ehash_bucket *head;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	} else if (found_dup_sk) {
		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
		if (*found_dup_sk)
			ret = false;
	}

	if (ret)
		__sk_nulls_add_node_rcu(sk, list);

	spin_unlock(lock);

	return ret;
}

bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		this_cpu_inc(*sk->sk_prot->orphan_count);
		inet_sk_set_state(sk, TCP_CLOSE);
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);

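/*
 * Join an existing reuseport group on the same bind bucket if a
 * compatible listener exists, otherwise allocate a fresh group for
 * this first SO_REUSEPORT listener.
 */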
static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false))
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

int __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_listen_hashbucket *ilb2;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		local_bh_disable();
		inet_ehash_nolisten(sk, osk, NULL);
		local_bh_enable();
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);

	spin_lock(&ilb2->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb2);
		if (err)
			goto unlock;
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		sk->sk_family == AF_INET6)
		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
	else
		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb2->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE)
		err = __inet_hash(sk, NULL);

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN) {
		struct inet_listen_hashbucket *ilb2;

		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
		/* Don't disable bottom halves while acquiring the lock to
		 * avoid circular locking dependency on PREEMPT_RT.
		 */
		spin_lock(&ilb2->lock);
		if (sk_unhashed(sk)) {
			spin_unlock(&ilb2->lock);
			return;
		}

		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_stop_listen_sock(sk);

		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock(&ilb2->lock);
	} else {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

		spin_lock_bh(lock);
		if (sk_unhashed(sk)) {
			spin_unlock_bh(lock);
			return;
		}
		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock_bh(lock);
	}
}
EXPORT_SYMBOL_GPL(inet_unhash);

static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
				    const struct net *net, unsigned short port,
				    int l3mdev, const struct sock *sk)
{
	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
	    tb->l3mdev != l3mdev)
		return false;

	return inet_bind2_bucket_addr_match(tb, sk);
}

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
				      unsigned short port, int l3mdev, const struct sock *sk)
{
	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
	    tb->l3mdev != l3mdev)
		return false;

#if IS_ENABLED(CONFIG_IPV6)
	if (tb->addr_type == IPV6_ADDR_ANY)
		return true;

	if (tb->addr_type != IPV6_ADDR_MAPPED)
		return false;

	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
		return false;
#endif
	return tb->rcv_saddr == 0;
}

/* The socket's bhash2 hashbucket spinlock must be held when this is called */
struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net,
		       unsigned short port, int l3mdev, const struct sock *sk)
{
	struct inet_bind2_bucket *bhash2 = NULL;

	inet_bind_bucket_for_each(bhash2, &head->chain)
		if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
			break;

	return bhash2;
}

struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &in6addr_any, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, 0, port);

	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}

static void inet_update_saddr(struct sock *sk, void *saddr, int family)
{
	if (family == AF_INET) {
		inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
		sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else {
		sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
	}
#endif
}

static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2, *new_tb2;
	int l3mdev = inet_sk_bound_l3mdev(sk);
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	int bhash;

	if (!inet_csk(sk)->icsk_bind2_hash) {
		/* Not bind()ed before. */
		if (reset)
			inet_reset_saddr(sk);
		else
			inet_update_saddr(sk, saddr, family);

		return 0;
	}

	/* Allocate a bind2 bucket ahead of time to avoid permanently putting
	 * the bhash2 table in an inconsistent state if a new tb2 bucket
	 * allocation fails.
	 */
	new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
	if (!new_tb2) {
		if (reset) {
			/* The (INADDR_ANY, port) bucket might have already
			 * been freed, then we cannot fixup icsk_bind2_hash,
			 * so we give up and unlink sk from bhash/bhash2 not
			 * to leave inconsistency in bhash2.
			 */
			inet_put_port(sk);
			inet_reset_saddr(sk);
		}

		return -ENOMEM;
	}

	bhash = inet_bhashfn(net, port, hinfo->bhash_size);
	head = &hinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	/* If we change saddr locklessly, another thread
	 * iterating over bhash might see a corrupted address.
	 */
	spin_lock_bh(&head->lock);

	spin_lock(&head2->lock);
	__sk_del_bind_node(sk);
	inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
	spin_unlock(&head2->lock);

	if (reset)
		inet_reset_saddr(sk);
	else
		inet_update_saddr(sk, saddr, family);

	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	spin_lock(&head2->lock);
	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = new_tb2;
		inet_bind2_bucket_init(tb2, net, head2, inet_csk(sk)->icsk_bind_hash, sk);
	}
	inet_csk(sk)->icsk_bind2_hash = tb2;
	sk_add_bind_node(sk, &tb2->owners);
	spin_unlock(&head2->lock);

	spin_unlock_bh(&head->lock);

	if (tb2 != new_tb2)
		kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);

	return 0;
}

int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
{
	return __inet_bhash2_update_saddr(sk, saddr, family, false);
}
EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);

void inet_bhash2_reset_saddr(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		__inet_bhash2_update_saddr(sk, NULL, 0, true);
}
EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);

/* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
 * Note that we use 32bit integers (vs RFC 'short integers')
 * because 2^16 is not a multiple of num_ephemeral and this
 * property might be used by clever attacker.
 *
 * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
 * attacks have since been demonstrated, thus we use 65536 by default instead
 * to really give more isolation and privacy, at the expense of 256kB
 * of kernel memory.
 */
#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
static u32 *table_perturb;

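/*
 * The candidate source port in __inet_hash_connect() below is derived
 * roughly as:
 *
 *	index  = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
 *	offset = (table_perturb[index] + (port_offset >> 32)) % remaining;
 *	port   = low + offset;		(stepping by 2, one parity per pass)
 *
 * and table_perturb[index] is bumped once a port is chosen, so the next
 * connection hashing to the same perturbation slot starts its search
 * elsewhere.
 */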
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u64 port_offset,
		u32 hash_port0,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **,
			bool rcu_lookup, u32 hash))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_timewait_sock *tw = NULL;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	bool tb_created = false;
	u32 remaining, offset;
	int ret, i, low, high;
	bool local_ports;
	int step, l3mdev;
	u32 index;

	if (port) {
		local_bh_disable();
		ret = check_established(death_row, sk, port, NULL, false,
					hash_port0 + port);
		local_bh_enable();
		return ret;
	}

	l3mdev = inet_sk_bound_l3mdev(sk);

	local_ports = inet_sk_get_local_port_range(sk, &low, &high);
	step = local_ports ? 1 : 2;

	high++; /* [32768, 60999] -> [32768, 61000) */
	remaining = high - low;
	if (!local_ports && remaining > 1)
		remaining &= ~1U;

	get_random_sleepable_once(table_perturb,
				  INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);

	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
	offset %= remaining;

	/* In first pass we try ports of @low parity.
	 * inet_csk_get_port() does the opposite choice.
	 */
	if (!local_ports)
		offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += step, port += step) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		rcu_read_lock();
		hlist_for_each_entry_rcu(tb, &head->chain, node) {
			if (!inet_bind_bucket_match(tb, net, port, l3mdev))
				continue;
			if (tb->fastreuse >= 0 || tb->fastreuseport >= 0) {
				rcu_read_unlock();
				goto next_port;
			}
			if (!check_established(death_row, sk, port, &tw, true,
					       hash_port0 + port))
				break;
			rcu_read_unlock();
			goto next_port;
		}
		rcu_read_unlock();

		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port_unlock;
				WARN_ON(hlist_empty(&tb->bhash2));
				if (!check_established(death_row, sk,
						       port, &tw, false,
						       hash_port0 + port))
					goto ok;
				goto next_port_unlock;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port, l3mdev);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb_created = true;
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port_unlock:
		spin_unlock_bh(&head->lock);
next_port:
		cond_resched();
	}

	if (!local_ports) {
		offset++;
		if ((offset & 1) && remaining > 1)
			goto other_parity_scan;
	}
	return -EADDRNOTAVAIL;

ok:
	/* Find the corresponding tb2 bucket since we need to
	 * add the socket to the bhash2 table as well.
	 */
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
	spin_lock(&head2->lock);

	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
					       head2, tb, sk);
		if (!tb2)
			goto error;
	}

	/* Here we want to add a little bit of randomness to the next source
	 * port that will be chosen. We use a max() with a random here so that
	 * on low contention the randomness is maximal and on high contention
	 * it may be nonexistent.
	 */
	i = max_t(int, i, get_random_u32_below(8) * step);
	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + step);

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, tb2, port);

	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);

	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;

error:
	if (sk_hashed(sk)) {
		spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash);

		sock_prot_inuse_add(net, sk->sk_prot, -1);

		spin_lock(lock);
		__sk_nulls_del_node_init_rcu(sk);
		spin_unlock(lock);

		sk->sk_hash = 0;
		inet_sk(sk)->inet_sport = 0;
		inet_sk(sk)->inet_num = 0;

		if (tw)
			inet_twsk_bind_unhash(tw, hinfo);
	}

	spin_unlock(&head2->lock);
	if (tb_created)
		inet_bind_bucket_destroy(tb);
	spin_unlock(&head->lock);

	if (tw)
		inet_twsk_deschedule_put(tw);

	local_bh_enable();

	return -ENOMEM;
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct net *net = sock_net(sk);
	u64 port_offset = 0;
	u32 hash_port0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);

	hash_port0 = inet_ehashfn(net, inet->inet_rcv_saddr, 0,
				  inet->inet_daddr, inet->inet_dport);

	return __inet_hash_connect(death_row, sk, port_offset, hash_port0,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

static void init_hashinfo_lhash2(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i <= h->lhash2_mask; i++) {
		spin_lock_init(&h->lhash2[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head,
				      i + LISTENING_NULLS_BASE);
	}
}

void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
				unsigned long numentries, int scale,
				unsigned long low_limit,
				unsigned long high_limit)
{
	h->lhash2 = alloc_large_system_hash(name,
					    sizeof(*h->lhash2),
					    numentries,
					    scale,
					    0,
					    NULL,
					    &h->lhash2_mask,
					    low_limit,
					    high_limit);
	init_hashinfo_lhash2(h);

	/* this one is used for source ports of outgoing connections */
	table_perturb = alloc_large_system_hash("Table-perturb",
						sizeof(*table_perturb),
						INET_TABLE_PERTURB_SIZE,
						0, 0, NULL, NULL,
						INET_TABLE_PERTURB_SIZE,
						INET_TABLE_PERTURB_SIZE);
}

int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
{
	h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
	if (!h->lhash2)
		return -ENOMEM;

	h->lhash2_mask = INET_LHTABLE_SIZE - 1;
	/* INET_LHTABLE_SIZE must be a power of 2 */
	BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);

	init_hashinfo_lhash2(h);
	return 0;
}
EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;
	spinlock_t *ptr = NULL;

	if (locksz == 0)
		goto set_mask;

	/* Allocate 2 cache lines or at least one spinlock per cpu. */
	nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U) * num_possible_cpus();

	/* At least one page per NUMA node. */
	nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz);

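	/* Round up to a power of two so that ehash_locks_mask (nblocks - 1)
	 * can select a lock with a simple AND.
	 */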
	nblocks = roundup_pow_of_two(nblocks);

	/* No more locks than number of hash buckets. */
	nblocks = min(nblocks, hashinfo->ehash_mask + 1);

	if (num_online_nodes() > 1) {
		/* Use vmalloc() to allow NUMA policy to spread pages
		 * on all available nodes if desired.
		 */
		ptr = vmalloc_array(nblocks, locksz);
	}
	if (!ptr) {
		ptr = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
		if (!ptr)
			return -ENOMEM;
	}
	for (i = 0; i < nblocks; i++)
		spin_lock_init(&ptr[i]);
	hashinfo->ehash_locks = ptr;
set_mask:
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);

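/* Clone @hashinfo with its own ehash of @ehash_entries buckets; the
 * returned table is marked pernet so it can be freed independently.
 */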
struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
						 unsigned int ehash_entries)
{
	struct inet_hashinfo *new_hashinfo;
	int i;

	new_hashinfo = kmemdup(hashinfo, sizeof(*hashinfo), GFP_KERNEL);
	if (!new_hashinfo)
		goto err;

	new_hashinfo->ehash = vmalloc_huge(ehash_entries * sizeof(struct inet_ehash_bucket),
					   GFP_KERNEL_ACCOUNT);
	if (!new_hashinfo->ehash)
		goto free_hashinfo;

	new_hashinfo->ehash_mask = ehash_entries - 1;

	if (inet_ehash_locks_alloc(new_hashinfo))
		goto free_ehash;

	for (i = 0; i < ehash_entries; i++)
		INIT_HLIST_NULLS_HEAD(&new_hashinfo->ehash[i].chain, i);

	new_hashinfo->pernet = true;

	return new_hashinfo;

free_ehash:
	vfree(new_hashinfo->ehash);
free_hashinfo:
	kfree(new_hashinfo);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_alloc);

void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo)
{
	if (!hashinfo->pernet)
		return;

	inet_ehash_locks_free(hashinfo);
	vfree(hashinfo->ehash);
	kfree(hashinfo);
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_free);
1356