// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

u32 inet_ehashfn(const struct net *net, const __be32 laddr,
		 const __u16 lport, const __be32 faddr,
		 const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL_GPL(inet_ehashfn);
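
/*
 * Illustrative sketch (not part of the build): callers typically turn the
 * 4-tuple hash into a chain head by masking with ehash_mask, exactly as
 * __inet_lookup_established() does further down:
 *
 *	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
 *	unsigned int slot = hash & hashinfo->ehash_mask;
 *	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
 */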

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum,
						 int l3mdev)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->l3mdev    = l3mdev;
		tb->port      = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->bhash2);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->bhash2)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
			    unsigned short port, int l3mdev)
{
	return net_eq(ib_net(tb), net) && tb->port == port &&
		tb->l3mdev == l3mdev;
}

static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb2,
				   struct net *net,
				   struct inet_bind_hashbucket *head,
				   struct inet_bind_bucket *tb,
				   const struct sock *sk)
{
	write_pnet(&tb2->ib_net, net);
	tb2->l3mdev = tb->l3mdev;
	tb2->port = tb->port;
#if IS_ENABLED(CONFIG_IPV6)
	BUILD_BUG_ON(USHRT_MAX < (IPV6_ADDR_ANY | IPV6_ADDR_MAPPED));
	if (sk->sk_family == AF_INET6) {
		tb2->addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
		tb2->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
	} else {
		tb2->addr_type = IPV6_ADDR_MAPPED;
		ipv6_addr_set_v4mapped(sk->sk_rcv_saddr, &tb2->v6_rcv_saddr);
	}
#else
	tb2->rcv_saddr = sk->sk_rcv_saddr;
#endif
	INIT_HLIST_HEAD(&tb2->owners);
	hlist_add_head(&tb2->node, &head->chain);
	hlist_add_head(&tb2->bhash_node, &tb->bhash2);
}

struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
						   struct net *net,
						   struct inet_bind_hashbucket *head,
						   struct inet_bind_bucket *tb,
						   const struct sock *sk)
{
	struct inet_bind2_bucket *tb2 = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb2)
		inet_bind2_bucket_init(tb2, net, head, tb, sk);

	return tb2;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		__hlist_del(&tb->bhash_node);
		kmem_cache_free(cachep, tb);
	}
}

static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
					 const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_equal(&tb2->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);

	if (tb2->addr_type != IPV6_ADDR_MAPPED)
		return false;
#endif
	return tb2->rcv_saddr == sk->sk_rcv_saddr;
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, unsigned short port)
{
	inet_sk(sk)->inet_num = port;
	inet_csk(sk)->icsk_bind_hash = tb;
	inet_csk(sk)->icsk_bind2_hash = tb2;
	sk_add_bind_node(sk, &tb2->owners);
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	int bhash;

	bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size);
	head = &hashinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num);

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;

	spin_lock(&head2->lock);
	if (inet_csk(sk)->icsk_bind2_hash) {
		struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash;

		__sk_del_bind_node(sk);
		inet_csk(sk)->icsk_bind2_hash = NULL;
		inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
	}
	spin_unlock(&head2->lock);

	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = tcp_or_dccp_get_hashinfo(sk);
	unsigned short port = inet_sk(child)->inet_num;
	struct inet_bind_hashbucket *head, *head2;
	bool created_inet_bind_bucket = false;
	struct net *net = sock_net(sk);
	bool update_fastreuse = false;
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	int bhash, l3mdev;

	bhash = inet_bhashfn(net, port, table->bhash_size);
	head = &table->bhash[bhash];
	head2 = inet_bhashfn_portaddr(table, child, net, port);

	spin_lock(&head->lock);
	spin_lock(&head2->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	tb2 = inet_csk(sk)->icsk_bind2_hash;
	if (unlikely(!tb || !tb2)) {
		spin_unlock(&head2->lock);
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		l3mdev = inet_sk_bound_l3mdev(sk);

		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     net, head, port, l3mdev);
			if (!tb) {
				spin_unlock(&head2->lock);
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
			created_inet_bind_bucket = true;
		}
		update_fastreuse = true;

		goto bhash2_find;
	} else if (!inet_bind2_bucket_addr_match(tb2, child)) {
		l3mdev = inet_sk_bound_l3mdev(sk);

bhash2_find:
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child);
		if (!tb2) {
			tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
						       net, head2, tb, child);
			if (!tb2)
				goto error;
		}
	}
	if (update_fastreuse)
		inet_csk_update_fastreuse(tb, child);
	inet_bind_hash(child, tb, tb2, port);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	return 0;

error:
	if (created_inet_bind_bucket)
		inet_bind_bucket_destroy(table->bind_bucket_cachep, tb);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	else
#endif
		hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	return inet_lhash2_bucket(h, hash);
}

static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
			!ipv6_only_sock(sk)) {
		if (sk->sk_rcv_saddr != daddr)
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;
		score = sk->sk_bound_dev_if ? 2 : 1;

		if (sk->sk_family == PF_INET)
			score++;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}
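
/*
 * Worked example (illustrative): for a SYN to 192.0.2.1:80 arriving on
 * ifindex 3, an AF_INET listener bound to 192.0.2.1:80 and to dev 3 scores
 * 2 + 1 = 3 (plus 1 more if sk_incoming_cpu matches), one bound to the same
 * address but no device scores 1 + 1 = 2, and any saddr or device mismatch
 * yields -1, i.e. the socket is skipped. Wildcard (INADDR_ANY) listeners
 * fail the daddr check here; __inet_lookup_listener() handles them with a
 * second pass, see below.
 */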

/**
 * inet_lookup_reuseport() - execute reuseport logic on AF_INET socket if necessary.
 * @net: network namespace.
 * @sk: AF_INET socket, must be in TCP_LISTEN state for TCP or TCP_CLOSE for UDP.
 * @skb: context for a potential SK_REUSEPORT program.
 * @doff: header offset.
 * @saddr: source address.
 * @sport: source port.
 * @daddr: destination address.
 * @hnum: destination port in host byte order.
 * @ehashfn: hash function used to generate the fallback hash.
 *
 * Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
 *         the selected sock or an error.
 */
struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
				   struct sk_buff *skb, int doff,
				   __be32 saddr, __be16 sport,
				   __be32 daddr, unsigned short hnum,
				   inet_ehashfn_t *ehashfn)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn,
					net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}
EXPORT_SYMBOL_GPL(inet_lookup_reuseport);

/*
 * There are some nice properties to exploit here. The BSD API
 * does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */

/* called with rcu_read_lock() : No refcount taken on the socket */
static struct sock *inet_lhash2_lookup(struct net *net,
				struct inet_listen_hashbucket *ilb2,
				struct sk_buff *skb, int doff,
				const __be32 saddr, __be16 sport,
				const __be32 daddr, const unsigned short hnum,
				const int dif, const int sdif)
{
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	int score, hiscore = 0;

	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = inet_lookup_reuseport(net, sk, skb, doff,
						       saddr, sport, daddr, hnum, inet_ehashfn);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

struct sock *inet_lookup_run_sk_lookup(struct net *net,
				       int protocol,
				       struct sk_buff *skb, int doff,
				       __be32 saddr, __be16 sport,
				       __be32 daddr, u16 hnum, const int dif,
				       inet_ehashfn_t *ehashfn)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	no_reuseport = bpf_sk_lookup_run_v4(net, protocol, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum,
					 ehashfn);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    hashinfo == net->ipv4.tcp_death_row.hashinfo) {
		result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff,
						   saddr, sport, daddr, hnum, dif,
						   inet_ehashfn);
		if (result)
			goto done;
	}

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with INADDR_ANY */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, htonl(INADDR_ANY), hnum,
				    dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
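
/*
 * Hedged usage sketch: the TCP IPv4 receive path resolves listeners roughly
 * like this (simplified from the actual tcp_v4_rcv() flow):
 *
 *	sk = __inet_lookup_listener(net, hashinfo, skb, __tcp_hdrlen(th),
 *				    iph->saddr, th->source,
 *				    iph->daddr, ntohs(th->dest),
 *				    inet_iif(skb), inet_sdif(skb));
 *
 * The two-pass structure above means an exact-address listener always
 * shadows an INADDR_ANY listener on the same port.
 */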

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif, const int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!inet_match(net, sk, acookie,
						 ports, dif, sdif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
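
/*
 * Note on the nulls marker (illustrative): every ehash chain ends in a
 * nulls value encoding its bucket index, so a lockless walker that got
 * migrated to another chain mid-traversal lands on a marker failing the
 * "get_nulls_value(node) != slot" test above and restarts. The invariant
 * is established at table init time, see inet_pernet_hashinfo_alloc():
 *
 *	for (i = 0; i < ehash_entries; i++)
 *		INIT_HLIST_NULLS_HEAD(&ehash[i].chain, i);
 */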

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	int sdif = l3mdev_master_ifindex_by_index(net, dif);
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * a socket with a funny identity in the hash table.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static u64 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Searches for an existing socket in the ehash bucket list.
 * Returns true if found, false otherwise.
 */
static bool inet_ehash_lookup_by_sk(struct sock *sk,
				    struct hlist_nulls_head *list)
{
	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
	const int sdif = sk->sk_bound_dev_if;
	const int dif = sk->sk_bound_dev_if;
	const struct hlist_nulls_node *node;
	struct net *net = sock_net(sk);
	struct sock *esk;

	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);

	sk_nulls_for_each_rcu(esk, node, list) {
		if (esk->sk_hash != sk->sk_hash)
			continue;
		if (sk->sk_family == AF_INET) {
			if (unlikely(inet_match(net, esk, acookie,
						ports, dif, sdif))) {
				return true;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			if (unlikely(inet6_match(net, esk,
						 &sk->sk_v6_daddr,
						 &sk->sk_v6_rcv_saddr,
						 ports, dif, sdif))) {
				return true;
			}
		}
#endif
	}
	return false;
}

/* Insert a socket into ehash, possibly removing another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 * If a duplicate socket already exists, sk is not inserted
 * and the found_dup_sk parameter is set to true.
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_ehash_bucket *head;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	} else if (found_dup_sk) {
		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
		if (*found_dup_sk)
			ret = false;
	}

	if (ret)
		__sk_nulls_add_node_rcu(sk, list);

	spin_unlock(lock);

	return ret;
}

bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		this_cpu_inc(*sk->sk_prot->orphan_count);
		inet_sk_set_state(sk, TCP_CLOSE);
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);

static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false))
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

int __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_listen_hashbucket *ilb2;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		local_bh_disable();
		inet_ehash_nolisten(sk, osk, NULL);
		local_bh_enable();
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);

	spin_lock(&ilb2->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb2);
		if (err)
			goto unlock;
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		sk->sk_family == AF_INET6)
		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
	else
		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb2->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE)
		err = __inet_hash(sk, NULL);

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN) {
		struct inet_listen_hashbucket *ilb2;

		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
		/* Don't disable bottom halves while acquiring the lock to
		 * avoid circular locking dependency on PREEMPT_RT.
		 */
		spin_lock(&ilb2->lock);
		if (sk_unhashed(sk)) {
			spin_unlock(&ilb2->lock);
			return;
		}

		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_stop_listen_sock(sk);

		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock(&ilb2->lock);
	} else {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

		spin_lock_bh(lock);
		if (sk_unhashed(sk)) {
			spin_unlock_bh(lock);
			return;
		}
		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock_bh(lock);
	}
}
EXPORT_SYMBOL_GPL(inet_unhash);

static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
				    const struct net *net, unsigned short port,
				    int l3mdev, const struct sock *sk)
{
	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
	    tb->l3mdev != l3mdev)
		return false;

	return inet_bind2_bucket_addr_match(tb, sk);
}

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
				      unsigned short port, int l3mdev, const struct sock *sk)
{
	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
	    tb->l3mdev != l3mdev)
		return false;

#if IS_ENABLED(CONFIG_IPV6)
	if (tb->addr_type == IPV6_ADDR_ANY)
		return true;

	if (tb->addr_type != IPV6_ADDR_MAPPED)
		return false;

	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
		return false;
#endif
	return tb->rcv_saddr == 0;
}

/* The socket's bhash2 hashbucket spinlock must be held when this is called */
struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net,
		       unsigned short port, int l3mdev, const struct sock *sk)
{
	struct inet_bind2_bucket *bhash2 = NULL;

	inet_bind_bucket_for_each(bhash2, &head->chain)
		if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
			break;

	return bhash2;
}

struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &in6addr_any, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, 0, port);

	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}

static void inet_update_saddr(struct sock *sk, void *saddr, int family)
{
	if (family == AF_INET) {
		inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
		sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else {
		sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
	}
#endif
}

static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2, *new_tb2;
	int l3mdev = inet_sk_bound_l3mdev(sk);
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	int bhash;

	if (!inet_csk(sk)->icsk_bind2_hash) {
		/* Not bind()ed before. */
		if (reset)
			inet_reset_saddr(sk);
		else
			inet_update_saddr(sk, saddr, family);

		return 0;
	}

	/* Allocate a bind2 bucket ahead of time to avoid permanently putting
	 * the bhash2 table in an inconsistent state if a new tb2 bucket
	 * allocation fails.
	 */
	new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
	if (!new_tb2) {
		if (reset) {
			/* The (INADDR_ANY, port) bucket might have already
			 * been freed, then we cannot fixup icsk_bind2_hash,
			 * so we give up and unlink sk from bhash/bhash2 not
			 * to leave inconsistency in bhash2.
			 */
			inet_put_port(sk);
			inet_reset_saddr(sk);
		}

		return -ENOMEM;
	}

	bhash = inet_bhashfn(net, port, hinfo->bhash_size);
	head = &hinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	/* If we change saddr locklessly, another thread
	 * iterating over bhash might see corrupted address.
	 */
	spin_lock_bh(&head->lock);

	spin_lock(&head2->lock);
	__sk_del_bind_node(sk);
	inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
	spin_unlock(&head2->lock);

	if (reset)
		inet_reset_saddr(sk);
	else
		inet_update_saddr(sk, saddr, family);

	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	spin_lock(&head2->lock);
	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = new_tb2;
		inet_bind2_bucket_init(tb2, net, head2, inet_csk(sk)->icsk_bind_hash, sk);
	}
	inet_csk(sk)->icsk_bind2_hash = tb2;
	sk_add_bind_node(sk, &tb2->owners);
	spin_unlock(&head2->lock);

	spin_unlock_bh(&head->lock);

	if (tb2 != new_tb2)
		kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);

	return 0;
}

int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
{
	return __inet_bhash2_update_saddr(sk, saddr, family, false);
}
EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);

void inet_bhash2_reset_saddr(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		__inet_bhash2_update_saddr(sk, NULL, 0, true);
}
EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);

/* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
 * Note that we use 32bit integers (vs RFC 'short integers')
 * because 2^16 is not a multiple of num_ephemeral and this
 * property might be used by a clever attacker.
 *
 * The RFC claims that TABLE_LENGTH=10 buckets give an improvement, but
 * attacks have since been demonstrated, so we use 65536 buckets by default
 * to really give more isolation and privacy, at the expense of 256kB
 * of kernel memory.
 */
#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
static u32 *table_perturb;
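
/*
 * Minimal sketch of the RFC 6056 Algorithm 4 flow implemented by
 * __inet_hash_connect() below (illustrative; check_port() is a
 * hypothetical stand-in for the check_established() callback):
 *
 *	index = f2(tuple) % TABLE_LENGTH;	// low bits of port_offset
 *	offset = (f1(tuple) + table[index]) % num_ephemeral;
 *	for (i = 0; i < num_ephemeral; i++, offset++) {
 *		port = low + offset % num_ephemeral;
 *		if (check_port(port)) {
 *			table[index] += i + 1;	// perturb the next search
 *			return port;
 *		}
 *	}
 *
 * Here both f1 and f2 derive from the keyed hash in inet_sk_port_offset().
 */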

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u64 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_timewait_sock *tw = NULL;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	bool tb_created = false;
	u32 remaining, offset;
	int ret, i, low, high;
	bool local_ports;
	int step, l3mdev;
	u32 index;

	if (port) {
		local_bh_disable();
		ret = check_established(death_row, sk, port, NULL);
		local_bh_enable();
		return ret;
	}

	l3mdev = inet_sk_bound_l3mdev(sk);

	local_ports = inet_sk_get_local_port_range(sk, &low, &high);
	step = local_ports ? 1 : 2;

	high++; /* [32768, 60999] -> [32768, 61000[ */
	remaining = high - low;
	if (!local_ports && remaining > 1)
		remaining &= ~1U;

	get_random_sleepable_once(table_perturb,
				  INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);

	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
	offset %= remaining;

	/* In the first pass we try ports whose parity matches @low.
	 * inet_csk_get_port() makes the opposite choice.
	 */
	if (!local_ports)
		offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += step, port += step) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port;
				WARN_ON(hlist_empty(&tb->bhash2));
				if (!check_established(death_row, sk,
						       port, &tw))
					goto ok;
				goto next_port;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port, l3mdev);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb_created = true;
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	if (!local_ports) {
		offset++;
		if ((offset & 1) && remaining > 1)
			goto other_parity_scan;
	}
	return -EADDRNOTAVAIL;

ok:
	/* Find the corresponding tb2 bucket since we need to
	 * add the socket to the bhash2 table as well
	 */
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
	spin_lock(&head2->lock);

	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
					       head2, tb, sk);
		if (!tb2)
			goto error;
	}

	/* Here we want to add a little bit of randomness to the next source
	 * port that will be chosen. We use a max() with a random here so that
	 * on low contention the randomness is maximal and on high contention
	 * it may be nonexistent.
	 */
	i = max_t(int, i, get_random_u32_below(8) * step);
	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + step);

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, tb2, port);

	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);

	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;

error:
	spin_unlock(&head2->lock);
	if (tb_created)
		inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
	spin_unlock_bh(&head->lock);
	return -ENOMEM;
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u64 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);
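
/*
 * Hedged usage sketch: connect() paths call inet_hash_connect() once the
 * route is resolved, e.g. tcp_v4_connect() does (simplified):
 *
 *	err = inet_hash_connect(tcp_death_row, sk);
 *	if (err)
 *		goto failure;
 *
 * where tcp_death_row points at the netns's &net->ipv4.tcp_death_row.
 */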

static void init_hashinfo_lhash2(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i <= h->lhash2_mask; i++) {
		spin_lock_init(&h->lhash2[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head,
				      i + LISTENING_NULLS_BASE);
	}
}

void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
				unsigned long numentries, int scale,
				unsigned long low_limit,
				unsigned long high_limit)
{
	h->lhash2 = alloc_large_system_hash(name,
					    sizeof(*h->lhash2),
					    numentries,
					    scale,
					    0,
					    NULL,
					    &h->lhash2_mask,
					    low_limit,
					    high_limit);
	init_hashinfo_lhash2(h);

	/* this one is used for source ports of outgoing connections */
	table_perturb = alloc_large_system_hash("Table-perturb",
						sizeof(*table_perturb),
						INET_TABLE_PERTURB_SIZE,
						0, 0, NULL, NULL,
						INET_TABLE_PERTURB_SIZE,
						INET_TABLE_PERTURB_SIZE);
}

int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
{
	h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
	if (!h->lhash2)
		return -ENOMEM;

	h->lhash2_mask = INET_LHTABLE_SIZE - 1;
	/* INET_LHTABLE_SIZE must be a power of 2 */
	BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);

	init_hashinfo_lhash2(h);
	return 0;
}
EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
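
/*
 * Illustrative mapping from hash to lock (this is what inet_ehash_lockp()
 * amounts to); since nblocks is a power of two, a mask suffices:
 *
 *	spinlock_t *lock = &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
 *
 * Several ehash buckets may share one lock, capped at one lock per bucket.
 */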

struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
						 unsigned int ehash_entries)
{
	struct inet_hashinfo *new_hashinfo;
	int i;

	new_hashinfo = kmemdup(hashinfo, sizeof(*hashinfo), GFP_KERNEL);
	if (!new_hashinfo)
		goto err;

	new_hashinfo->ehash = vmalloc_huge(ehash_entries * sizeof(struct inet_ehash_bucket),
					   GFP_KERNEL_ACCOUNT);
	if (!new_hashinfo->ehash)
		goto free_hashinfo;

	new_hashinfo->ehash_mask = ehash_entries - 1;

	if (inet_ehash_locks_alloc(new_hashinfo))
		goto free_ehash;

	for (i = 0; i < ehash_entries; i++)
		INIT_HLIST_NULLS_HEAD(&new_hashinfo->ehash[i].chain, i);

	new_hashinfo->pernet = true;

	return new_hashinfo;

free_ehash:
	vfree(new_hashinfo->ehash);
free_hashinfo:
	kfree(new_hashinfo);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_alloc);

void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo)
{
	if (!hashinfo->pernet)
		return;

	inet_ehash_locks_free(hashinfo);
	vfree(hashinfo->ehash);
	kfree(hashinfo);
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_free);
1277