xref: /linux/net/ipv4/inet_connection_sock.c (revision 79ac11393328fb1717d17c12e3c0eef0e9fa0647)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Support for INET connection-oriented protocols.
8  *
9  * Authors:	See the TCP sources
10  */
11 
12 #include <linux/module.h>
13 #include <linux/jhash.h>
14 
15 #include <net/inet_connection_sock.h>
16 #include <net/inet_hashtables.h>
17 #include <net/inet_timewait_sock.h>
18 #include <net/ip.h>
19 #include <net/route.h>
20 #include <net/tcp_states.h>
21 #include <net/xfrm.h>
22 #include <net/tcp.h>
23 #include <net/sock_reuseport.h>
24 #include <net/addrconf.h>
25 
26 #if IS_ENABLED(CONFIG_IPV6)
27 /* match_sk*_wildcard == true:  IPV6_ADDR_ANY matches any IPv6 address
28  *				if the socket is IPv6-only, and any IPv4
29  *				address as well if it is not IPv6-only
30  * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
31  *				IPV6_ADDR_ANY matches only IPV6_ADDR_ANY,
32  *				and 0.0.0.0 matches only 0.0.0.0
33  */
34 static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
35 				 const struct in6_addr *sk2_rcv_saddr6,
36 				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
37 				 bool sk1_ipv6only, bool sk2_ipv6only,
38 				 bool match_sk1_wildcard,
39 				 bool match_sk2_wildcard)
40 {
41 	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
42 	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
43 
44 	/* if both are mapped, treat as IPv4 */
45 	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
46 		if (!sk2_ipv6only) {
47 			if (sk1_rcv_saddr == sk2_rcv_saddr)
48 				return true;
49 			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
50 				(match_sk2_wildcard && !sk2_rcv_saddr);
51 		}
52 		return false;
53 	}
54 
55 	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
56 		return true;
57 
58 	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
59 	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
60 		return true;
61 
62 	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
63 	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
64 		return true;
65 
66 	if (sk2_rcv_saddr6 &&
67 	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
68 		return true;
69 
70 	return false;
71 }
72 #endif
73 
74 /* match_sk*_wildcard == true:  0.0.0.0 matches any IPv4 address
75  * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
76  *				0.0.0.0 matches only 0.0.0.0
77  */
78 static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
79 				 bool sk2_ipv6only, bool match_sk1_wildcard,
80 				 bool match_sk2_wildcard)
81 {
82 	if (!sk2_ipv6only) {
83 		if (sk1_rcv_saddr == sk2_rcv_saddr)
84 			return true;
85 		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
86 			(match_sk2_wildcard && !sk2_rcv_saddr);
87 	}
88 	return false;
89 }
90 
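/* Family-aware rcv_saddr comparison used by the bind conflict checks:
 * dispatch to the IPv6 or IPv4 helper depending on sk's family, applying
 * @match_wildcard to both sockets.
 */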
91 bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
92 			  bool match_wildcard)
93 {
94 #if IS_ENABLED(CONFIG_IPV6)
95 	if (sk->sk_family == AF_INET6)
96 		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
97 					    inet6_rcv_saddr(sk2),
98 					    sk->sk_rcv_saddr,
99 					    sk2->sk_rcv_saddr,
100 					    ipv6_only_sock(sk),
101 					    ipv6_only_sock(sk2),
102 					    match_wildcard,
103 					    match_wildcard);
104 #endif
105 	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
106 				    ipv6_only_sock(sk2), match_wildcard,
107 				    match_wildcard);
108 }
109 EXPORT_SYMBOL(inet_rcv_saddr_equal);
110 
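/* Return true if the socket is bound to the wildcard address
 * (IPV6_ADDR_ANY for AF_INET6 sockets, INADDR_ANY otherwise).
 */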
111 bool inet_rcv_saddr_any(const struct sock *sk)
112 {
113 #if IS_ENABLED(CONFIG_IPV6)
114 	if (sk->sk_family == AF_INET6)
115 		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
116 #endif
117 	return !sk->sk_rcv_saddr;
118 }
119 
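/* Compute the effective local port range for @sk: start from the netns
 * ip_local_port_range and narrow each end with the per-socket
 * inet->local_port_range where it falls inside the netns range.  For
 * example, a netns range of [32768, 60999] combined with a per-socket
 * range of [40000, 50000] yields [40000, 50000].
 */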
120 void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
121 {
122 	const struct inet_sock *inet = inet_sk(sk);
123 	const struct net *net = sock_net(sk);
124 	int lo, hi, sk_lo, sk_hi;
125 	u32 sk_range;
126 
127 	inet_get_local_port_range(net, &lo, &hi);
128 
129 	sk_range = READ_ONCE(inet->local_port_range);
130 	if (unlikely(sk_range)) {
131 		sk_lo = sk_range & 0xffff;
132 		sk_hi = sk_range >> 16;
133 
134 		if (lo <= sk_lo && sk_lo <= hi)
135 			lo = sk_lo;
136 		if (lo <= sk_hi && sk_hi <= hi)
137 			hi = sk_hi;
138 	}
139 
140 	*low = lo;
141 	*high = hi;
142 }
143 EXPORT_SYMBOL(inet_sk_get_local_port_range);
144 
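/* bhash2 (the port+address bind hash) is only consulted at bind time for
 * sockets bound to a specific address; wildcard binds, and v4-mapped
 * addresses on IPv6 sockets, keep using the plain per-port hash.
 */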
145 static bool inet_use_bhash2_on_bind(const struct sock *sk)
146 {
147 #if IS_ENABLED(CONFIG_IPV6)
148 	if (sk->sk_family == AF_INET6) {
149 		int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
150 
151 		return addr_type != IPV6_ADDR_ANY &&
152 			addr_type != IPV6_ADDR_MAPPED;
153 	}
154 #endif
155 	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
156 }
157 
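/* Core bind conflict test between two sockets sharing a local port: only
 * sockets bound to the same device (or with no device binding) can
 * conflict, and SO_REUSEADDR/SO_REUSEPORT may allow sharing depending on
 * @relax and @reuseport_ok.
 */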
158 static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
159 			       kuid_t sk_uid, bool relax,
160 			       bool reuseport_cb_ok, bool reuseport_ok)
161 {
162 	int bound_dev_if2;
163 
164 	if (sk == sk2)
165 		return false;
166 
167 	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
168 
169 	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
170 	    sk->sk_bound_dev_if == bound_dev_if2) {
171 		if (sk->sk_reuse && sk2->sk_reuse &&
172 		    sk2->sk_state != TCP_LISTEN) {
173 			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
174 				       sk2->sk_reuseport && reuseport_cb_ok &&
175 				       (sk2->sk_state == TCP_TIME_WAIT ||
176 					uid_eq(sk_uid, sock_i_uid(sk2)))))
177 				return true;
178 		} else if (!reuseport_ok || !sk->sk_reuseport ||
179 			   !sk2->sk_reuseport || !reuseport_cb_ok ||
180 			   (sk2->sk_state != TCP_TIME_WAIT &&
181 			    !uid_eq(sk_uid, sock_i_uid(sk2)))) {
182 			return true;
183 		}
184 	}
185 	return false;
186 }
187 
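/* Wrapper around inet_bind_conflict() for bhash2 buckets: an IPv4 socket
 * cannot conflict with an IPv6-only one, so skip the check in that case.
 */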
188 static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
189 				   kuid_t sk_uid, bool relax,
190 				   bool reuseport_cb_ok, bool reuseport_ok)
191 {
192 	if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
193 		return false;
194 
195 	return inet_bind_conflict(sk, sk2, sk_uid, relax,
196 				  reuseport_cb_ok, reuseport_ok);
197 }
198 
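/* Walk all sockets (including timewait sockets) bound into the bhash2
 * bucket @tb2 and report whether any of them conflicts with @sk.
 */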
199 static bool inet_bhash2_conflict(const struct sock *sk,
200 				 const struct inet_bind2_bucket *tb2,
201 				 kuid_t sk_uid,
202 				 bool relax, bool reuseport_cb_ok,
203 				 bool reuseport_ok)
204 {
205 	struct inet_timewait_sock *tw2;
206 	struct sock *sk2;
207 
208 	sk_for_each_bound_bhash2(sk2, &tb2->owners) {
209 		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
210 					   reuseport_cb_ok, reuseport_ok))
211 			return true;
212 	}
213 
214 	twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
215 		sk2 = (struct sock *)tw2;
216 
217 		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
218 					   reuseport_cb_ok, reuseport_ok))
219 			return true;
220 	}
221 
222 	return false;
223 }
224 
225 /* This should be called only when the tb and tb2 hashbuckets' locks are held */
226 static int inet_csk_bind_conflict(const struct sock *sk,
227 				  const struct inet_bind_bucket *tb,
228 				  const struct inet_bind2_bucket *tb2, /* may be null */
229 				  bool relax, bool reuseport_ok)
230 {
231 	bool reuseport_cb_ok;
232 	struct sock_reuseport *reuseport_cb;
233 	kuid_t uid = sock_i_uid((struct sock *)sk);
234 
235 	rcu_read_lock();
236 	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
237 	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
238 	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
239 	rcu_read_unlock();
240 
241 	/*
242 	 * Unlike other sk lookup places we do not check
243 	 * for sk_net here, since _all_ the socks listed
244 	 * in tb->owners and tb2->owners list belong
245 	 * to the same net - the one this bucket belongs to.
246 	 */
247 
248 	if (!inet_use_bhash2_on_bind(sk)) {
249 		struct sock *sk2;
250 
251 		sk_for_each_bound(sk2, &tb->owners)
252 			if (inet_bind_conflict(sk, sk2, uid, relax,
253 					       reuseport_cb_ok, reuseport_ok) &&
254 			    inet_rcv_saddr_equal(sk, sk2, true))
255 				return true;
256 
257 		return false;
258 	}
259 
260 	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
261 	 * ipv4) should have been checked already. We need to do these two
262 	 * checks separately because their spinlocks have to be acquired/released
263 	 * independently of each other, to prevent possible deadlocks
264 	 */
265 	return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
266 					   reuseport_ok);
267 }
268 
269 /* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
270  * INADDR_ANY (if ipv4) socket.
271  *
272  * Caller must hold bhash hashbucket lock with local bh disabled, to protect
273  * against concurrent binds on the port for addr any
274  */
275 static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
276 					  bool relax, bool reuseport_ok)
277 {
278 	kuid_t uid = sock_i_uid((struct sock *)sk);
279 	const struct net *net = sock_net(sk);
280 	struct sock_reuseport *reuseport_cb;
281 	struct inet_bind_hashbucket *head2;
282 	struct inet_bind2_bucket *tb2;
283 	bool reuseport_cb_ok;
284 
285 	rcu_read_lock();
286 	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
287 	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
288 	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
289 	rcu_read_unlock();
290 
291 	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);
292 
293 	spin_lock(&head2->lock);
294 
295 	inet_bind_bucket_for_each(tb2, &head2->chain)
296 		if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
297 			break;
298 
299 	if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
300 					reuseport_ok)) {
301 		spin_unlock(&head2->lock);
302 		return true;
303 	}
304 
305 	spin_unlock(&head2->lock);
306 	return false;
307 }
308 
309 /*
310  * Find an open port number for the socket.  Returns with the
311  * inet_bind_hashbucket locks held if successful.
312  */
313 static struct inet_bind_hashbucket *
314 inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
315 			struct inet_bind2_bucket **tb2_ret,
316 			struct inet_bind_hashbucket **head2_ret, int *port_ret)
317 {
318 	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
319 	int i, low, high, attempt_half, port, l3mdev;
320 	struct inet_bind_hashbucket *head, *head2;
321 	struct net *net = sock_net(sk);
322 	struct inet_bind2_bucket *tb2;
323 	struct inet_bind_bucket *tb;
324 	u32 remaining, offset;
325 	bool relax = false;
326 
327 	l3mdev = inet_sk_bound_l3mdev(sk);
328 ports_exhausted:
329 	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
330 other_half_scan:
331 	inet_sk_get_local_port_range(sk, &low, &high);
332 	high++; /* [32768, 60999] -> [32768, 61000[ */
333 	if (high - low < 4)
334 		attempt_half = 0;
335 	if (attempt_half) {
336 		int half = low + (((high - low) >> 2) << 1);
337 
338 		if (attempt_half == 1)
339 			high = half;
340 		else
341 			low = half;
342 	}
343 	remaining = high - low;
344 	if (likely(remaining > 1))
345 		remaining &= ~1U;
346 
347 	offset = get_random_u32_below(remaining);
348 	/* __inet_hash_connect() favors ports that have the same parity as @low;
349 	 * we do the opposite here so as not to pollute connect() users.
350 	 */
351 	offset |= 1U;
352 
353 other_parity_scan:
354 	port = low + offset;
355 	for (i = 0; i < remaining; i += 2, port += 2) {
356 		if (unlikely(port >= high))
357 			port -= remaining;
358 		if (inet_is_local_reserved_port(net, port))
359 			continue;
360 		head = &hinfo->bhash[inet_bhashfn(net, port,
361 						  hinfo->bhash_size)];
362 		spin_lock_bh(&head->lock);
363 		if (inet_use_bhash2_on_bind(sk)) {
364 			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
365 				goto next_port;
366 		}
367 
368 		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
369 		spin_lock(&head2->lock);
370 		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
371 		inet_bind_bucket_for_each(tb, &head->chain)
372 			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
373 				if (!inet_csk_bind_conflict(sk, tb, tb2,
374 							    relax, false))
375 					goto success;
376 				spin_unlock(&head2->lock);
377 				goto next_port;
378 			}
379 		tb = NULL;
380 		goto success;
381 next_port:
382 		spin_unlock_bh(&head->lock);
383 		cond_resched();
384 	}
385 
386 	offset--;
387 	if (!(offset & 1))
388 		goto other_parity_scan;
389 
390 	if (attempt_half == 1) {
391 		/* OK we now try the upper half of the range */
392 		attempt_half = 2;
393 		goto other_half_scan;
394 	}
395 
396 	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
397 		/* We still have a chance to connect to different destinations */
398 		relax = true;
399 		goto ports_exhausted;
400 	}
401 	return NULL;
402 success:
403 	*port_ret = port;
404 	*tb_ret = tb;
405 	*tb2_ret = tb2;
406 	*head2_ret = head2;
407 	return head;
408 }
409 
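/* Fast-path check for SO_REUSEPORT binds: return non-zero when @sk may
 * join bucket @tb based on the cached fastreuseport state (same uid and,
 * for FASTREUSEPORT_STRICT buckets, a matching rcv_saddr) without walking
 * the owner list.
 */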
410 static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
411 				     struct sock *sk)
412 {
413 	kuid_t uid = sock_i_uid(sk);
414 
415 	if (tb->fastreuseport <= 0)
416 		return 0;
417 	if (!sk->sk_reuseport)
418 		return 0;
419 	if (rcu_access_pointer(sk->sk_reuseport_cb))
420 		return 0;
421 	if (!uid_eq(tb->fastuid, uid))
422 		return 0;
423 	/* We only need to check the rcv_saddr if this tb was once marked
424 	 * without fastreuseport and then was reset, as we can only know that
425 	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
426 	 * owners list.
427 	 */
428 	if (tb->fastreuseport == FASTREUSEPORT_ANY)
429 		return 1;
430 #if IS_ENABLED(CONFIG_IPV6)
431 	if (tb->fast_sk_family == AF_INET6)
432 		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
433 					    inet6_rcv_saddr(sk),
434 					    tb->fast_rcv_saddr,
435 					    sk->sk_rcv_saddr,
436 					    tb->fast_ipv6_only,
437 					    ipv6_only_sock(sk), true, false);
438 #endif
439 	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
440 				    ipv6_only_sock(sk), true, false);
441 }
442 
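/* Refresh the cached fastreuse/fastreuseport state of bucket @tb after @sk
 * has been allowed to bind to it, so that later binds can skip the full
 * conflict scan when possible.
 */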
443 void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
444 			       struct sock *sk)
445 {
446 	kuid_t uid = sock_i_uid(sk);
447 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
448 
449 	if (hlist_empty(&tb->owners)) {
450 		tb->fastreuse = reuse;
451 		if (sk->sk_reuseport) {
452 			tb->fastreuseport = FASTREUSEPORT_ANY;
453 			tb->fastuid = uid;
454 			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
455 			tb->fast_ipv6_only = ipv6_only_sock(sk);
456 			tb->fast_sk_family = sk->sk_family;
457 #if IS_ENABLED(CONFIG_IPV6)
458 			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
459 #endif
460 		} else {
461 			tb->fastreuseport = 0;
462 		}
463 	} else {
464 		if (!reuse)
465 			tb->fastreuse = 0;
466 		if (sk->sk_reuseport) {
467 			/* We didn't match or we don't have fastreuseport set on
468 			 * the tb, but we have sk_reuseport set on this socket
469 			 * and we know that there are no bind conflicts with
470 			 * this socket in this tb, so reset our tb's reuseport
471 			 * settings so that any subsequent sockets that match
472 			 * our current socket will be put on the fast path.
473 			 *
474 			 * If we reset we need to set FASTREUSEPORT_STRICT so we
475 			 * do extra checking for all subsequent sk_reuseport
476 			 * socks.
477 			 */
478 			if (!sk_reuseport_match(tb, sk)) {
479 				tb->fastreuseport = FASTREUSEPORT_STRICT;
480 				tb->fastuid = uid;
481 				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
482 				tb->fast_ipv6_only = ipv6_only_sock(sk);
483 				tb->fast_sk_family = sk->sk_family;
484 #if IS_ENABLED(CONFIG_IPV6)
485 				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
486 #endif
487 			}
488 		} else {
489 			tb->fastreuseport = 0;
490 		}
491 	}
492 }
493 
494 /* Obtain a reference to a local port for the given sock;
495  * if snum is zero, select any available local port.
496  * We try to allocate an odd port (and leave even ports for connect()).
497  */
498 int inet_csk_get_port(struct sock *sk, unsigned short snum)
499 {
500 	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
501 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
502 	bool found_port = false, check_bind_conflict = true;
503 	bool bhash_created = false, bhash2_created = false;
504 	int ret = -EADDRINUSE, port = snum, l3mdev;
505 	struct inet_bind_hashbucket *head, *head2;
506 	struct inet_bind2_bucket *tb2 = NULL;
507 	struct inet_bind_bucket *tb = NULL;
508 	bool head2_lock_acquired = false;
509 	struct net *net = sock_net(sk);
510 
511 	l3mdev = inet_sk_bound_l3mdev(sk);
512 
513 	if (!port) {
514 		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
515 		if (!head)
516 			return ret;
517 
518 		head2_lock_acquired = true;
519 
520 		if (tb && tb2)
521 			goto success;
522 		found_port = true;
523 	} else {
524 		head = &hinfo->bhash[inet_bhashfn(net, port,
525 						  hinfo->bhash_size)];
526 		spin_lock_bh(&head->lock);
527 		inet_bind_bucket_for_each(tb, &head->chain)
528 			if (inet_bind_bucket_match(tb, net, port, l3mdev))
529 				break;
530 	}
531 
532 	if (!tb) {
533 		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
534 					     head, port, l3mdev);
535 		if (!tb)
536 			goto fail_unlock;
537 		bhash_created = true;
538 	}
539 
540 	if (!found_port) {
541 		if (!hlist_empty(&tb->owners)) {
542 			if (sk->sk_reuse == SK_FORCE_REUSE ||
543 			    (tb->fastreuse > 0 && reuse) ||
544 			    sk_reuseport_match(tb, sk))
545 				check_bind_conflict = false;
546 		}
547 
548 		if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
549 			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
550 				goto fail_unlock;
551 		}
552 
553 		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
554 		spin_lock(&head2->lock);
555 		head2_lock_acquired = true;
556 		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
557 	}
558 
559 	if (!tb2) {
560 		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
561 					       net, head2, port, l3mdev, sk);
562 		if (!tb2)
563 			goto fail_unlock;
564 		bhash2_created = true;
565 	}
566 
567 	if (!found_port && check_bind_conflict) {
568 		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
569 			goto fail_unlock;
570 	}
571 
572 success:
573 	inet_csk_update_fastreuse(tb, sk);
574 
575 	if (!inet_csk(sk)->icsk_bind_hash)
576 		inet_bind_hash(sk, tb, tb2, port);
577 	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
578 	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
579 	ret = 0;
580 
581 fail_unlock:
582 	if (ret) {
583 		if (bhash_created)
584 			inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
585 		if (bhash2_created)
586 			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
587 						  tb2);
588 	}
589 	if (head2_lock_acquired)
590 		spin_unlock(&head2->lock);
591 	spin_unlock_bh(&head->lock);
592 	return ret;
593 }
594 EXPORT_SYMBOL_GPL(inet_csk_get_port);
595 
596 /*
597  * Wait for an incoming connection, avoid race conditions. This must be called
598  * with the socket locked.
599  */
600 static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
601 {
602 	struct inet_connection_sock *icsk = inet_csk(sk);
603 	DEFINE_WAIT(wait);
604 	int err;
605 
606 	/*
607 	 * True wake-one mechanism for incoming connections: only
608 	 * one process gets woken up, not the 'whole herd'.
609 	 * Since we do not 'race & poll' for established sockets
610 	 * anymore, the common case will execute the loop only once.
611 	 *
612 	 * Subtle issue: the entry added by "add_wait_queue_exclusive()" goes
613 	 * after any current non-exclusive waiters, and we know that
614 	 * it will always _stay_ after any new non-exclusive waiters
615 	 * because all non-exclusive waiters are added at the
616 	 * beginning of the wait-queue. As such, it's ok to "drop"
617 	 * our exclusiveness temporarily when we get woken up without
618 	 * having to remove and re-insert us on the wait queue.
619 	 */
620 	for (;;) {
621 		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
622 					  TASK_INTERRUPTIBLE);
623 		release_sock(sk);
624 		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
625 			timeo = schedule_timeout(timeo);
626 		sched_annotate_sleep();
627 		lock_sock(sk);
628 		err = 0;
629 		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
630 			break;
631 		err = -EINVAL;
632 		if (sk->sk_state != TCP_LISTEN)
633 			break;
634 		err = sock_intr_errno(timeo);
635 		if (signal_pending(current))
636 			break;
637 		err = -EAGAIN;
638 		if (!timeo)
639 			break;
640 	}
641 	finish_wait(sk_sleep(sk), &wait);
642 	return err;
643 }
644 
645 /*
646  * This will accept the next outstanding connection.
647  */
648 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
649 {
650 	struct inet_connection_sock *icsk = inet_csk(sk);
651 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
652 	struct request_sock *req;
653 	struct sock *newsk;
654 	int error;
655 
656 	lock_sock(sk);
657 
658 	/* We need to make sure that this socket is listening,
659 	 * and that it has something pending.
660 	 */
661 	error = -EINVAL;
662 	if (sk->sk_state != TCP_LISTEN)
663 		goto out_err;
664 
665 	/* Find already established connection */
666 	if (reqsk_queue_empty(queue)) {
667 		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
668 
669 		/* If this is a non-blocking socket, don't sleep */
670 		error = -EAGAIN;
671 		if (!timeo)
672 			goto out_err;
673 
674 		error = inet_csk_wait_for_connect(sk, timeo);
675 		if (error)
676 			goto out_err;
677 	}
678 	req = reqsk_queue_remove(queue, sk);
679 	newsk = req->sk;
680 
681 	if (sk->sk_protocol == IPPROTO_TCP &&
682 	    tcp_rsk(req)->tfo_listener) {
683 		spin_lock_bh(&queue->fastopenq.lock);
684 		if (tcp_rsk(req)->tfo_listener) {
685 			/* We are still waiting for the final ACK from the 3WHS,
686 			 * so we can't free req now. Instead, we set req->sk to
687 			 * NULL to signify that the child socket is taken, so that
688 			 * reqsk_fastopen_remove() will free the req
689 			 * when the 3WHS finishes (or is aborted).
690 			 */
691 			req->sk = NULL;
692 			req = NULL;
693 		}
694 		spin_unlock_bh(&queue->fastopenq.lock);
695 	}
696 
697 out:
698 	release_sock(sk);
699 	if (newsk && mem_cgroup_sockets_enabled) {
700 		int amt = 0;
701 
702 		/* Atomically get the memory usage, then set and charge
703 		 * newsk->sk_memcg.
704 		 */
705 		lock_sock(newsk);
706 
707 		mem_cgroup_sk_alloc(newsk);
708 		if (newsk->sk_memcg) {
709 			/* The socket has not been accepted yet, no need
710 			 * to look at newsk->sk_wmem_queued.
711 			 */
712 			amt = sk_mem_pages(newsk->sk_forward_alloc +
713 					   atomic_read(&newsk->sk_rmem_alloc));
714 		}
715 
716 		if (amt)
717 			mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
718 						GFP_KERNEL | __GFP_NOFAIL);
719 
720 		release_sock(newsk);
721 	}
722 	if (req)
723 		reqsk_put(req);
724 	return newsk;
725 out_err:
726 	newsk = NULL;
727 	req = NULL;
728 	*err = error;
729 	goto out;
730 }
731 EXPORT_SYMBOL(inet_csk_accept);
732 
733 /*
734  * Using different timers for retransmit, delayed acks and probes.
735  * We may wish to use just one timer maintaining a list of expiry jiffies
736  * to optimize.
737  */
738 void inet_csk_init_xmit_timers(struct sock *sk,
739 			       void (*retransmit_handler)(struct timer_list *t),
740 			       void (*delack_handler)(struct timer_list *t),
741 			       void (*keepalive_handler)(struct timer_list *t))
742 {
743 	struct inet_connection_sock *icsk = inet_csk(sk);
744 
745 	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
746 	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
747 	timer_setup(&sk->sk_timer, keepalive_handler, 0);
748 	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
749 }
750 EXPORT_SYMBOL(inet_csk_init_xmit_timers);
751 
752 void inet_csk_clear_xmit_timers(struct sock *sk)
753 {
754 	struct inet_connection_sock *icsk = inet_csk(sk);
755 
756 	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
757 
758 	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
759 	sk_stop_timer(sk, &icsk->icsk_delack_timer);
760 	sk_stop_timer(sk, &sk->sk_timer);
761 }
762 EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
763 
764 void inet_csk_delete_keepalive_timer(struct sock *sk)
765 {
766 	sk_stop_timer(sk, &sk->sk_timer);
767 }
768 EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
769 
770 void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
771 {
772 	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
773 }
774 EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
775 
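/* Build the output route used to send the SYN-ACK for @req, honouring any
 * IP source route option carried by the request.  Returns the route's dst
 * entry, or NULL (bumping OUTNOROUTES) if no usable route exists.
 */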
776 struct dst_entry *inet_csk_route_req(const struct sock *sk,
777 				     struct flowi4 *fl4,
778 				     const struct request_sock *req)
779 {
780 	const struct inet_request_sock *ireq = inet_rsk(req);
781 	struct net *net = read_pnet(&ireq->ireq_net);
782 	struct ip_options_rcu *opt;
783 	struct rtable *rt;
784 
785 	rcu_read_lock();
786 	opt = rcu_dereference(ireq->ireq_opt);
787 
788 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
789 			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
790 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
791 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
792 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
793 			   htons(ireq->ir_num), sk->sk_uid);
794 	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
795 	rt = ip_route_output_flow(net, fl4, sk);
796 	if (IS_ERR(rt))
797 		goto no_route;
798 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
799 		goto route_err;
800 	rcu_read_unlock();
801 	return &rt->dst;
802 
803 route_err:
804 	ip_rt_put(rt);
805 no_route:
806 	rcu_read_unlock();
807 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
808 	return NULL;
809 }
810 EXPORT_SYMBOL_GPL(inet_csk_route_req);
811 
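/* Like inet_csk_route_req(), but build the route for the newly created
 * child socket, storing the flow in the child's inet cork for later use.
 */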
812 struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
813 					    struct sock *newsk,
814 					    const struct request_sock *req)
815 {
816 	const struct inet_request_sock *ireq = inet_rsk(req);
817 	struct net *net = read_pnet(&ireq->ireq_net);
818 	struct inet_sock *newinet = inet_sk(newsk);
819 	struct ip_options_rcu *opt;
820 	struct flowi4 *fl4;
821 	struct rtable *rt;
822 
823 	opt = rcu_dereference(ireq->ireq_opt);
824 	fl4 = &newinet->cork.fl.u.ip4;
825 
826 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
827 			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
828 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
829 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
830 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
831 			   htons(ireq->ir_num), sk->sk_uid);
832 	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
833 	rt = ip_route_output_flow(net, fl4, sk);
834 	if (IS_ERR(rt))
835 		goto no_route;
836 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
837 		goto route_err;
838 	return &rt->dst;
839 
840 route_err:
841 	ip_rt_put(rt);
842 no_route:
843 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
844 	return NULL;
845 }
846 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
847 
848 /* Decide when to expire the request and when to resend SYN-ACK */
849 static void syn_ack_recalc(struct request_sock *req,
850 			   const int max_syn_ack_retries,
851 			   const u8 rskq_defer_accept,
852 			   int *expire, int *resend)
853 {
854 	if (!rskq_defer_accept) {
855 		*expire = req->num_timeout >= max_syn_ack_retries;
856 		*resend = 1;
857 		return;
858 	}
859 	*expire = req->num_timeout >= max_syn_ack_retries &&
860 		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
861 	/* Do not resend while waiting for data after the ACK;
862 	 * start resending at the end of the deferral period to give a
863 	 * last chance for data or an ACK to create an established socket.
864 	 */
865 	*resend = !inet_rsk(req)->acked ||
866 		  req->num_timeout >= rskq_defer_accept - 1;
867 }
868 
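/* Retransmit the SYN-ACK for @req, counting the retransmission on success. */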
869 int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
870 {
871 	int err = req->rsk_ops->rtx_syn_ack(parent, req);
872 
873 	if (!err)
874 		req->num_retrans++;
875 	return err;
876 }
877 EXPORT_SYMBOL(inet_rtx_syn_ack);
878 
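/* Duplicate @req so it can be migrated to the new listener @sk; the clone
 * takes over the caller's reference on @sk as its rsk_listener.  Returns
 * NULL (and drops that reference) if allocation fails.
 */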
879 static struct request_sock *inet_reqsk_clone(struct request_sock *req,
880 					     struct sock *sk)
881 {
882 	struct sock *req_sk, *nreq_sk;
883 	struct request_sock *nreq;
884 
885 	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
886 	if (!nreq) {
887 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
888 
889 		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
890 		sock_put(sk);
891 		return NULL;
892 	}
893 
894 	req_sk = req_to_sk(req);
895 	nreq_sk = req_to_sk(nreq);
896 
897 	memcpy(nreq_sk, req_sk,
898 	       offsetof(struct sock, sk_dontcopy_begin));
899 	memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
900 	       req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
901 
902 	sk_node_init(&nreq_sk->sk_node);
903 	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
904 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
905 	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
906 #endif
907 	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
908 
909 	nreq->rsk_listener = sk;
910 
911 	/* We need not acquire fastopenq->lock
912 	 * because the child socket is locked in inet_csk_listen_stop().
913 	 */
914 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
915 		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);
916 
917 	return nreq;
918 }
919 
920 static void reqsk_queue_migrated(struct request_sock_queue *queue,
921 				 const struct request_sock *req)
922 {
923 	if (req->num_timeout == 0)
924 		atomic_inc(&queue->young);
925 	atomic_inc(&queue->qlen);
926 }
927 
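/* Clear the option/saved-SYN pointers that are shared with the other copy
 * of a migrated request, so that freeing this copy does not free them too.
 */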
928 static void reqsk_migrate_reset(struct request_sock *req)
929 {
930 	req->saved_syn = NULL;
931 #if IS_ENABLED(CONFIG_IPV6)
932 	inet_rsk(req)->ipv6_opt = NULL;
933 	inet_rsk(req)->pktopts = NULL;
934 #else
935 	inet_rsk(req)->ireq_opt = NULL;
936 #endif
937 }
938 
939 /* return true if req was found in the ehash table */
940 static bool reqsk_queue_unlink(struct request_sock *req)
941 {
942 	struct sock *sk = req_to_sk(req);
943 	bool found = false;
944 
945 	if (sk_hashed(sk)) {
946 		struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
947 		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
948 
949 		spin_lock(lock);
950 		found = __sk_nulls_del_node_init_rcu(sk);
951 		spin_unlock(lock);
952 	}
953 	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
954 		reqsk_put(req);
955 	return found;
956 }
957 
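/* Unlink @req from the ehash table and the listener's accept queue
 * accounting; returns true if it was actually unlinked.
 */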
958 bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
959 {
960 	bool unlinked = reqsk_queue_unlink(req);
961 
962 	if (unlinked) {
963 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
964 		reqsk_put(req);
965 	}
966 	return unlinked;
967 }
968 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
969 
970 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
971 {
972 	inet_csk_reqsk_queue_drop(sk, req);
973 	reqsk_put(req);
974 }
975 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
976 
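/* Per-request SYN-ACK retransmission timer: retransmits the SYN-ACK with
 * backoff, migrates the request to another reuseport listener if the
 * original one has stopped listening, and drops requests that have
 * exhausted their retries.
 */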
977 static void reqsk_timer_handler(struct timer_list *t)
978 {
979 	struct request_sock *req = from_timer(req, t, rsk_timer);
980 	struct request_sock *nreq = NULL, *oreq = req;
981 	struct sock *sk_listener = req->rsk_listener;
982 	struct inet_connection_sock *icsk;
983 	struct request_sock_queue *queue;
984 	struct net *net;
985 	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
986 
987 	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
988 		struct sock *nsk;
989 
990 		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
991 		if (!nsk)
992 			goto drop;
993 
994 		nreq = inet_reqsk_clone(req, nsk);
995 		if (!nreq)
996 			goto drop;
997 
998 		/* The new timer for the cloned req can release the two
999 		 * references taken below by calling inet_csk_reqsk_queue_drop_and_put(),
1000 		 * so hold one extra reference to prevent a use-after-free and
1001 		 * call reqsk_put() just before returning.
1002 		 */
1003 		refcount_set(&nreq->rsk_refcnt, 2 + 1);
1004 		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1005 		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
1006 
1007 		req = nreq;
1008 		sk_listener = nsk;
1009 	}
1010 
1011 	icsk = inet_csk(sk_listener);
1012 	net = sock_net(sk_listener);
1013 	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
1014 		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
1015 	/* Normally all open requests are young and become mature
1016 	 * (i.e. are converted to an established socket) before the first
1017 	 * timeout.  If a SYN-ACK was not acknowledged for 1 second, it
1018 	 * means one of the following: the SYN-ACK was lost, the ACK was
1019 	 * lost, the RTT is high, or nobody intends to ACK (i.e. a SYN
1020 	 * flood).  When the server is moderately loaded, the queue fills
1021 	 * with old open requests, reducing its effective size.  When the
1022 	 * server is heavily loaded, the queue size can drop to zero after
1023 	 * several minutes of work.  That is not a SYN flood, it is normal
1024 	 * operation.  The solution is to prune entries that are too old,
1025 	 * overriding the normal timeout, when the situation becomes
1026 	 * dangerous.
1027 	 *
1028 	 * Essentially, we reserve half of the room for young embryos and
1029 	 * abort old ones without pity if they are about to clog our
1030 	 * table.
1031 	 */
1032 	queue = &icsk->icsk_accept_queue;
1033 	qlen = reqsk_queue_len(queue);
1034 	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
1035 		int young = reqsk_queue_len_young(queue) << 1;
1036 
1037 		while (max_syn_ack_retries > 2) {
1038 			if (qlen < young)
1039 				break;
1040 			max_syn_ack_retries--;
1041 			young <<= 1;
1042 		}
1043 	}
1044 	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
1045 		       &expire, &resend);
1046 	req->rsk_ops->syn_ack_timeout(req);
1047 	if (!expire &&
1048 	    (!resend ||
1049 	     !inet_rtx_syn_ack(sk_listener, req) ||
1050 	     inet_rsk(req)->acked)) {
1051 		if (req->num_timeout++ == 0)
1052 			atomic_dec(&queue->young);
1053 		mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));
1054 
1055 		if (!nreq)
1056 			return;
1057 
1058 		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
1059 			/* delete timer */
1060 			inet_csk_reqsk_queue_drop(sk_listener, nreq);
1061 			goto no_ownership;
1062 		}
1063 
1064 		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
1065 		reqsk_migrate_reset(oreq);
1066 		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
1067 		reqsk_put(oreq);
1068 
1069 		reqsk_put(nreq);
1070 		return;
1071 	}
1072 
1073 	/* Even if we can clone the req, we may not need to retransmit any more
1074 	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc.), or another
1075 	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
1076 	 */
1077 	if (nreq) {
1078 		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
1079 no_ownership:
1080 		reqsk_migrate_reset(nreq);
1081 		reqsk_queue_removed(queue, nreq);
1082 		__reqsk_free(nreq);
1083 	}
1084 
1085 drop:
1086 	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
1087 }
1088 
1089 static void reqsk_queue_hash_req(struct request_sock *req,
1090 				 unsigned long timeout)
1091 {
1092 	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1093 	mod_timer(&req->rsk_timer, jiffies + timeout);
1094 
1095 	inet_ehash_insert(req_to_sk(req), NULL, NULL);
1096 	/* before letting lookups find us, make sure all req fields
1097 	 * are committed to memory and refcnt initialized.
1098 	 */
1099 	smp_wmb();
1100 	refcount_set(&req->rsk_refcnt, 2 + 1);
1101 }
1102 
1103 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
1104 				   unsigned long timeout)
1105 {
1106 	reqsk_queue_hash_req(req, timeout);
1107 	inet_csk_reqsk_queue_added(sk);
1108 }
1109 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
1110 
1111 static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
1112 			   const gfp_t priority)
1113 {
1114 	struct inet_connection_sock *icsk = inet_csk(newsk);
1115 
1116 	if (!icsk->icsk_ulp_ops)
1117 		return;
1118 
1119 	icsk->icsk_ulp_ops->clone(req, newsk, priority);
1120 }
1121 
1122 /**
1123  *	inet_csk_clone_lock - clone an inet socket, and lock its clone
1124  *	@sk: the socket to clone
1125  *	@req: request_sock
1126  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1127  *
1128  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1129  */
1130 struct sock *inet_csk_clone_lock(const struct sock *sk,
1131 				 const struct request_sock *req,
1132 				 const gfp_t priority)
1133 {
1134 	struct sock *newsk = sk_clone_lock(sk, priority);
1135 
1136 	if (newsk) {
1137 		struct inet_connection_sock *newicsk = inet_csk(newsk);
1138 
1139 		inet_sk_set_state(newsk, TCP_SYN_RECV);
1140 		newicsk->icsk_bind_hash = NULL;
1141 		newicsk->icsk_bind2_hash = NULL;
1142 
1143 		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
1144 		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
1145 		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
1146 
1147 		/* listeners have SOCK_RCU_FREE, not the children */
1148 		sock_reset_flag(newsk, SOCK_RCU_FREE);
1149 
1150 		inet_sk(newsk)->mc_list = NULL;
1151 
1152 		newsk->sk_mark = inet_rsk(req)->ir_mark;
1153 		atomic64_set(&newsk->sk_cookie,
1154 			     atomic64_read(&inet_rsk(req)->ir_cookie));
1155 
1156 		newicsk->icsk_retransmits = 0;
1157 		newicsk->icsk_backoff	  = 0;
1158 		newicsk->icsk_probes_out  = 0;
1159 		newicsk->icsk_probes_tstamp = 0;
1160 
1161 		/* Deinitialize accept_queue to trap illegal accesses. */
1162 		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
1163 
1164 		inet_clone_ulp(req, newsk, priority);
1165 
1166 		security_inet_csk_clone(newsk, req);
1167 	}
1168 	return newsk;
1169 }
1170 EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
1171 
1172 /*
1173  * At this point, there should be no process reference to this
1174  * socket, and thus no user references at all.  Therefore we
1175  * can assume the socket waitqueue is inactive and nobody will
1176  * try to jump onto it.
1177  */
1178 void inet_csk_destroy_sock(struct sock *sk)
1179 {
1180 	WARN_ON(sk->sk_state != TCP_CLOSE);
1181 	WARN_ON(!sock_flag(sk, SOCK_DEAD));
1182 
1183 	/* It cannot be in hash table! */
1184 	WARN_ON(!sk_unhashed(sk));
1185 
1186 	/* If inet_sk(sk)->inet_num is non-zero, it must be bound */
1187 	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
1188 
1189 	sk->sk_prot->destroy(sk);
1190 
1191 	sk_stream_kill_queues(sk);
1192 
1193 	xfrm_sk_free_policy(sk);
1194 
1195 	this_cpu_dec(*sk->sk_prot->orphan_count);
1196 
1197 	sock_put(sk);
1198 }
1199 EXPORT_SYMBOL(inet_csk_destroy_sock);
1200 
1201 /* This function allows forcing the closure of a socket after the call to
1202  * tcp/dccp_create_openreq_child().
1203  */
1204 void inet_csk_prepare_forced_close(struct sock *sk)
1205 	__releases(&sk->sk_lock.slock)
1206 {
1207 	/* sk_clone_lock locked the socket and set refcnt to 2 */
1208 	bh_unlock_sock(sk);
1209 	sock_put(sk);
1210 	inet_csk_prepare_for_destroy_sock(sk);
1211 	inet_sk(sk)->inet_num = 0;
1212 }
1213 EXPORT_SYMBOL(inet_csk_prepare_forced_close);
1214 
1215 static int inet_ulp_can_listen(const struct sock *sk)
1216 {
1217 	const struct inet_connection_sock *icsk = inet_csk(sk);
1218 
1219 	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
1220 		return -EINVAL;
1221 
1222 	return 0;
1223 }
1224 
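/* Move @sk to TCP_LISTEN: allocate the accept queue, (re)validate the
 * local port and hash the socket as a listener.  On failure the socket
 * goes back to TCP_CLOSE.
 */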
1225 int inet_csk_listen_start(struct sock *sk)
1226 {
1227 	struct inet_connection_sock *icsk = inet_csk(sk);
1228 	struct inet_sock *inet = inet_sk(sk);
1229 	int err;
1230 
1231 	err = inet_ulp_can_listen(sk);
1232 	if (unlikely(err))
1233 		return err;
1234 
1235 	reqsk_queue_alloc(&icsk->icsk_accept_queue);
1236 
1237 	sk->sk_ack_backlog = 0;
1238 	inet_csk_delack_init(sk);
1239 
1240 	/* There is a race window here: we announce ourselves listening,
1241 	 * but this transition has not yet been validated by get_port().
1242 	 * It is OK, because this socket enters the hash table only
1243 	 * after validation is complete.
1244 	 */
1245 	inet_sk_state_store(sk, TCP_LISTEN);
1246 	err = sk->sk_prot->get_port(sk, inet->inet_num);
1247 	if (!err) {
1248 		inet->inet_sport = htons(inet->inet_num);
1249 
1250 		sk_dst_reset(sk);
1251 		err = sk->sk_prot->hash(sk);
1252 
1253 		if (likely(!err))
1254 			return 0;
1255 	}
1256 
1257 	inet_sk_set_state(sk, TCP_CLOSE);
1258 	return err;
1259 }
1260 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
1261 
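/* Disconnect and destroy a child socket that will never be accepted,
 * detaching it from a pending TCP Fast Open request first if necessary.
 */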
1262 static void inet_child_forget(struct sock *sk, struct request_sock *req,
1263 			      struct sock *child)
1264 {
1265 	sk->sk_prot->disconnect(child, O_NONBLOCK);
1266 
1267 	sock_orphan(child);
1268 
1269 	this_cpu_inc(*sk->sk_prot->orphan_count);
1270 
1271 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
1272 		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
1273 		BUG_ON(sk != req->rsk_listener);
1274 
1275 		/* Paranoid, to prevent a race condition if
1276 		 * an inbound packet destined for the child is
1277 		 * blocked by the sock lock in tcp_v4_rcv().
1278 		 * Also to satisfy an assertion in
1279 		 * tcp_v4_destroy_sock().
1280 		 */
1281 		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
1282 	}
1283 	inet_csk_destroy_sock(child);
1284 }
1285 
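/* Queue an established child on the listener's accept queue.  If the
 * listener has already left TCP_LISTEN, the child is dropped via
 * inet_child_forget() and NULL is returned.
 */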
1286 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
1287 				      struct request_sock *req,
1288 				      struct sock *child)
1289 {
1290 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1291 
1292 	spin_lock(&queue->rskq_lock);
1293 	if (unlikely(sk->sk_state != TCP_LISTEN)) {
1294 		inet_child_forget(sk, req, child);
1295 		child = NULL;
1296 	} else {
1297 		req->sk = child;
1298 		req->dl_next = NULL;
1299 		if (queue->rskq_accept_head == NULL)
1300 			WRITE_ONCE(queue->rskq_accept_head, req);
1301 		else
1302 			queue->rskq_accept_tail->dl_next = req;
1303 		queue->rskq_accept_tail = req;
1304 		sk_acceptq_added(sk);
1305 	}
1306 	spin_unlock(&queue->rskq_lock);
1307 	return child;
1308 }
1309 EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
1310 
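/* Final step of the passive-open hash dance: drop @req from the listener's
 * queues and, if we own the request, hand the child to the accept queue
 * (migrating the request to another listener if one was selected).
 * Returns the child on success, or NULL if ownership was lost or queueing
 * failed.
 */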
1311 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
1312 					 struct request_sock *req, bool own_req)
1313 {
1314 	if (own_req) {
1315 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
1316 		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
1317 
1318 		if (sk != req->rsk_listener) {
1319 			/* another listening sk has been selected,
1320 			 * migrate the req to it.
1321 			 */
1322 			struct request_sock *nreq;
1323 
1324 			/* hold a refcnt for the nreq->rsk_listener
1325 			 * which is assigned in inet_reqsk_clone()
1326 			 */
1327 			sock_hold(sk);
1328 			nreq = inet_reqsk_clone(req, sk);
1329 			if (!nreq) {
1330 				inet_child_forget(sk, req, child);
1331 				goto child_put;
1332 			}
1333 
1334 			refcount_set(&nreq->rsk_refcnt, 1);
1335 			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
1336 				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
1337 				reqsk_migrate_reset(req);
1338 				reqsk_put(req);
1339 				return child;
1340 			}
1341 
1342 			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
1343 			reqsk_migrate_reset(nreq);
1344 			__reqsk_free(nreq);
1345 		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
1346 			return child;
1347 		}
1348 	}
1349 	/* Too bad, another child took ownership of the request, undo. */
1350 child_put:
1351 	bh_unlock_sock(child);
1352 	sock_put(child);
1353 	return NULL;
1354 }
1355 EXPORT_SYMBOL(inet_csk_complete_hashdance);
1356 
1357 /*
1358  *	This routine closes sockets which have been at least partially
1359  *	opened, but not yet accepted.
1360  */
1361 void inet_csk_listen_stop(struct sock *sk)
1362 {
1363 	struct inet_connection_sock *icsk = inet_csk(sk);
1364 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
1365 	struct request_sock *next, *req;
1366 
1367 	/* Following the specs, it would be better either to send a FIN
1368 	 * (and enter FIN-WAIT-1, i.e. a normal close)
1369 	 * or to send an active reset (abort).
1370 	 * Certainly, that is pretty dangerous during a synflood, but it is
1371 	 * a bad justification for our negligence 8)
1372 	 * To be honest, we are not able to implement either
1373 	 * of the variants now.			--ANK
1374 	 */
1375 	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
1376 		struct sock *child = req->sk, *nsk;
1377 		struct request_sock *nreq;
1378 
1379 		local_bh_disable();
1380 		bh_lock_sock(child);
1381 		WARN_ON(sock_owned_by_user(child));
1382 		sock_hold(child);
1383 
1384 		nsk = reuseport_migrate_sock(sk, child, NULL);
1385 		if (nsk) {
1386 			nreq = inet_reqsk_clone(req, nsk);
1387 			if (nreq) {
1388 				refcount_set(&nreq->rsk_refcnt, 1);
1389 
1390 				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
1391 					__NET_INC_STATS(sock_net(nsk),
1392 							LINUX_MIB_TCPMIGRATEREQSUCCESS);
1393 					reqsk_migrate_reset(req);
1394 				} else {
1395 					__NET_INC_STATS(sock_net(nsk),
1396 							LINUX_MIB_TCPMIGRATEREQFAILURE);
1397 					reqsk_migrate_reset(nreq);
1398 					__reqsk_free(nreq);
1399 				}
1400 
1401 				/* inet_csk_reqsk_queue_add() has already
1402 				 * called inet_child_forget() in the failure case.
1403 				 */
1404 				goto skip_child_forget;
1405 			}
1406 		}
1407 
1408 		inet_child_forget(sk, req, child);
1409 skip_child_forget:
1410 		reqsk_put(req);
1411 		bh_unlock_sock(child);
1412 		local_bh_enable();
1413 		sock_put(child);
1414 
1415 		cond_resched();
1416 	}
1417 	if (queue->fastopenq.rskq_rst_head) {
1418 		/* Free all the reqs queued in rskq_rst_head. */
1419 		spin_lock_bh(&queue->fastopenq.lock);
1420 		req = queue->fastopenq.rskq_rst_head;
1421 		queue->fastopenq.rskq_rst_head = NULL;
1422 		spin_unlock_bh(&queue->fastopenq.lock);
1423 		while (req != NULL) {
1424 			next = req->dl_next;
1425 			reqsk_put(req);
1426 			req = next;
1427 		}
1428 	}
1429 	WARN_ON_ONCE(sk->sk_ack_backlog);
1430 }
1431 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
1432 
1433 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
1434 {
1435 	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
1436 	const struct inet_sock *inet = inet_sk(sk);
1437 
1438 	sin->sin_family		= AF_INET;
1439 	sin->sin_addr.s_addr	= inet->inet_daddr;
1440 	sin->sin_port		= inet->inet_dport;
1441 }
1442 EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
1443 
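/* Recreate the cached route of a connected socket from its flow, taking a
 * stored source-routing option into account, and re-attach it with
 * sk_setup_caps().
 */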
1444 static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
1445 {
1446 	const struct inet_sock *inet = inet_sk(sk);
1447 	const struct ip_options_rcu *inet_opt;
1448 	__be32 daddr = inet->inet_daddr;
1449 	struct flowi4 *fl4;
1450 	struct rtable *rt;
1451 
1452 	rcu_read_lock();
1453 	inet_opt = rcu_dereference(inet->inet_opt);
1454 	if (inet_opt && inet_opt->opt.srr)
1455 		daddr = inet_opt->opt.faddr;
1456 	fl4 = &fl->u.ip4;
1457 	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
1458 				   inet->inet_saddr, inet->inet_dport,
1459 				   inet->inet_sport, sk->sk_protocol,
1460 				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1461 	if (IS_ERR(rt))
1462 		rt = NULL;
1463 	if (rt)
1464 		sk_setup_caps(sk, &rt->dst);
1465 	rcu_read_unlock();
1466 
1467 	return &rt->dst;
1468 }
1469 
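/* Propagate a new path MTU to the socket's route, rebuilding the route if
 * the cached one is gone, and return the (possibly refreshed) dst entry.
 */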
1470 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
1471 {
1472 	struct dst_entry *dst = __sk_dst_check(sk, 0);
1473 	struct inet_sock *inet = inet_sk(sk);
1474 
1475 	if (!dst) {
1476 		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1477 		if (!dst)
1478 			goto out;
1479 	}
1480 	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
1481 
1482 	dst = __sk_dst_check(sk, 0);
1483 	if (!dst)
1484 		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1485 out:
1486 	return dst;
1487 }
1488 EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
1489