xref: /linux/net/ipv4/inet_connection_sock.c (revision d0f4771e2befbe8de3a16a564c6bbd1d5502cec3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Support for INET connection oriented protocols.
8  *		Support for INET connection-oriented protocols.
9  * Authors:	See the TCP sources
10  */
11 
12 #include <linux/module.h>
13 #include <linux/jhash.h>
14 
15 #include <net/inet_connection_sock.h>
16 #include <net/inet_hashtables.h>
17 #include <net/inet_timewait_sock.h>
18 #include <net/ip.h>
19 #include <net/route.h>
20 #include <net/tcp_states.h>
21 #include <net/xfrm.h>
22 #include <net/tcp.h>
23 #include <net/tcp_ecn.h>
24 #include <net/sock_reuseport.h>
25 #include <net/addrconf.h>
26 
27 #if IS_ENABLED(CONFIG_IPV6)
28 /* match_sk*_wildcard == true:  IPV6_ADDR_ANY matches any IPv6 address,
29  *				and also any IPv4 address if the socket
30  *				is not IPv6-only
31  * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
32  *				IPV6_ADDR_ANY matches only IPV6_ADDR_ANY,
33  *				and 0.0.0.0 matches only 0.0.0.0
34  */
35 static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
36 				 const struct in6_addr *sk2_rcv_saddr6,
37 				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
38 				 bool sk1_ipv6only, bool sk2_ipv6only,
39 				 bool match_sk1_wildcard,
40 				 bool match_sk2_wildcard)
41 {
42 	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
43 	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
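	/* A NULL sk2_rcv_saddr6 means sk2 has no IPv6 address (an AF_INET
	 * socket); treat it as v4-mapped so the IPv4 comparison below
	 * applies.
	 */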
44 
45 	/* if both are mapped, treat as IPv4 */
46 	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
47 		if (!sk2_ipv6only) {
48 			if (sk1_rcv_saddr == sk2_rcv_saddr)
49 				return true;
50 			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
51 				(match_sk2_wildcard && !sk2_rcv_saddr);
52 		}
53 		return false;
54 	}
55 
56 	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
57 		return true;
58 
59 	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
60 	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
61 		return true;
62 
63 	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
64 	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
65 		return true;
66 
67 	if (sk2_rcv_saddr6 &&
68 	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
69 		return true;
70 
71 	return false;
72 }
73 #endif
74 
75 /* match_sk*_wildcard == true:  0.0.0.0 matches any IPv4 address
76  * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
77  *				0.0.0.0 matches only 0.0.0.0
78  */
79 static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
80 				 bool sk2_ipv6only, bool match_sk1_wildcard,
81 				 bool match_sk2_wildcard)
82 {
83 	if (!sk2_ipv6only) {
84 		if (sk1_rcv_saddr == sk2_rcv_saddr)
85 			return true;
86 		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
87 			(match_sk2_wildcard && !sk2_rcv_saddr);
88 	}
89 	return false;
90 }
91 
92 bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
93 			  bool match_wildcard)
94 {
95 #if IS_ENABLED(CONFIG_IPV6)
96 	if (sk->sk_family == AF_INET6)
97 		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
98 					    inet6_rcv_saddr(sk2),
99 					    sk->sk_rcv_saddr,
100 					    sk2->sk_rcv_saddr,
101 					    ipv6_only_sock(sk),
102 					    ipv6_only_sock(sk2),
103 					    match_wildcard,
104 					    match_wildcard);
105 #endif
106 	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
107 				    ipv6_only_sock(sk2), match_wildcard,
108 				    match_wildcard);
109 }
110 EXPORT_SYMBOL(inet_rcv_saddr_equal);
111 
112 bool inet_rcv_saddr_any(const struct sock *sk)
113 {
114 #if IS_ENABLED(CONFIG_IPV6)
115 	if (sk->sk_family == AF_INET6)
116 		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
117 #endif
118 	return !sk->sk_rcv_saddr;
119 }
120 
121 /**
122  *	inet_sk_get_local_port_range - fetch ephemeral ports range
123  *	@sk: socket
124  *	@low: pointer to low port
125  *	@high: pointer to high port
126  *
127  *	Fetch netns port range (/proc/sys/net/ipv4/ip_local_port_range)
128  *	The range can be overridden if the IP_LOCAL_PORT_RANGE socket option is set.
129  *	Returns true if IP_LOCAL_PORT_RANGE was set on this socket.
130  */
131 bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
132 {
133 	int lo, hi, sk_lo, sk_hi;
134 	bool local_range = false;
135 	u32 sk_range;
136 
137 	inet_get_local_port_range(sock_net(sk), &lo, &hi);
138 
139 	sk_range = READ_ONCE(inet_sk(sk)->local_port_range);
140 	if (unlikely(sk_range)) {
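		/* IP_LOCAL_PORT_RANGE packs the bounds into one u32: the low
		 * port in the lower 16 bits and the high port in the upper
		 * 16. The per-socket bounds are honored only where they fall
		 * inside the netns sysctl range, i.e. the option can narrow
		 * the range but never widen it. Userspace sketch
		 * (hypothetical values):
		 *
		 *	u32 range = 40000 | (40100 << 16);
		 *	setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE,
		 *		   &range, sizeof(range));
		 */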
141 		sk_lo = sk_range & 0xffff;
142 		sk_hi = sk_range >> 16;
143 
144 		if (lo <= sk_lo && sk_lo <= hi)
145 			lo = sk_lo;
146 		if (lo <= sk_hi && sk_hi <= hi)
147 			hi = sk_hi;
148 		local_range = true;
149 	}
150 
151 	*low = lo;
152 	*high = hi;
153 	return local_range;
154 }
155 EXPORT_SYMBOL(inet_sk_get_local_port_range);
156 
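/* bhash2 buckets are keyed by (port, address), so they can be used at
 * bind() time only when the socket is bound to a specific address;
 * wildcard (ANY) binds are checked against the per-port bhash table.
 */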
157 static bool inet_use_bhash2_on_bind(const struct sock *sk)
158 {
159 #if IS_ENABLED(CONFIG_IPV6)
160 	if (sk->sk_family == AF_INET6) {
161 		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
162 			return false;
163 
164 		if (!ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
165 			return true;
166 	}
167 #endif
168 	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
169 }
170 
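/* Core bind-conflict rules, roughly: two sockets bound to the same port
 * conflict only when bound to the same device (or when either is unbound).
 * SO_REUSEADDR on both sides permits sharing unless the existing socket is
 * listening; SO_REUSEPORT permits sharing when both sockets set it, the
 * reuseport group is usable, and the sockets share an owner (or the
 * existing one is in TIME_WAIT).
 */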
171 static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
172 			       kuid_t uid, bool relax,
173 			       bool reuseport_cb_ok, bool reuseport_ok)
174 {
175 	int bound_dev_if2;
176 
177 	if (sk == sk2)
178 		return false;
179 
180 	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
181 
182 	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
183 	    sk->sk_bound_dev_if == bound_dev_if2) {
184 		if (sk->sk_reuse && sk2->sk_reuse &&
185 		    sk2->sk_state != TCP_LISTEN) {
186 			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
187 				       sk2->sk_reuseport && reuseport_cb_ok &&
188 				       (sk2->sk_state == TCP_TIME_WAIT ||
189 					uid_eq(uid, sk_uid(sk2)))))
190 				return true;
191 		} else if (!reuseport_ok || !sk->sk_reuseport ||
192 			   !sk2->sk_reuseport || !reuseport_cb_ok ||
193 			   (sk2->sk_state != TCP_TIME_WAIT &&
194 			    !uid_eq(uid, sk_uid(sk2)))) {
195 			return true;
196 		}
197 	}
198 	return false;
199 }
200 
201 static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
202 				   kuid_t uid, bool relax,
203 				   bool reuseport_cb_ok, bool reuseport_ok)
204 {
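	/* An IPv6-only sk2 can never conflict with an IPv4 socket or with a
	 * socket bound to a v4-mapped address.
	 */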
205 	if (ipv6_only_sock(sk2)) {
206 		if (sk->sk_family == AF_INET)
207 			return false;
208 
209 #if IS_ENABLED(CONFIG_IPV6)
210 		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
211 			return false;
212 #endif
213 	}
214 
215 	return inet_bind_conflict(sk, sk2, uid, relax,
216 				  reuseport_cb_ok, reuseport_ok);
217 }
218 
219 static bool inet_bhash2_conflict(const struct sock *sk,
220 				 const struct inet_bind2_bucket *tb2,
221 				 kuid_t uid,
222 				 bool relax, bool reuseport_cb_ok,
223 				 bool reuseport_ok)
224 {
225 	struct sock *sk2;
226 
227 	sk_for_each_bound(sk2, &tb2->owners) {
228 		if (__inet_bhash2_conflict(sk, sk2, uid, relax,
229 					   reuseport_cb_ok, reuseport_ok))
230 			return true;
231 	}
232 
233 	return false;
234 }
235 
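/* Walk every socket bound to @__tb's port, iterating over each bhash2
 * (port, address) bucket chained off the bhash bucket.
 */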
236 #define sk_for_each_bound_bhash(__sk, __tb2, __tb)			\
237 	hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node)	\
238 		sk_for_each_bound((__sk), &(__tb2)->owners)
239 
240 /* This should be called only when the tb and tb2 hashbuckets' locks are held */
241 static int inet_csk_bind_conflict(const struct sock *sk,
242 				  const struct inet_bind_bucket *tb,
243 				  const struct inet_bind2_bucket *tb2, /* may be null */
244 				  bool relax, bool reuseport_ok)
245 {
246 	struct sock_reuseport *reuseport_cb;
247 	kuid_t uid = sk_uid(sk);
248 	bool reuseport_cb_ok;
249 	struct sock *sk2;
250 
251 	rcu_read_lock();
252 	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
253 	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
254 	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
255 	rcu_read_unlock();
256 
257 	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
258 	 * ipv4) should have been checked already. We need to do these two
259 	 * checks separately because their spinlocks have to be acquired/released
260 	 * independently of each other, to prevent possible deadlocks
261 	 */
262 	if (inet_use_bhash2_on_bind(sk))
263 		return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax,
264 						   reuseport_cb_ok, reuseport_ok);
265 
266 	/* Unlike other sk lookup places we do not check
267 	 * for sk_net here, since _all_ the socks listed
268 	 * in tb->owners and tb2->owners list belong
269 	 * to the same net - the one this bucket belongs to.
270 	 */
271 	sk_for_each_bound_bhash(sk2, tb2, tb) {
272 		if (!inet_bind_conflict(sk, sk2, uid, relax, reuseport_cb_ok, reuseport_ok))
273 			continue;
274 
275 		if (inet_rcv_saddr_equal(sk, sk2, true))
276 			return true;
277 	}
278 
279 	return false;
280 }
281 
282 /* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
283  * INADDR_ANY (if ipv4) socket.
284  *
285  * Caller must hold the bhash hashbucket lock with local BH disabled, to
286  * protect against concurrent binds on the port for the wildcard address.
287  */
288 static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
289 					  bool relax, bool reuseport_ok)
290 {
291 	const struct net *net = sock_net(sk);
292 	struct sock_reuseport *reuseport_cb;
293 	struct inet_bind_hashbucket *head2;
294 	struct inet_bind2_bucket *tb2;
295 	kuid_t uid = sk_uid(sk);
296 	bool conflict = false;
297 	bool reuseport_cb_ok;
298 
299 	rcu_read_lock();
300 	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
301 	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
302 	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
303 	rcu_read_unlock();
304 
305 	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);
306 
307 	spin_lock(&head2->lock);
308 
309 	inet_bind_bucket_for_each(tb2, &head2->chain) {
310 		if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
311 			continue;
312 
313 		if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
314 			continue;
315 
316 		conflict = true;
317 		break;
318 	}
319 
320 	spin_unlock(&head2->lock);
321 
322 	return conflict;
323 }
324 
325 /*
326  * Find an open port number for the socket.  Returns with the
327  * inet_bind_hashbucket locks held if successful.
328  */
329 static struct inet_bind_hashbucket *
330 inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
331 			struct inet_bind2_bucket **tb2_ret,
332 			struct inet_bind_hashbucket **head2_ret, int *port_ret)
333 {
334 	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
335 	int i, low, high, attempt_half, port, l3mdev;
336 	struct inet_bind_hashbucket *head, *head2;
337 	struct net *net = sock_net(sk);
338 	struct inet_bind2_bucket *tb2;
339 	struct inet_bind_bucket *tb;
340 	u32 remaining, offset;
341 	bool relax = false;
342 
343 	l3mdev = inet_sk_bound_l3mdev(sk);
344 ports_exhausted:
345 	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
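	/* SK_CAN_REUSE sockets first restrict the search to the lower half
	 * of the range and fall back to the upper half only if it is
	 * exhausted. E.g. for [32768, 61000) the split point below is
	 * 32768 + ((28232 >> 2) << 1) = 46884; the ">> 2 << 1" keeps the
	 * boundary at an even offset from @low, preserving the odd/even
	 * parity classes in both halves.
	 */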
346 other_half_scan:
347 	inet_sk_get_local_port_range(sk, &low, &high);
348 	high++; /* [32768, 60999] -> [32768, 61000) */
349 	if (high - low < 4)
350 		attempt_half = 0;
351 	if (attempt_half) {
352 		int half = low + (((high - low) >> 2) << 1);
353 
354 		if (attempt_half == 1)
355 			high = half;
356 		else
357 			low = half;
358 	}
359 	remaining = high - low;
360 	if (likely(remaining > 1))
361 		remaining &= ~1U;
362 
363 	offset = get_random_u32_below(remaining);
364 	/* __inet_hash_connect() favors ports having @low's parity.
365 	 * We do the opposite so as not to pollute connect() users.
366 	 */
367 	offset |= 1U;
368 
369 other_parity_scan:
370 	port = low + offset;
371 	for (i = 0; i < remaining; i += 2, port += 2) {
372 		if (unlikely(port >= high))
373 			port -= remaining;
374 		if (inet_is_local_reserved_port(net, port))
375 			continue;
376 		head = &hinfo->bhash[inet_bhashfn(net, port,
377 						  hinfo->bhash_size)];
378 		spin_lock_bh(&head->lock);
379 		if (inet_use_bhash2_on_bind(sk)) {
380 			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
381 				goto next_port;
382 		}
383 
384 		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
385 		spin_lock(&head2->lock);
386 		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
387 		inet_bind_bucket_for_each(tb, &head->chain)
388 			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
389 				if (!inet_csk_bind_conflict(sk, tb, tb2,
390 							    relax, false))
391 					goto success;
392 				spin_unlock(&head2->lock);
393 				goto next_port;
394 			}
395 		tb = NULL;
396 		goto success;
397 next_port:
398 		spin_unlock_bh(&head->lock);
399 		cond_resched();
400 	}
401 
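	/* The first pass scanned only odd offsets; decrementing @offset
	 * flips its parity so the second pass covers the even ones.
	 */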
402 	offset--;
403 	if (!(offset & 1))
404 		goto other_parity_scan;
405 
406 	if (attempt_half == 1) {
407 		/* OK we now try the upper half of the range */
408 		attempt_half = 2;
409 		goto other_half_scan;
410 	}
411 
412 	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
413 		/* We still have a chance to connect to different destinations */
414 		relax = true;
415 		goto ports_exhausted;
416 	}
417 	return NULL;
418 success:
419 	*port_ret = port;
420 	*tb_ret = tb;
421 	*tb2_ret = tb2;
422 	*head2_ret = head2;
423 	return head;
424 }
425 
426 static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
427 				     const struct sock *sk)
428 {
429 	if (tb->fastreuseport <= 0)
430 		return 0;
431 	if (!sk->sk_reuseport)
432 		return 0;
433 	if (rcu_access_pointer(sk->sk_reuseport_cb))
434 		return 0;
435 	if (!uid_eq(tb->fastuid, sk_uid(sk)))
436 		return 0;
437 	/* We only need to check the rcv_saddr if this tb was once marked
438 	 * without fastreuseport and then was reset, as we can only know that
439 	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
440 	 * owners list.
441 	 */
442 	if (tb->fastreuseport == FASTREUSEPORT_ANY)
443 		return 1;
444 #if IS_ENABLED(CONFIG_IPV6)
445 	if (tb->fast_sk_family == AF_INET6)
446 		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
447 					    inet6_rcv_saddr(sk),
448 					    tb->fast_rcv_saddr,
449 					    sk->sk_rcv_saddr,
450 					    tb->fast_ipv6_only,
451 					    ipv6_only_sock(sk), true, false);
452 #endif
453 	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
454 				    ipv6_only_sock(sk), true, false);
455 }
456 
457 void inet_csk_update_fastreuse(const struct sock *sk,
458 			       struct inet_bind_bucket *tb,
459 			       struct inet_bind2_bucket *tb2)
460 {
461 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
462 
463 	if (hlist_empty(&tb->bhash2)) {
464 		tb->fastreuse = reuse;
465 		if (sk->sk_reuseport) {
466 			tb->fastreuseport = FASTREUSEPORT_ANY;
467 			tb->fastuid = sk_uid(sk);
468 			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
469 			tb->fast_ipv6_only = ipv6_only_sock(sk);
470 			tb->fast_sk_family = sk->sk_family;
471 #if IS_ENABLED(CONFIG_IPV6)
472 			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
473 #endif
474 		} else {
475 			tb->fastreuseport = 0;
476 		}
477 	} else {
478 		if (!reuse)
479 			tb->fastreuse = 0;
480 		if (sk->sk_reuseport) {
481 			/* We didn't match or we don't have fastreuseport set on
482 			 * the tb, but we have sk_reuseport set on this socket
483 			 * and we know that there are no bind conflicts with
484 			 * this socket in this tb, so reset our tb's reuseport
485 			 * settings so that any subsequent sockets that match
486 			 * our current socket will be put on the fast path.
487 			 *
488 			 * If we reset we need to set FASTREUSEPORT_STRICT so we
489 			 * do extra checking for all subsequent sk_reuseport
490 			 * socks.
491 			 */
492 			if (!sk_reuseport_match(tb, sk)) {
493 				tb->fastreuseport = FASTREUSEPORT_STRICT;
494 				tb->fastuid = sk_uid(sk);
495 				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
496 				tb->fast_ipv6_only = ipv6_only_sock(sk);
497 				tb->fast_sk_family = sk->sk_family;
498 #if IS_ENABLED(CONFIG_IPV6)
499 				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
500 #endif
501 			}
502 		} else {
503 			tb->fastreuseport = 0;
504 		}
505 	}
506 
507 	tb2->fastreuse = tb->fastreuse;
508 	tb2->fastreuseport = tb->fastreuseport;
509 }
510 
511 /* Obtain a reference to a local port for the given sock.
512  * If snum is zero, select any available local port.
513  * We try to allocate an odd port (and leave even ports for connect()).
514  */
515 int inet_csk_get_port(struct sock *sk, unsigned short snum)
516 {
517 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
518 	bool found_port = false, check_bind_conflict = true;
519 	bool bhash_created = false, bhash2_created = false;
520 	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
521 	int ret = -EADDRINUSE, port = snum, l3mdev;
522 	struct inet_bind_hashbucket *head, *head2;
523 	struct inet_bind2_bucket *tb2 = NULL;
524 	struct inet_bind_bucket *tb = NULL;
525 	bool head2_lock_acquired = false;
526 	struct net *net = sock_net(sk);
527 
528 	l3mdev = inet_sk_bound_l3mdev(sk);
529 
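	/* snum == 0: pick a port ourselves. On success,
	 * inet_csk_find_open_port() returns with both the bhash and
	 * bhash2 bucket locks held.
	 */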
530 	if (!port) {
531 		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
532 		if (!head)
533 			return ret;
534 
535 		head2_lock_acquired = true;
536 
537 		if (tb && tb2)
538 			goto success;
539 		found_port = true;
540 	} else {
541 		head = &hinfo->bhash[inet_bhashfn(net, port,
542 						  hinfo->bhash_size)];
543 		spin_lock_bh(&head->lock);
544 		inet_bind_bucket_for_each(tb, &head->chain)
545 			if (inet_bind_bucket_match(tb, net, port, l3mdev))
546 				break;
547 	}
548 
549 	if (!tb) {
550 		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
551 					     head, port, l3mdev);
552 		if (!tb)
553 			goto fail_unlock;
554 		bhash_created = true;
555 	}
556 
557 	if (!found_port) {
558 		if (!hlist_empty(&tb->bhash2)) {
559 			if (sk->sk_reuse == SK_FORCE_REUSE ||
560 			    (tb->fastreuse > 0 && reuse) ||
561 			    sk_reuseport_match(tb, sk))
562 				check_bind_conflict = false;
563 		}
564 
565 		if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
566 			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
567 				goto fail_unlock;
568 		}
569 
570 		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
571 		spin_lock(&head2->lock);
572 		head2_lock_acquired = true;
573 		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
574 	}
575 
576 	if (!tb2) {
577 		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
578 					       net, head2, tb, sk);
579 		if (!tb2)
580 			goto fail_unlock;
581 		bhash2_created = true;
582 	}
583 
584 	if (!found_port && check_bind_conflict) {
585 		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
586 			goto fail_unlock;
587 	}
588 
589 success:
590 	inet_csk_update_fastreuse(sk, tb, tb2);
591 
592 	if (!inet_csk(sk)->icsk_bind_hash)
593 		inet_bind_hash(sk, tb, tb2, port);
594 	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
595 	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
596 	ret = 0;
597 
598 fail_unlock:
599 	if (ret) {
600 		if (bhash2_created)
601 			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, tb2);
602 		if (bhash_created)
603 			inet_bind_bucket_destroy(tb);
604 	}
605 	if (head2_lock_acquired)
606 		spin_unlock(&head2->lock);
607 	spin_unlock_bh(&head->lock);
608 	return ret;
609 }
610 EXPORT_SYMBOL_GPL(inet_csk_get_port);
611 
612 /*
613  * Wait for an incoming connection while avoiding race conditions. This must
614  * be called with the socket locked.
615  */
616 static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
617 {
618 	struct inet_connection_sock *icsk = inet_csk(sk);
619 	DEFINE_WAIT(wait);
620 	int err;
621 
622 	/*
623 	 * True wake-one mechanism for incoming connections: only
624 	 * one process gets woken up, not the 'whole herd'.
625 	 * Since we do not 'race & poll' for established sockets
626 	 * anymore, the common case will execute the loop only once.
627 	 *
628 	 * Subtle issue: "add_wait_queue_exclusive()" will be added
629 	 * after any current non-exclusive waiters, and we know that
630 	 * it will always _stay_ after any new non-exclusive waiters
631 	 * because all non-exclusive waiters are added at the
632 	 * beginning of the wait-queue. As such, it's ok to "drop"
633 	 * our exclusiveness temporarily when we get woken up without
634 	 * having to remove and re-insert us on the wait queue.
635 	 */
636 	for (;;) {
637 		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
638 					  TASK_INTERRUPTIBLE);
639 		release_sock(sk);
640 		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
641 			timeo = schedule_timeout(timeo);
642 		sched_annotate_sleep();
643 		lock_sock(sk);
644 		err = 0;
645 		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
646 			break;
647 		err = -EINVAL;
648 		if (sk->sk_state != TCP_LISTEN)
649 			break;
650 		err = sock_intr_errno(timeo);
651 		if (signal_pending(current))
652 			break;
653 		err = -EAGAIN;
654 		if (!timeo)
655 			break;
656 	}
657 	finish_wait(sk_sleep(sk), &wait);
658 	return err;
659 }
660 
661 /*
662  * This will accept the next outstanding connection.
663  */
664 struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
665 {
666 	struct inet_connection_sock *icsk = inet_csk(sk);
667 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
668 	struct request_sock *req;
669 	struct sock *newsk;
670 	int error;
671 
672 	lock_sock(sk);
673 
674 	/* We need to make sure that this socket is listening,
675 	 * and that it has something pending.
676 	 */
677 	error = -EINVAL;
678 	if (sk->sk_state != TCP_LISTEN)
679 		goto out_err;
680 
681 	/* Find already established connection */
682 	if (reqsk_queue_empty(queue)) {
683 		long timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
684 
685 		/* If this is a non-blocking socket, don't sleep */
686 		error = -EAGAIN;
687 		if (!timeo)
688 			goto out_err;
689 
690 		error = inet_csk_wait_for_connect(sk, timeo);
691 		if (error)
692 			goto out_err;
693 	}
694 	req = reqsk_queue_remove(queue, sk);
695 	arg->is_empty = reqsk_queue_empty(queue);
696 	newsk = req->sk;
697 
698 	if (sk->sk_protocol == IPPROTO_TCP &&
699 	    tcp_rsk(req)->tfo_listener) {
700 		spin_lock_bh(&queue->fastopenq.lock);
701 		if (tcp_rsk(req)->tfo_listener) {
702 			/* We are still waiting for the final ACK of the 3WHS,
703 			 * so we can't free req now. Instead, we set req->sk to
704 			 * NULL to signify that the child socket is taken,
705 			 * so that reqsk_fastopen_remove() will free the req
706 			 * when the 3WHS finishes (or is aborted).
707 			 */
708 			req->sk = NULL;
709 			req = NULL;
710 		}
711 		spin_unlock_bh(&queue->fastopenq.lock);
712 	}
713 
714 	release_sock(sk);
715 
716 	if (req)
717 		reqsk_put(req);
718 
719 	inet_init_csk_locks(newsk);
720 	return newsk;
721 
722 out_err:
723 	release_sock(sk);
724 	arg->err = error;
725 	return NULL;
726 }
727 EXPORT_SYMBOL(inet_csk_accept);
728 
729 /*
730  * Using different timers for retransmit, delayed acks and probes.
731  * We may wish to use just one timer maintaining a list of expiry jiffies
732  * to optimize.
733  */
734 void inet_csk_init_xmit_timers(struct sock *sk,
735 			       void (*retransmit_handler)(struct timer_list *t),
736 			       void (*delack_handler)(struct timer_list *t),
737 			       void (*keepalive_handler)(struct timer_list *t))
738 {
739 	struct inet_connection_sock *icsk = inet_csk(sk);
740 
741 	timer_setup(&sk->tcp_retransmit_timer, retransmit_handler, 0);
742 	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
743 	timer_setup(&icsk->icsk_keepalive_timer, keepalive_handler, 0);
744 	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
745 }
746 
747 void inet_csk_clear_xmit_timers(struct sock *sk)
748 {
749 	struct inet_connection_sock *icsk = inet_csk(sk);
750 
751 	smp_store_release(&icsk->icsk_pending, 0);
752 	smp_store_release(&icsk->icsk_ack.pending, 0);
753 
754 	sk_stop_timer(sk, &sk->tcp_retransmit_timer);
755 	sk_stop_timer(sk, &icsk->icsk_delack_timer);
756 	sk_stop_timer(sk, &icsk->icsk_keepalive_timer);
757 }
758 
759 void inet_csk_clear_xmit_timers_sync(struct sock *sk)
760 {
761 	struct inet_connection_sock *icsk = inet_csk(sk);
762 
763 	/* Ongoing timer handlers need to acquire the socket lock. */
764 	sock_not_owned_by_me(sk);
765 
766 	smp_store_release(&icsk->icsk_pending, 0);
767 	smp_store_release(&icsk->icsk_ack.pending, 0);
768 
769 	sk_stop_timer_sync(sk, &sk->tcp_retransmit_timer);
770 	sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
771 	sk_stop_timer_sync(sk, &icsk->icsk_keepalive_timer);
772 }
773 
774 struct dst_entry *inet_csk_route_req(const struct sock *sk,
775 				     struct flowi4 *fl4,
776 				     const struct request_sock *req)
777 {
778 	const struct inet_request_sock *ireq = inet_rsk(req);
779 	struct net *net = read_pnet(&ireq->ireq_net);
780 	struct ip_options_rcu *opt;
781 	struct rtable *rt;
782 
783 	rcu_read_lock();
784 	opt = rcu_dereference(ireq->ireq_opt);
785 
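	/* With a source route option, route towards the first hop
	 * (opt->opt.faddr) rather than the final destination.
	 */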
786 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
787 			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
788 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
789 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
790 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
791 			   htons(ireq->ir_num), sk_uid(sk));
792 	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
793 	rt = ip_route_output_flow(net, fl4, sk);
794 	if (IS_ERR(rt))
795 		goto no_route;
796 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
797 		goto route_err;
798 	rcu_read_unlock();
799 	return &rt->dst;
800 
801 route_err:
802 	ip_rt_put(rt);
803 no_route:
804 	rcu_read_unlock();
805 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
806 	return NULL;
807 }
808 
809 struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
810 					    struct sock *newsk,
811 					    const struct request_sock *req)
812 {
813 	const struct inet_request_sock *ireq = inet_rsk(req);
814 	struct net *net = read_pnet(&ireq->ireq_net);
815 	struct inet_sock *newinet = inet_sk(newsk);
816 	struct ip_options_rcu *opt;
817 	struct flowi4 *fl4;
818 	struct rtable *rt;
819 
820 	opt = rcu_dereference(ireq->ireq_opt);
821 	fl4 = &newinet->cork.fl.u.ip4;
822 
823 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
824 			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
825 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
826 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
827 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
828 			   htons(ireq->ir_num), sk_uid(sk));
829 	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
830 	rt = ip_route_output_flow(net, fl4, sk);
831 	if (IS_ERR(rt))
832 		goto no_route;
833 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
834 		goto route_err;
835 	return &rt->dst;
836 
837 route_err:
838 	ip_rt_put(rt);
839 no_route:
840 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
841 	return NULL;
842 }
843 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
844 
845 /* Decide when to expire the request and when to resend SYN-ACK */
846 static void syn_ack_recalc(struct request_sock *req,
847 			   const int max_syn_ack_retries,
848 			   const u8 rskq_defer_accept,
849 			   int *expire, int *resend)
850 {
851 	if (!rskq_defer_accept) {
852 		*expire = req->num_timeout >= max_syn_ack_retries;
853 		*resend = 1;
854 		return;
855 	}
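	/* TCP_DEFER_ACCEPT: an already-acked request is not expired until it
	 * has also waited out rskq_defer_accept timeout periods for the
	 * first data packet to arrive.
	 */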
856 	*expire = req->num_timeout >= max_syn_ack_retries &&
857 		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
858 	/* Do not resend while waiting for data after ACK,
859 	 * start to resend on end of deferring period to give
860 	 * last chance for data or ACK to create established socket.
861 	 */
862 	*resend = !inet_rsk(req)->acked ||
863 		  req->num_timeout >= rskq_defer_accept - 1;
864 }
865 
866 static struct request_sock *
867 reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
868 		   bool attach_listener)
869 {
870 	struct request_sock *req;
871 
872 	req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
873 	if (!req)
874 		return NULL;
875 	req->rsk_listener = NULL;
876 	if (attach_listener) {
877 		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
878 			kmem_cache_free(ops->slab, req);
879 			return NULL;
880 		}
881 		req->rsk_listener = sk_listener;
882 	}
883 	req->rsk_ops = ops;
884 	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
885 	sk_node_init(&req_to_sk(req)->sk_node);
886 	sk_tx_queue_clear(req_to_sk(req));
887 	req->saved_syn = NULL;
888 	req->syncookie = 0;
889 	req->num_timeout = 0;
890 	req->num_retrans = 0;
891 	req->sk = NULL;
892 	refcount_set(&req->rsk_refcnt, 0);
893 
894 	return req;
895 }
896 #define reqsk_alloc(...)	alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))
897 
898 struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
899 				      struct sock *sk_listener,
900 				      bool attach_listener)
901 {
902 	struct request_sock *req = reqsk_alloc(ops, sk_listener,
903 					       attach_listener);
904 
905 	if (req) {
906 		struct inet_request_sock *ireq = inet_rsk(req);
907 
908 		ireq->ireq_opt = NULL;
909 #if IS_ENABLED(CONFIG_IPV6)
910 		ireq->pktopts = NULL;
911 #endif
912 		atomic64_set(&ireq->ir_cookie, 0);
913 		ireq->ireq_state = TCP_NEW_SYN_RECV;
914 		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
915 		ireq->ireq_family = sk_listener->sk_family;
916 	}
917 
918 	return req;
919 }
920 EXPORT_SYMBOL(inet_reqsk_alloc);
921 
922 static struct request_sock *inet_reqsk_clone(struct request_sock *req,
923 					     struct sock *sk)
924 {
925 	struct sock *req_sk, *nreq_sk;
926 	struct request_sock *nreq;
927 
928 	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
929 	if (!nreq) {
930 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
931 
932 		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
933 		sock_put(sk);
934 		return NULL;
935 	}
936 
937 	req_sk = req_to_sk(req);
938 	nreq_sk = req_to_sk(nreq);
939 
940 	memcpy(nreq_sk, req_sk,
941 	       offsetof(struct sock, sk_dontcopy_begin));
942 	unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
943 		      req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
944 		      /* alloc is larger than struct, see above */);
945 
946 	sk_node_init(&nreq_sk->sk_node);
947 	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
948 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
949 	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
950 #endif
951 	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
952 
953 	nreq->rsk_listener = sk;
954 
955 	/* We need not acquire fastopenq->lock
956 	 * because the child socket is locked in inet_csk_listen_stop().
957 	 */
958 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
959 		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);
960 
961 	return nreq;
962 }
963 
964 static void reqsk_queue_migrated(struct request_sock_queue *queue,
965 				 const struct request_sock *req)
966 {
967 	if (req->num_timeout == 0)
968 		atomic_inc(&queue->young);
969 	atomic_inc(&queue->qlen);
970 }
971 
972 static void reqsk_migrate_reset(struct request_sock *req)
973 {
974 	req->saved_syn = NULL;
975 #if IS_ENABLED(CONFIG_IPV6)
976 	inet_rsk(req)->ipv6_opt = NULL;
977 	inet_rsk(req)->pktopts = NULL;
978 #else
979 	inet_rsk(req)->ireq_opt = NULL;
980 #endif
981 }
982 
983 /* return true if req was found in the ehash table */
984 static bool reqsk_queue_unlink(struct request_sock *req)
985 {
986 	struct sock *sk = req_to_sk(req);
987 	bool found = false;
988 
989 	if (sk_hashed(sk)) {
990 		struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk);
991 		spinlock_t *lock;
992 
993 		lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
994 		spin_lock(lock);
995 		found = __sk_nulls_del_node_init_rcu(sk);
996 		spin_unlock(lock);
997 	}
998 
999 	return found;
1000 }
1001 
1002 static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
1003 					struct request_sock *req,
1004 					bool from_timer)
1005 {
1006 	bool unlinked = reqsk_queue_unlink(req);
1007 
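	/* A pending timer holds its own reference on req: drop it if we
	 * deactivated the timer here. When running from the timer handler
	 * itself, the handler drops that reference instead.
	 */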
1008 	if (!from_timer && timer_delete_sync(&req->rsk_timer))
1009 		reqsk_put(req);
1010 
1011 	if (unlinked) {
1012 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
1013 		reqsk_put(req);
1014 	}
1015 
1016 	return unlinked;
1017 }
1018 
1019 bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
1020 {
1021 	return __inet_csk_reqsk_queue_drop(sk, req, false);
1022 }
1023 
1024 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
1025 {
1026 	inet_csk_reqsk_queue_drop(sk, req);
1027 	reqsk_put(req);
1028 }
1029 EXPORT_IPV6_MOD(inet_csk_reqsk_queue_drop_and_put);
1030 
1031 static void reqsk_timer_handler(struct timer_list *t)
1032 {
1033 	struct request_sock *req = timer_container_of(req, t, rsk_timer);
1034 	struct request_sock *nreq = NULL, *oreq = req;
1035 	struct sock *sk_listener = req->rsk_listener;
1036 	struct inet_connection_sock *icsk;
1037 	struct request_sock_queue *queue;
1038 	struct net *net;
1039 	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
1040 
1041 	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
1042 		struct sock *nsk;
1043 
1044 		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
1045 		if (!nsk)
1046 			goto drop;
1047 
1048 		nreq = inet_reqsk_clone(req, nsk);
1049 		if (!nreq)
1050 			goto drop;
1051 
1052 		/* The new timer for the cloned req can fire and release both
1053 		 * of its references by calling inet_csk_reqsk_queue_drop_and_put(),
1054 		 * so hold one extra count to prevent use-after-free and
1055 		 * call reqsk_put() just before return.
1056 		 */
1057 		refcount_set(&nreq->rsk_refcnt, 2 + 1);
1058 		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1059 		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
1060 
1061 		req = nreq;
1062 		sk_listener = nsk;
1063 	}
1064 
1065 	icsk = inet_csk(sk_listener);
1066 	net = sock_net(sk_listener);
1067 	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
1068 		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
1069 	/* Normally all the openreqs are young and become mature
1070 	 * (i.e. converted to established sockets) before the first timeout.
1071 	 * If the synack was not acknowledged within 1 second, it means
1072 	 * one of the following: the synack was lost, the ack was lost,
1073 	 * rtt is high, or nobody planned to ack (i.e. synflood).
1074 	 * When the server is a bit loaded, the queue is populated with old
1075 	 * open requests, reducing the effective size of the queue.
1076 	 * When the server is heavily loaded, the queue size reduces to zero
1077 	 * after several minutes of work. This is not a synflood; it is
1078 	 * normal operation. The solution is to prune entries that are
1079 	 * too old, overriding the normal timeout, when the situation
1080 	 * becomes dangerous.
1081 	 *
1082 	 * Essentially, we reserve half of the room for young
1083 	 * embryos and abort old ones without pity if the old
1084 	 * ones are about to clog our table.
1085 	 */
1086 	queue = &icsk->icsk_accept_queue;
1087 	qlen = reqsk_queue_len(queue);
1088 	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
1089 		int young = reqsk_queue_len_young(queue) << 1;
1090 
1091 		while (max_syn_ack_retries > 2) {
1092 			if (qlen < young)
1093 				break;
1094 			max_syn_ack_retries--;
1095 			young <<= 1;
1096 		}
1097 	}
1098 
1099 	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
1100 		       &expire, &resend);
1101 	tcp_syn_ack_timeout(req);
1102 
1103 	if (!expire &&
1104 	    (!resend ||
1105 	     !tcp_rtx_synack(sk_listener, req) ||
1106 	     inet_rsk(req)->acked)) {
1107 		if (req->num_retrans > 1 && tcp_rsk(req)->accecn_ok)
1108 			tcp_rsk(req)->accecn_fail_mode |= TCP_ACCECN_ACE_FAIL_SEND;
1109 		if (req->num_timeout++ == 0)
1110 			atomic_dec(&queue->young);
1111 		mod_timer(&req->rsk_timer, jiffies + tcp_reqsk_timeout(req));
1112 
1113 		if (!nreq)
1114 			return;
1115 
1116 		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
1117 			/* delete timer */
1118 			__inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
1119 			goto no_ownership;
1120 		}
1121 
1122 		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
1123 		reqsk_migrate_reset(oreq);
1124 		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
1125 		reqsk_put(oreq);
1126 
1127 		reqsk_put(nreq);
1128 		return;
1129 	}
1130 
1131 	/* Even if we can clone the req, we may not need to retransmit any more
1132 	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
1133 	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
1134 	 */
1135 	if (nreq) {
1136 		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
1137 no_ownership:
1138 		reqsk_migrate_reset(nreq);
1139 		reqsk_queue_removed(queue, nreq);
1140 		__reqsk_free(nreq);
1141 	}
1142 
1143 drop:
1144 	__inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
1145 	reqsk_put(oreq);
1146 }
1147 
1148 static bool reqsk_queue_hash_req(struct request_sock *req)
1149 {
1150 	bool found_dup_sk = false;
1151 
1152 	if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
1153 		return false;
1154 
1155 	/* The timer needs to be set up after a successful insertion. */
1156 	req->timeout = tcp_timeout_init((struct sock *)req);
1157 	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1158 	mod_timer(&req->rsk_timer, jiffies + req->timeout);
1159 
1160 	/* Before letting lookups find us, make sure all req fields
1161 	 * are committed to memory and the refcnt is initialized.
1162 	 */
1163 	smp_wmb();
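	/* Three references: one for the ehash table, one for the armed timer,
	 * and one for the caller, who drops it once done with the req.
	 */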
1164 	refcount_set(&req->rsk_refcnt, 2 + 1);
1165 	return true;
1166 }
1167 
1168 bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req)
1169 {
1170 	if (!reqsk_queue_hash_req(req))
1171 		return false;
1172 
1173 	inet_csk_reqsk_queue_added(sk);
1174 	return true;
1175 }
1176 
1177 static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
1178 			   const gfp_t priority)
1179 {
1180 	struct inet_connection_sock *icsk = inet_csk(newsk);
1181 
1182 	if (!icsk->icsk_ulp_ops)
1183 		return;
1184 
1185 	icsk->icsk_ulp_ops->clone(req, newsk, priority);
1186 }
1187 
1188 /**
1189  *	inet_csk_clone_lock - clone an inet socket, and lock its clone
1190  *	@sk: the socket to clone
1191  *	@req: request_sock
1192  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1193  *
1194  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1195  */
1196 struct sock *inet_csk_clone_lock(const struct sock *sk,
1197 				 const struct request_sock *req,
1198 				 const gfp_t priority)
1199 {
1200 	struct sock *newsk = sk_clone_lock(sk, priority);
1201 	struct inet_connection_sock *newicsk;
1202 	const struct inet_request_sock *ireq;
1203 	struct inet_sock *newinet;
1204 
1205 	if (!newsk)
1206 		return NULL;
1207 
1208 	newicsk = inet_csk(newsk);
1209 	newinet = inet_sk(newsk);
1210 	ireq = inet_rsk(req);
1211 
1212 	newicsk->icsk_bind_hash = NULL;
1213 	newicsk->icsk_bind2_hash = NULL;
1214 
1215 	newinet->inet_dport = ireq->ir_rmt_port;
1216 	newinet->inet_num = ireq->ir_num;
1217 	newinet->inet_sport = htons(ireq->ir_num);
1218 
1219 	newsk->sk_bound_dev_if = ireq->ir_iif;
1220 
1221 	newsk->sk_daddr = ireq->ir_rmt_addr;
1222 	newsk->sk_rcv_saddr = ireq->ir_loc_addr;
1223 	newinet->inet_saddr = ireq->ir_loc_addr;
1224 
1225 #if IS_ENABLED(CONFIG_IPV6)
1226 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1227 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1228 #endif
1229 
1230 	/* listeners have SOCK_RCU_FREE, not the children */
1231 	sock_reset_flag(newsk, SOCK_RCU_FREE);
1232 
1233 	inet_sk(newsk)->mc_list = NULL;
1234 
1235 	newsk->sk_mark = inet_rsk(req)->ir_mark;
1236 	atomic64_set(&newsk->sk_cookie,
1237 		     atomic64_read(&inet_rsk(req)->ir_cookie));
1238 
1239 	newicsk->icsk_retransmits = 0;
1240 	newicsk->icsk_backoff	  = 0;
1241 	newicsk->icsk_probes_out  = 0;
1242 	newicsk->icsk_probes_tstamp = 0;
1243 
1244 	/* Deinitialize accept_queue to trap illegal accesses. */
1245 	memset(&newicsk->icsk_accept_queue, 0,
1246 	       sizeof(newicsk->icsk_accept_queue));
1247 
1248 	inet_sk_set_state(newsk, TCP_SYN_RECV);
1249 
1250 	inet_clone_ulp(req, newsk, priority);
1251 
1252 	security_inet_csk_clone(newsk, req);
1253 
1254 	return newsk;
1255 }
1256 
1257 /*
1258  * At this point, there should be no process reference to this
1259  * socket, and thus no user references at all.  Therefore we
1260  * can assume the socket waitqueue is inactive and nobody will
1261  * try to jump onto it.
1262  */
1263 void inet_csk_destroy_sock(struct sock *sk)
1264 {
1265 	WARN_ON(sk->sk_state != TCP_CLOSE);
1266 	WARN_ON(!sock_flag(sk, SOCK_DEAD));
1267 
1268 	/* It cannot be in the hash table! */
1269 	WARN_ON(!sk_unhashed(sk));
1270 
1271 	/* If inet_sk(sk)->inet_num is not 0, the socket must be bound */
1272 	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
1273 
1274 	sk->sk_prot->destroy(sk);
1275 
1276 	sk_stream_kill_queues(sk);
1277 
1278 	xfrm_sk_free_policy(sk);
1279 
1280 	tcp_orphan_count_dec();
1281 
1282 	sock_put(sk);
1283 }
1284 EXPORT_SYMBOL(inet_csk_destroy_sock);
1285 
1286 void inet_csk_prepare_for_destroy_sock(struct sock *sk)
1287 {
1288 	/* The below has to be done to allow calling inet_csk_destroy_sock */
1289 	sock_set_flag(sk, SOCK_DEAD);
1290 	tcp_orphan_count_inc();
1291 }
1292 
1293 /* This function allows forcing the closure of a socket after the call to
1294  * tcp_create_openreq_child().
1295  */
1296 void inet_csk_prepare_forced_close(struct sock *sk)
1297 	__releases(&sk->sk_lock.slock)
1298 {
1299 	/* sk_clone_lock locked the socket and set refcnt to 2 */
1300 	bh_unlock_sock(sk);
1301 	sock_put(sk);
1302 	inet_csk_prepare_for_destroy_sock(sk);
1303 	inet_sk(sk)->inet_num = 0;
1304 }
1305 EXPORT_SYMBOL(inet_csk_prepare_forced_close);
1306 
1307 static int inet_ulp_can_listen(const struct sock *sk)
1308 {
1309 	const struct inet_connection_sock *icsk = inet_csk(sk);
1310 
1311 	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
1312 		return -EINVAL;
1313 
1314 	return 0;
1315 }
1316 
1317 int inet_csk_listen_start(struct sock *sk)
1318 {
1319 	struct inet_connection_sock *icsk = inet_csk(sk);
1320 	struct inet_sock *inet = inet_sk(sk);
1321 	int err;
1322 
1323 	err = inet_ulp_can_listen(sk);
1324 	if (unlikely(err))
1325 		return err;
1326 
1327 	reqsk_queue_alloc(&icsk->icsk_accept_queue);
1328 
1329 	sk->sk_ack_backlog = 0;
1330 	inet_csk_delack_init(sk);
1331 
1332 	/* There is a race window here: we announce ourselves listening,
1333 	 * but this transition is still not validated by get_port().
1334 	 * It is OK, because this socket enters the hash table only
1335 	 * after validation is complete.
1336 	 */
1337 	inet_sk_state_store(sk, TCP_LISTEN);
1338 	err = sk->sk_prot->get_port(sk, inet->inet_num);
1339 	if (!err) {
1340 		inet->inet_sport = htons(inet->inet_num);
1341 
1342 		sk_dst_reset(sk);
1343 		err = sk->sk_prot->hash(sk);
1344 
1345 		if (likely(!err))
1346 			return 0;
1347 	}
1348 
1349 	inet_sk_set_state(sk, TCP_CLOSE);
1350 	return err;
1351 }
1352 
1353 static void inet_child_forget(struct sock *sk, struct request_sock *req,
1354 			      struct sock *child)
1355 {
1356 	sk->sk_prot->disconnect(child, O_NONBLOCK);
1357 
1358 	sock_orphan(child);
1359 
1360 	tcp_orphan_count_inc();
1361 
1362 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
1363 		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
1364 		BUG_ON(sk != req->rsk_listener);
1365 
1366 		/* Paranoid, to prevent a race condition if
1367 		 * an inbound pkt destined for the child is
1368 		 * blocked by the sock lock in tcp_v4_rcv().
1369 		 * Also to satisfy an assertion in
1370 		 * tcp_v4_destroy_sock().
1371 		 */
1372 		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
1373 	}
1374 	inet_csk_destroy_sock(child);
1375 }
1376 
1377 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
1378 				      struct request_sock *req,
1379 				      struct sock *child)
1380 {
1381 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1382 
1383 	spin_lock(&queue->rskq_lock);
1384 	if (unlikely(sk->sk_state != TCP_LISTEN)) {
1385 		inet_child_forget(sk, req, child);
1386 		child = NULL;
1387 	} else {
1388 		req->sk = child;
1389 		req->dl_next = NULL;
1390 		if (queue->rskq_accept_head == NULL)
1391 			WRITE_ONCE(queue->rskq_accept_head, req);
1392 		else
1393 			queue->rskq_accept_tail->dl_next = req;
1394 		queue->rskq_accept_tail = req;
1395 		sk_acceptq_added(sk);
1396 	}
1397 	spin_unlock(&queue->rskq_lock);
1398 	return child;
1399 }
1400 EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
1401 
1402 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
1403 					 struct request_sock *req, bool own_req)
1404 {
1405 	if (own_req) {
1406 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
1407 		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
1408 
1409 		if (sk != req->rsk_listener) {
1410 			/* another listening sk has been selected,
1411 			 * migrate the req to it.
1412 			 */
1413 			struct request_sock *nreq;
1414 
1415 			/* hold a refcnt for the nreq->rsk_listener
1416 			 * which is assigned in inet_reqsk_clone()
1417 			 */
1418 			sock_hold(sk);
1419 			nreq = inet_reqsk_clone(req, sk);
1420 			if (!nreq) {
1421 				inet_child_forget(sk, req, child);
1422 				goto child_put;
1423 			}
1424 
1425 			refcount_set(&nreq->rsk_refcnt, 1);
1426 			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
1427 				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
1428 				reqsk_migrate_reset(req);
1429 				reqsk_put(req);
1430 				return child;
1431 			}
1432 
1433 			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
1434 			reqsk_migrate_reset(nreq);
1435 			__reqsk_free(nreq);
1436 		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
1437 			return child;
1438 		}
1439 	}
1440 	/* Too bad, another child took ownership of the request; undo. */
1441 child_put:
1442 	bh_unlock_sock(child);
1443 	sock_put(child);
1444 	return NULL;
1445 }
1446 
1447 /*
1448  *	This routine closes sockets which have been at least partially
1449  *	opened, but not yet accepted.
1450  */
1451 void inet_csk_listen_stop(struct sock *sk)
1452 {
1453 	struct inet_connection_sock *icsk = inet_csk(sk);
1454 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
1455 	struct request_sock *next, *req;
1456 
1457 	/* Following the specs, it would be better either to send a FIN
1458 	 * (and enter FIN-WAIT-1, which is a normal close)
1459 	 * or to send an active reset (abort).
1460 	 * Certainly, it is pretty dangerous during a synflood, but that is
1461 	 * a bad justification for our negligence 8)
1462 	 * To be honest, we are not able to make either
1463 	 * of the variants now.			--ANK
1464 	 */
1465 	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
1466 		struct sock *child = req->sk, *nsk;
1467 		struct request_sock *nreq;
1468 
1469 		local_bh_disable();
1470 		bh_lock_sock(child);
1471 		WARN_ON(sock_owned_by_user(child));
1472 		sock_hold(child);
1473 
1474 		nsk = reuseport_migrate_sock(sk, child, NULL);
1475 		if (nsk) {
1476 			nreq = inet_reqsk_clone(req, nsk);
1477 			if (nreq) {
1478 				refcount_set(&nreq->rsk_refcnt, 1);
1479 
1480 				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
1481 					__NET_INC_STATS(sock_net(nsk),
1482 							LINUX_MIB_TCPMIGRATEREQSUCCESS);
1483 					reqsk_migrate_reset(req);
1484 				} else {
1485 					__NET_INC_STATS(sock_net(nsk),
1486 							LINUX_MIB_TCPMIGRATEREQFAILURE);
1487 					reqsk_migrate_reset(nreq);
1488 					__reqsk_free(nreq);
1489 				}
1490 
1491 				/* inet_csk_reqsk_queue_add() has already
1492 				 * called inet_child_forget() in the failure case.
1493 				 */
1494 				goto skip_child_forget;
1495 			}
1496 		}
1497 
1498 		inet_child_forget(sk, req, child);
1499 skip_child_forget:
1500 		reqsk_put(req);
1501 		bh_unlock_sock(child);
1502 		local_bh_enable();
1503 		sock_put(child);
1504 
1505 		cond_resched();
1506 	}
1507 	if (queue->fastopenq.rskq_rst_head) {
1508 		/* Free all the reqs queued in rskq_rst_head. */
1509 		spin_lock_bh(&queue->fastopenq.lock);
1510 		req = queue->fastopenq.rskq_rst_head;
1511 		queue->fastopenq.rskq_rst_head = NULL;
1512 		spin_unlock_bh(&queue->fastopenq.lock);
1513 		while (req != NULL) {
1514 			next = req->dl_next;
1515 			reqsk_put(req);
1516 			req = next;
1517 		}
1518 	}
1519 	WARN_ON_ONCE(sk->sk_ack_backlog);
1520 }
1521 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
1522 
1523 static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
1524 {
1525 	const struct inet_sock *inet = inet_sk(sk);
1526 	struct flowi4 *fl4;
1527 	struct rtable *rt;
1528 
1529 	rcu_read_lock();
1530 	fl4 = &fl->u.ip4;
1531 	inet_sk_init_flowi4(inet, fl4);
1532 	rt = ip_route_output_flow(sock_net(sk), fl4, sk);
1533 	if (IS_ERR(rt))
1534 		rt = NULL;
1535 	if (rt)
1536 		sk_setup_caps(sk, &rt->dst);
1537 	rcu_read_unlock();
1538 
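	/* struct dst_entry is the first member of struct rtable, so &rt->dst
	 * is still NULL when rt is NULL.
	 */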
1539 	return &rt->dst;
1540 }
1541 
1542 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
1543 {
1544 	struct dst_entry *dst = __sk_dst_check(sk, 0);
1545 	struct inet_sock *inet = inet_sk(sk);
1546 
1547 	if (!dst) {
1548 		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1549 		if (!dst)
1550 			goto out;
1551 	}
1552 	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
1553 
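	/* update_pmtu() may have invalidated the cached route; re-validate
	 * it and rebuild if necessary.
	 */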
1554 	dst = __sk_dst_check(sk, 0);
1555 	if (!dst)
1556 		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1557 out:
1558 	return dst;
1559 }
1560