xref: /linux/net/ipv4/inet_connection_sock.c (revision 77de28cd7cf172e782319a144bf64e693794d78b)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/tcp_ecn.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_sk*_wildcard == true:  IPV6_ADDR_ANY matches any IPv6 address
 *				if the socket is IPv6-only, and any IPv4
 *				address as well if it is not IPv6-only
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				IPV6_ADDR_ANY only equals IPV6_ADDR_ANY,
 *				and 0.0.0.0 only equals 0.0.0.0
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				 const struct in6_addr *sk2_rcv_saddr6,
				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk1_ipv6only, bool sk2_ipv6only,
				 bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
		if (!sk2_ipv6only) {
			if (sk1_rcv_saddr == sk2_rcv_saddr)
				return true;
			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
				(match_sk2_wildcard && !sk2_rcv_saddr);
		}
		return false;
	}

	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
		return true;

	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return true;

	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return true;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
		return true;

	return false;
}
#endif

/* match_sk*_wildcard == true:  0.0.0.0 matches any IPv4 address
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				0.0.0.0 only equals 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk2_ipv6only, bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_rcv_saddr == sk2_rcv_saddr)
			return true;
		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
			(match_sk2_wildcard && !sk2_rcv_saddr);
	}
	return false;
}
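
/* Example: with match_wildcard == true, a socket bound to 10.0.0.1 compares
 * equal to one bound to 0.0.0.0, so the bind() conflict checks treat a
 * wildcard bind as overlapping every specific address on the same port.
 */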

bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			  bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
					    inet6_rcv_saddr(sk2),
					    sk->sk_rcv_saddr,
					    sk2->sk_rcv_saddr,
					    ipv6_only_sock(sk),
					    ipv6_only_sock(sk2),
					    match_wildcard,
					    match_wildcard);
#endif
	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
				    ipv6_only_sock(sk2), match_wildcard,
				    match_wildcard);
}

bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
	return !sk->sk_rcv_saddr;
}

/**
 *	inet_sk_get_local_port_range - fetch ephemeral ports range
 *	@sk: socket
 *	@low: pointer to low port
 *	@high: pointer to high port
 *
 *	Fetch the netns port range (/proc/sys/net/ipv4/ip_local_port_range).
 *	The range can be overridden if the socket has the IP_LOCAL_PORT_RANGE
 *	option set.
 *	Returns true if IP_LOCAL_PORT_RANGE was set on this socket.
 */
bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
{
	int lo, hi, sk_lo, sk_hi;
	bool local_range = false;
	u32 sk_range;

	inet_get_local_port_range(sock_net(sk), &lo, &hi);

	sk_range = READ_ONCE(inet_sk(sk)->local_port_range);
	if (unlikely(sk_range)) {
		sk_lo = sk_range & 0xffff;
		sk_hi = sk_range >> 16;

		if (lo <= sk_lo && sk_lo <= hi)
			lo = sk_lo;
		if (lo <= sk_hi && sk_hi <= hi)
			hi = sk_hi;
		local_range = true;
	}

	*low = lo;
	*high = hi;
	return local_range;
}
EXPORT_SYMBOL(inet_sk_get_local_port_range);
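
/* Userspace sketch of overriding the range per socket (illustrative; the fd
 * and the 40000..40100 range are hypothetical).  The option value packs the
 * low port into the lower 16 bits and the high port into the upper 16 bits:
 *
 *	uint32_t range = 40000 | (40100 << 16);
 *	setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
 */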

static bool inet_use_bhash2_on_bind(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
			return false;

		if (!ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			return true;
	}
#endif
	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
}
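
/* When the helper above returns true, the socket is bound to a specific
 * (non-wildcard) address and conflicts can be resolved from the
 * per-(port, address) bhash2 buckets alone; wildcard binds have to scan
 * every socket bound to the port instead.
 */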

static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
			       kuid_t uid, bool relax,
			       bool reuseport_cb_ok, bool reuseport_ok)
{
	int bound_dev_if2;

	if (sk == sk2)
		return false;

	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);

	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
	    sk->sk_bound_dev_if == bound_dev_if2) {
		if (sk->sk_reuse && sk2->sk_reuse &&
		    sk2->sk_state != TCP_LISTEN) {
			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
				       sk2->sk_reuseport && reuseport_cb_ok &&
				       (sk2->sk_state == TCP_TIME_WAIT ||
					uid_eq(uid, sk_uid(sk2)))))
				return true;
		} else if (!reuseport_ok || !sk->sk_reuseport ||
			   !sk2->sk_reuseport || !reuseport_cb_ok ||
			   (sk2->sk_state != TCP_TIME_WAIT &&
			    !uid_eq(uid, sk_uid(sk2)))) {
			return true;
		}
	}
	return false;
}
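
/* Roughly: two sockets on the same port (with overlapping device bindings)
 * may coexist if both set SO_REUSEADDR and the existing one is not
 * listening, or if both set SO_REUSEPORT and belong to the same user (or
 * the existing one is in TIME_WAIT) while the reuseport group still
 * accepts new members.
 */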

static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
				   kuid_t uid, bool relax,
				   bool reuseport_cb_ok, bool reuseport_ok)
{
	if (ipv6_only_sock(sk2)) {
		if (sk->sk_family == AF_INET)
			return false;

#if IS_ENABLED(CONFIG_IPV6)
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			return false;
#endif
	}

	return inet_bind_conflict(sk, sk2, uid, relax,
				  reuseport_cb_ok, reuseport_ok);
}

static bool inet_bhash2_conflict(const struct sock *sk,
				 const struct inet_bind2_bucket *tb2,
				 kuid_t uid,
				 bool relax, bool reuseport_cb_ok,
				 bool reuseport_ok)
{
	struct sock *sk2;

	sk_for_each_bound(sk2, &tb2->owners) {
		if (__inet_bhash2_conflict(sk, sk2, uid, relax,
					   reuseport_cb_ok, reuseport_ok))
			return true;
	}

	return false;
}

#define sk_for_each_bound_bhash(__sk, __tb2, __tb)			\
	hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node)	\
		sk_for_each_bound((__sk), &(__tb2)->owners)
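
/* The macro above walks every socket bound to @__tb's port, iterating each
 * per-address (bhash2) bucket chained off the port-only bhash bucket.
 */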

/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  const struct inet_bind2_bucket *tb2, /* may be null */
				  bool relax, bool reuseport_ok)
{
	struct sock_reuseport *reuseport_cb;
	kuid_t uid = sk_uid(sk);
	bool reuseport_cb_ok;
	struct sock *sk2;

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
	 * ipv4) should have been checked already. We need to do these two
	 * checks separately because their spinlocks have to be acquired/released
	 * independently of each other, to prevent possible deadlocks
	 */
	if (inet_use_bhash2_on_bind(sk))
		return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax,
						   reuseport_cb_ok, reuseport_ok);

	/* Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners and tb2->owners list belong
	 * to the same net - the one this bucket belongs to.
	 */
	sk_for_each_bound_bhash(sk2, tb2, tb) {
		if (!inet_bind_conflict(sk, sk2, uid, relax, reuseport_cb_ok, reuseport_ok))
			continue;

		if (inet_rcv_saddr_equal(sk, sk2, true))
			return true;
	}

	return false;
}

/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
 * INADDR_ANY (if ipv4) socket.
 *
 * Caller must hold bhash hashbucket lock with local bh disabled, to protect
 * against concurrent binds on the port for addr any
 */
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
					  bool relax, bool reuseport_ok)
{
	const struct net *net = sock_net(sk);
	struct sock_reuseport *reuseport_cb;
	struct inet_bind_hashbucket *head2;
	struct inet_bind2_bucket *tb2;
	kuid_t uid = sk_uid(sk);
	bool conflict = false;
	bool reuseport_cb_ok;

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);

	spin_lock(&head2->lock);

	inet_bind_bucket_for_each(tb2, &head2->chain) {
		if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
			continue;

		if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
			continue;

		conflict = true;
		break;
	}

	spin_unlock(&head2->lock);

	return conflict;
}
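
/* Lock ordering note: the caller already holds the per-port bhash bucket
 * lock; the addr-any bhash2 bucket lock is taken on top of it here, the
 * same bhash -> bhash2 order used by the bind paths below.
 */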

/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket locks held if successful.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
			struct inet_bind2_bucket **tb2_ret,
			struct inet_bind_hashbucket **head2_ret, int *port_ret)
{
	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
	int i, low, high, attempt_half, port, l3mdev;
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	bool relax = false;

	l3mdev = inet_sk_bound_l3mdev(sk);
ports_exhausted:
	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_sk_get_local_port_range(sk, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000[ */
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
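	/* half above is the midpoint of [low, high), rounded down so that
	 * (half - low) stays even and both halves cover both port parities.
	 */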
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = get_random_u32_below(remaining);
	/* __inet_hash_connect() favors ports having @low parity
	 * We do the opposite to not pollute connect() users.
	 */
	offset |= 1U;

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		if (inet_use_bhash2_on_bind(sk)) {
			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
				goto next_port;
		}

		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
		spin_lock(&head2->lock);
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (!inet_csk_bind_conflict(sk, tb, tb2,
							    relax, false))
					goto success;
				spin_unlock(&head2->lock);
				goto next_port;
			}
		tb = NULL;
		goto success;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
		/* OK we now try the upper half of the range */
		attempt_half = 2;
		goto other_half_scan;
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
		/* We still have a chance to connect to different destinations */
		relax = true;
		goto ports_exhausted;
	}
	return NULL;
success:
	*port_ret = port;
	*tb_ret = tb;
	*tb2_ret = tb2;
	*head2_ret = head2;
	return head;
}

static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
				     const struct sock *sk)
{
	if (tb->fastreuseport <= 0)
		return 0;
	if (!sk->sk_reuseport)
		return 0;
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		return 0;
	if (!uid_eq(tb->fastuid, sk_uid(sk)))
		return 0;
	/* We only need to check the rcv_saddr if this tb was once marked
	 * without fastreuseport and then was reset, as we can only know that
	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
	 * owners list.
	 */
	if (tb->fastreuseport == FASTREUSEPORT_ANY)
		return 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tb->fast_sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
					    inet6_rcv_saddr(sk),
					    tb->fast_rcv_saddr,
					    sk->sk_rcv_saddr,
					    tb->fast_ipv6_only,
					    ipv6_only_sock(sk), true, false);
#endif
	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
				    ipv6_only_sock(sk), true, false);
}

void inet_csk_update_fastreuse(const struct sock *sk,
			       struct inet_bind_bucket *tb,
			       struct inet_bind2_bucket *tb2)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->bhash2)) {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = FASTREUSEPORT_ANY;
			tb->fastuid = sk_uid(sk);
			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
			tb->fast_ipv6_only = ipv6_only_sock(sk);
			tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
		} else {
			tb->fastreuseport = 0;
		}
	} else {
		if (!reuse)
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			/* We didn't match or we don't have fastreuseport set on
			 * the tb, but we have sk_reuseport set on this socket
			 * and we know that there are no bind conflicts with
			 * this socket in this tb, so reset our tb's reuseport
			 * settings so that any subsequent sockets that match
			 * our current socket will be put on the fast path.
			 *
			 * If we reset we need to set FASTREUSEPORT_STRICT so we
			 * do extra checking for all subsequent sk_reuseport
			 * socks.
			 */
			if (!sk_reuseport_match(tb, sk)) {
				tb->fastreuseport = FASTREUSEPORT_STRICT;
				tb->fastuid = sk_uid(sk);
				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
				tb->fast_ipv6_only = ipv6_only_sock(sk);
				tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
			}
		} else {
			tb->fastreuseport = 0;
		}
	}

	tb2->fastreuse = tb->fastreuse;
	tb2->fastreuseport = tb->fastreuseport;
}

/* Obtain a reference to a local port for the given sock.  If snum is zero,
 * select any available local port.
 * We try to allocate an odd port (and leave even ports for connect()).
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	bool found_port = false, check_bind_conflict = true;
	bool bhash_created = false, bhash2_created = false;
	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
	int ret = -EADDRINUSE, port = snum, l3mdev;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2 = NULL;
	struct inet_bind_bucket *tb = NULL;
	bool head2_lock_acquired = false;
	struct net *net = sock_net(sk);

	l3mdev = inet_sk_bound_l3mdev(sk);

	if (!port) {
		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
		if (!head)
			return ret;

		head2_lock_acquired = true;

		if (tb && tb2)
			goto success;
		found_port = true;
	} else {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
	}

	if (!tb) {
		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
					     head, port, l3mdev);
		if (!tb)
			goto fail_unlock;
		bhash_created = true;
	}

	if (!found_port) {
		if (!hlist_empty(&tb->bhash2)) {
			if (sk->sk_reuse == SK_FORCE_REUSE ||
			    (tb->fastreuse > 0 && reuse) ||
			    sk_reuseport_match(tb, sk))
				check_bind_conflict = false;
		}

		if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
				goto fail_unlock;
		}

		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
		spin_lock(&head2->lock);
		head2_lock_acquired = true;
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	}

	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
					       net, head2, tb, sk);
		if (!tb2)
			goto fail_unlock;
		bhash2_created = true;
	}

	if (!found_port && check_bind_conflict) {
		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
			goto fail_unlock;
	}

success:
	inet_csk_update_fastreuse(sk, tb, tb2);

	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, tb2, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
	ret = 0;

fail_unlock:
	if (ret) {
		if (bhash2_created)
			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, tb2);
		if (bhash_created)
			inet_bind_bucket_destroy(tb);
	}
	if (head2_lock_acquired)
		spin_unlock(&head2->lock);
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoiding race conditions. This must be
 * called with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: the exclusive waiter added by
	 * prepare_to_wait_exclusive() goes after any current non-exclusive
	 * waiters, and we know that it will always _stay_ after any new
	 * non-exclusive waiters because all non-exclusive waiters are added
	 * at the beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	arg->is_empty = reqsk_queue_empty(queue);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}

	release_sock(sk);

	if (req)
		reqsk_put(req);

	inet_init_csk_locks(newsk);
	return newsk;

out_err:
	release_sock(sk);
	arg->err = error;
	return NULL;
}
EXPORT_IPV6_MOD(inet_csk_accept);

/*
 * We use different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *t),
			       void (*delack_handler)(struct timer_list *t),
			       void (*keepalive_handler)(struct timer_list *t))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&sk->tcp_retransmit_timer, retransmit_handler, 0);
	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
	timer_setup(&icsk->icsk_keepalive_timer, keepalive_handler, 0);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
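
/* The two teardown helpers below stop these timers again.  The plain variant
 * does not wait for a handler that is already running; the _sync variant
 * does, and therefore must not be called with the socket lock held, since
 * the handlers themselves take it (see sock_not_owned_by_me() below).
 */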

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	smp_store_release(&icsk->icsk_pending, 0);
	smp_store_release(&icsk->icsk_ack.pending, 0);

	sk_stop_timer(sk, &sk->tcp_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &icsk->icsk_keepalive_timer);
}

void inet_csk_clear_xmit_timers_sync(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* ongoing timer handlers need to acquire socket lock. */
	sock_not_owned_by_me(sk);

	smp_store_release(&icsk->icsk_pending, 0);
	smp_store_release(&icsk->icsk_ack.pending, 0);

	sk_stop_timer_sync(sk, &sk->tcp_retransmit_timer);
	sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
	sk_stop_timer_sync(sk, &icsk->icsk_keepalive_timer);
}

struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk_uid(sk));
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	opt = rcu_dereference(ireq->ireq_opt);
	fl4 = &newinet->cork.fl.u.ip4;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk_uid(sk));
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

/* Decide when to expire the request and when to resend SYN-ACK */
static void syn_ack_recalc(struct request_sock *req,
			   const int max_syn_ack_retries,
			   const u8 rskq_defer_accept,
			   int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= max_syn_ack_retries;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= max_syn_ack_retries &&
		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
	/* Do not resend while waiting for data after ACK,
	 * start resending at the end of the deferring period to give
	 * a last chance for data or an ACK to create an established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

static struct request_sock *
reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
		   bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->syncookie = 0;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}
#define reqsk_alloc(...)	alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))

struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
				      struct sock *sk_listener,
				      bool attach_listener)
{
	struct request_sock *req = reqsk_alloc(ops, sk_listener,
					       attach_listener);

	if (req) {
		struct inet_request_sock *ireq = inet_rsk(req);

		ireq->ireq_opt = NULL;
#if IS_ENABLED(CONFIG_IPV6)
		ireq->pktopts = NULL;
#endif
		atomic64_set(&ireq->ir_cookie, 0);
		ireq->ireq_state = TCP_NEW_SYN_RECV;
		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
		ireq->ireq_family = sk_listener->sk_family;
	}

	return req;
}
EXPORT_SYMBOL(inet_reqsk_alloc);

void __reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}
EXPORT_SYMBOL_GPL(__reqsk_free);

static struct request_sock *inet_reqsk_clone(struct request_sock *req,
					     struct sock *sk)
{
	struct sock *req_sk, *nreq_sk;
	struct request_sock *nreq;

	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!nreq) {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
		sock_put(sk);
		return NULL;
	}

	req_sk = req_to_sk(req);
	nreq_sk = req_to_sk(nreq);

	memcpy(nreq_sk, req_sk,
	       offsetof(struct sock, sk_dontcopy_begin));
	unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
		      req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
		      /* alloc is larger than struct, see above */);

	sk_node_init(&nreq_sk->sk_node);
	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;

	nreq->rsk_listener = sk;

	/* We need not acquire fastopenq->lock
	 * because the child socket is locked in inet_csk_listen_stop().
	 */
	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);

	return nreq;
}

static void reqsk_queue_migrated(struct request_sock_queue *queue,
				 const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static void reqsk_migrate_reset(struct request_sock *req)
{
	req->saved_syn = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	inet_rsk(req)->ipv6_opt = NULL;
	inet_rsk(req)->pktopts = NULL;
#else
	inet_rsk(req)->ireq_opt = NULL;
#endif
}

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{
	struct sock *sk = req_to_sk(req);
	bool found = false;

	if (sk_hashed(sk)) {
		struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk);
		spinlock_t *lock;

		lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(sk);
		spin_unlock(lock);
	}

	return found;
}

static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
					struct request_sock *req,
					bool from_timer)
{
	bool unlinked = reqsk_queue_unlink(req);

	if (!from_timer && timer_delete_sync(&req->rsk_timer))
		reqsk_put(req);

	if (unlinked) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}

	return unlinked;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	return __inet_csk_reqsk_queue_drop(sk, req, false);
}

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_IPV6_MOD(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(struct timer_list *t)
{
	struct request_sock *req = timer_container_of(req, t, rsk_timer);
	struct request_sock *nreq = NULL, *oreq = req;
	struct sock *sk_listener = req->rsk_listener;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	struct net *net;
	int max_syn_ack_retries, qlen, expire = 0, resend = 0;

	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
		struct sock *nsk;

		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
		if (!nsk)
			goto drop;

		nreq = inet_reqsk_clone(req, nsk);
		if (!nreq)
			goto drop;

		/* The new timer for the cloned req can drop both of the two
		 * base references by calling
		 * inet_csk_reqsk_queue_drop_and_put(), so hold another count
		 * to prevent use-after-free and call reqsk_put() just before
		 * return.
		 */
		refcount_set(&nreq->rsk_refcnt, 2 + 1);
		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);

		req = nreq;
		sk_listener = nsk;
	}

	icsk = inet_csk(sk_listener);
	net = sock_net(sk_listener);
	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to an established socket) within the first timeout.
	 * If a synack was not acknowledged for 1 second, it means
	 * one of the following things: the synack was lost, the ack was lost,
	 * the rtt is high, or nobody planned to ack (i.e. a synflood).
	 * When the server is a bit loaded, the queue is populated with old
	 * open requests, reducing the effective size of the queue.
	 * When the server is well loaded, the queue size reduces to zero
	 * after several minutes of work. It is not a synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries, overriding the normal timeout, when
	 * the situation becomes dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	queue = &icsk->icsk_accept_queue;
	qlen = reqsk_queue_len(queue);
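	/* Once the queue is more than half full, allow fewer SYN-ACK retries
	 * the more old (already retransmitting) requests outnumber young
	 * ones, but never fewer than two.
	 */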
	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (max_syn_ack_retries > 2) {
			if (qlen < young)
				break;
			max_syn_ack_retries--;
			young <<= 1;
		}
	}

	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
		       &expire, &resend);
	tcp_syn_ack_timeout(req);

	if (!expire &&
	    (!resend ||
	     !tcp_rtx_synack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		if (req->num_retrans > 1 && tcp_rsk(req)->accecn_ok)
			tcp_rsk(req)->accecn_fail_mode |= TCP_ACCECN_ACE_FAIL_SEND;
		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		mod_timer(&req->rsk_timer, jiffies + tcp_reqsk_timeout(req));

		if (!nreq)
			return;

		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
			/* delete timer */
			__inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
			goto no_ownership;
		}

		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
		reqsk_migrate_reset(oreq);
		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
		reqsk_put(oreq);

		reqsk_put(nreq);
		return;
	}

	/* Even if we can clone the req, we may not need to retransmit any more
	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
	 */
	if (nreq) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
no_ownership:
		reqsk_migrate_reset(nreq);
		reqsk_queue_removed(queue, nreq);
		__reqsk_free(nreq);
	}

drop:
	__inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
	reqsk_put(oreq);
}

static bool reqsk_queue_hash_req(struct request_sock *req)
{
	bool found_dup_sk = false;

	if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
		return false;

	/* The timer needs to be set up after a successful insertion. */
	req->timeout = tcp_timeout_init((struct sock *)req);
	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
	mod_timer(&req->rsk_timer, jiffies + req->timeout);

	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	refcount_set(&req->rsk_refcnt, 2 + 1);
	return true;
}

bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req)
{
	if (!reqsk_queue_hash_req(req))
		return false;

	inet_csk_reqsk_queue_added(sk);
	return true;
}

static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
			   const gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(newsk);

	if (!icsk->icsk_ulp_ops)
		return;

	icsk->icsk_ulp_ops->clone(req, newsk, priority);
}

/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);
	struct inet_connection_sock *newicsk;
	const struct inet_request_sock *ireq;
	struct inet_sock *newinet;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newinet = inet_sk(newsk);
	ireq = inet_rsk(req);

	newicsk->icsk_bind_hash = NULL;
	newicsk->icsk_bind2_hash = NULL;

	newinet->inet_dport = ireq->ir_rmt_port;
	newinet->inet_num = ireq->ir_num;
	newinet->inet_sport = htons(ireq->ir_num);

	newsk->sk_bound_dev_if = ireq->ir_iif;

	newsk->sk_daddr = ireq->ir_rmt_addr;
	newsk->sk_rcv_saddr = ireq->ir_loc_addr;
	newinet->inet_saddr = ireq->ir_loc_addr;

#if IS_ENABLED(CONFIG_IPV6)
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
#endif

	/* listeners have SOCK_RCU_FREE, not the children */
	sock_reset_flag(newsk, SOCK_RCU_FREE);

	inet_sk(newsk)->mc_list = NULL;

	newsk->sk_mark = inet_rsk(req)->ir_mark;
	atomic64_set(&newsk->sk_cookie,
		     atomic64_read(&inet_rsk(req)->ir_cookie));

	newicsk->icsk_retransmits = 0;
	newicsk->icsk_backoff	  = 0;
	newicsk->icsk_probes_out  = 0;
	newicsk->icsk_probes_tstamp = 0;

	/* Deinitialize accept_queue to trap illegal accesses. */
	memset(&newicsk->icsk_accept_queue, 0,
	       sizeof(newicsk->icsk_accept_queue));

	inet_sk_set_state(newsk, TCP_SYN_RECV);

	inet_clone_ulp(req, newsk, priority);

	security_inet_csk_clone(newsk, req);

	return newsk;
}

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If inet_sk(sk)->inet_num is non-zero, the socket must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	tcp_orphan_count_dec();

	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	tcp_orphan_count_inc();
}

/* This function forces closure of a socket after the call to
 * tcp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);
	inet_csk_prepare_for_destroy_sock(sk);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

static int inet_ulp_can_listen(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
		return -EINVAL;

	return 0;
}

static void reqsk_queue_alloc(struct request_sock_queue *queue)
{
	queue->fastopenq.rskq_rst_head = NULL;
	queue->fastopenq.rskq_rst_tail = NULL;
	queue->fastopenq.qlen = 0;

	queue->rskq_accept_head = NULL;
}

int inet_csk_listen_start(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err;

	err = inet_ulp_can_listen(sk);
	if (unlikely(err))
		return err;

	reqsk_queue_alloc(&icsk->icsk_accept_queue);

	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves as listening,
	 * but this transition has not yet been validated by get_port().
	 * It is OK, because the socket enters the hash table only
	 * after validation is complete.
	 */
	inet_sk_state_store(sk, TCP_LISTEN);
	err = sk->sk_prot->get_port(sk, inet->inet_num);
	if (!err) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		err = sk->sk_prot->hash(sk);

		if (likely(!err))
			return 0;
	}

	inet_sk_set_state(sk, TCP_CLOSE);
	return err;
}

static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{
	sk->sk_prot->disconnect(child, O_NONBLOCK);

	sock_orphan(child);

	tcp_orphan_count_inc();

	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
		BUG_ON(sk != req->rsk_listener);

		/* Paranoid, to prevent a race condition if
		 * an inbound pkt destined for the child is
		 * blocked by the sock lock in tcp_v4_rcv().
		 * Also to satisfy an assertion in
		 * tcp_v4_destroy_sock().
		 */
		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
	}
	inet_csk_destroy_sock(child);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

	spin_lock(&queue->rskq_lock);
	if (unlikely(sk->sk_state != TCP_LISTEN)) {
		inet_child_forget(sk, req, child);
		child = NULL;
	} else {
		req->sk = child;
		req->dl_next = NULL;
		if (queue->rskq_accept_head == NULL)
			WRITE_ONCE(queue->rskq_accept_head, req);
		else
			queue->rskq_accept_tail->dl_next = req;
		queue->rskq_accept_tail = req;
		sk_acceptq_added(sk);
	}
	spin_unlock(&queue->rskq_lock);
	return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{
	if (own_req) {
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);

		if (sk != req->rsk_listener) {
			/* another listening sk has been selected,
			 * migrate the req to it.
			 */
			struct request_sock *nreq;

			/* hold a refcnt for the nreq->rsk_listener
			 * which is assigned in inet_reqsk_clone()
			 */
			sock_hold(sk);
			nreq = inet_reqsk_clone(req, sk);
			if (!nreq) {
				inet_child_forget(sk, req, child);
				goto child_put;
			}

			refcount_set(&nreq->rsk_refcnt, 1);
			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
				reqsk_migrate_reset(req);
				reqsk_put(req);
				return child;
			}

			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
			reqsk_migrate_reset(nreq);
			__reqsk_free(nreq);
		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
			return child;
		}
	}
	/* Too bad, another child took ownership of the request, undo. */
child_put:
	bh_unlock_sock(child);
	sock_put(child);
	return NULL;
}

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *next, *req;

	/* Per the specs, it would be better either to send a FIN
	 * (and enter FIN-WAIT-1, the normal close)
	 * or to send an active reset (abort).
	 * Certainly, that is pretty dangerous during a synflood, but it is
	 * a bad justification for our negligence 8)
	 * To be honest, we are not able to implement either
	 * of the variants now.			--ANK
	 */
	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk, *nsk;
		struct request_sock *nreq;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		nsk = reuseport_migrate_sock(sk, child, NULL);
		if (nsk) {
			nreq = inet_reqsk_clone(req, nsk);
			if (nreq) {
				refcount_set(&nreq->rsk_refcnt, 1);

				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
					__NET_INC_STATS(sock_net(nsk),
							LINUX_MIB_TCPMIGRATEREQSUCCESS);
					reqsk_migrate_reset(req);
				} else {
					__NET_INC_STATS(sock_net(nsk),
							LINUX_MIB_TCPMIGRATEREQFAILURE);
					reqsk_migrate_reset(nreq);
					__reqsk_free(nreq);
				}

				/* inet_csk_reqsk_queue_add() has already
				 * called inet_child_forget() in the failure case.
				 */
				goto skip_child_forget;
			}
		}

		inet_child_forget(sk, req, child);
skip_child_forget:
		reqsk_put(req);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		cond_resched();
	}
	if (queue->fastopenq.rskq_rst_head) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq.lock);
		req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while (req != NULL) {
			next = req->dl_next;
			reqsk_put(req);
			req = next;
		}
	}
	WARN_ON_ONCE(sk->sk_ack_backlog);
}
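
/* Rebuild the socket's cached IPv4 route from its stored flow.  Returns the
 * new dst, or NULL when no route is found; note that returning &rt->dst with
 * rt == NULL yields NULL because dst is the first member of struct rtable.
 */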

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	fl4 = &fl->u.ip4;
	inet_sk_init_flowi4(inet, fl4);
	rt = ip_route_output_flow(sock_net(sk), fl4, sk);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
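	/* update_pmtu() may have invalidated the cached route; re-check it
	 * below and rebuild if necessary.
	 */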

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
1577