xref: /linux/net/ipv4/inet_connection_sock.c (revision 453a4a5f97f0c95b7df458e6afb98d4ab057d90b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Support for INET connection oriented protocols.
8  *
9  * Authors:	See the TCP sources
10  */
11 
12 #include <linux/module.h>
13 #include <linux/jhash.h>
14 
15 #include <net/inet_connection_sock.h>
16 #include <net/inet_hashtables.h>
17 #include <net/inet_timewait_sock.h>
18 #include <net/ip.h>
19 #include <net/route.h>
20 #include <net/tcp_states.h>
21 #include <net/xfrm.h>
22 #include <net/tcp.h>
23 #include <net/tcp_ecn.h>
24 #include <net/sock_reuseport.h>
25 #include <net/addrconf.h>
26 
27 #if IS_ENABLED(CONFIG_IPV6)
28 /* match_sk*_wildcard == true:  IPV6_ADDR_ANY matches any IPv6 address,
29  *				and also any IPv4 address unless the
30  *				socket is IPv6-only
31  * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
32  *				IPV6_ADDR_ANY matches only IPV6_ADDR_ANY,
33  *				and 0.0.0.0 matches only 0.0.0.0
34  */
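/* Example of the wildcard case (illustrative): with match_sk2_wildcard == true,
 * a socket bound to 2001:db8::1 is considered equal to a second socket bound
 * to the IPv6 wildcard (::) on the same port, which is exactly the bind(2)
 * conflict this helper is used to detect.
 */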
35 static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
36 				 const struct in6_addr *sk2_rcv_saddr6,
37 				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
38 				 bool sk1_ipv6only, bool sk2_ipv6only,
39 				 bool match_sk1_wildcard,
40 				 bool match_sk2_wildcard)
41 {
42 	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
43 	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
44 
45 	/* if both are mapped, treat as IPv4 */
46 	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
47 		if (!sk2_ipv6only) {
48 			if (sk1_rcv_saddr == sk2_rcv_saddr)
49 				return true;
50 			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
51 				(match_sk2_wildcard && !sk2_rcv_saddr);
52 		}
53 		return false;
54 	}
55 
56 	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
57 		return true;
58 
59 	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
60 	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
61 		return true;
62 
63 	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
64 	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
65 		return true;
66 
67 	if (sk2_rcv_saddr6 &&
68 	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
69 		return true;
70 
71 	return false;
72 }
73 #endif
74 
75 /* match_sk*_wildcard == true:  0.0.0.0 matches any IPv4 address
76  * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
77  *				0.0.0.0 matches only 0.0.0.0
78  */
79 static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
80 				 bool sk2_ipv6only, bool match_sk1_wildcard,
81 				 bool match_sk2_wildcard)
82 {
83 	if (!sk2_ipv6only) {
84 		if (sk1_rcv_saddr == sk2_rcv_saddr)
85 			return true;
86 		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
87 			(match_sk2_wildcard && !sk2_rcv_saddr);
88 	}
89 	return false;
90 }
91 
92 bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
93 			  bool match_wildcard)
94 {
95 #if IS_ENABLED(CONFIG_IPV6)
96 	if (sk->sk_family == AF_INET6)
97 		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
98 					    inet6_rcv_saddr(sk2),
99 					    sk->sk_rcv_saddr,
100 					    sk2->sk_rcv_saddr,
101 					    ipv6_only_sock(sk),
102 					    ipv6_only_sock(sk2),
103 					    match_wildcard,
104 					    match_wildcard);
105 #endif
106 	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
107 				    ipv6_only_sock(sk2), match_wildcard,
108 				    match_wildcard);
109 }
110 EXPORT_SYMBOL(inet_rcv_saddr_equal);
111 
112 bool inet_rcv_saddr_any(const struct sock *sk)
113 {
114 #if IS_ENABLED(CONFIG_IPV6)
115 	if (sk->sk_family == AF_INET6)
116 		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
117 #endif
118 	return !sk->sk_rcv_saddr;
119 }
120 
121 /**
122  *	inet_sk_get_local_port_range - fetch ephemeral ports range
123  *	@sk: socket
124  *	@low: pointer to low port
125  *	@high: pointer to high port
126  *
127  *	Fetch netns port range (/proc/sys/net/ipv4/ip_local_port_range)
128  *	Range can be overridden if socket got IP_LOCAL_PORT_RANGE option.
129  *	Returns true if IP_LOCAL_PORT_RANGE was set on this socket.
130  */
131 bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
132 {
133 	int lo, hi, sk_lo, sk_hi;
134 	bool local_range = false;
135 	u32 sk_range;
136 
137 	inet_get_local_port_range(sock_net(sk), &lo, &hi);
138 
139 	sk_range = READ_ONCE(inet_sk(sk)->local_port_range);
140 	if (unlikely(sk_range)) {
141 		sk_lo = sk_range & 0xffff;
142 		sk_hi = sk_range >> 16;
143 
144 		if (lo <= sk_lo && sk_lo <= hi)
145 			lo = sk_lo;
146 		if (lo <= sk_hi && sk_hi <= hi)
147 			hi = sk_hi;
148 		local_range = true;
149 	}
150 
151 	*low = lo;
152 	*high = hi;
153 	return local_range;
154 }
155 EXPORT_SYMBOL(inet_sk_get_local_port_range);
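/* Sketch of how the per-socket override above is expected to be set from
 * user space (illustrative assumption, not part of this file); the low port
 * goes in bits 0..15 of the option value and the high port in bits 16..31:
 *
 *	u32 range = 40000 | (40100 << 16);
 *	setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
 *
 * The per-socket range can only narrow the netns ip_local_port_range:
 * endpoints falling outside the sysctl range are ignored by the clamping
 * above.
 */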
156 
157 static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
158 			       kuid_t uid, bool relax,
159 			       bool reuseport_cb_ok, bool reuseport_ok)
160 {
161 	int bound_dev_if2;
162 
163 	if (sk == sk2)
164 		return false;
165 
166 	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
167 
168 	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
169 	    sk->sk_bound_dev_if == bound_dev_if2) {
170 		if (sk->sk_reuse && sk2->sk_reuse &&
171 		    sk2->sk_state != TCP_LISTEN) {
172 			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
173 				       sk2->sk_reuseport && reuseport_cb_ok &&
174 				       (sk2->sk_state == TCP_TIME_WAIT ||
175 					uid_eq(uid, sk_uid(sk2)))))
176 				return true;
177 		} else if (!reuseport_ok || !sk->sk_reuseport ||
178 			   !sk2->sk_reuseport || !reuseport_cb_ok ||
179 			   (sk2->sk_state != TCP_TIME_WAIT &&
180 			    !uid_eq(uid, sk_uid(sk2)))) {
181 			return true;
182 		}
183 	}
184 	return false;
185 }
186 
187 static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
188 				   kuid_t uid, bool relax,
189 				   bool reuseport_cb_ok, bool reuseport_ok)
190 {
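	/* An IPv6-only socket cannot clash with a plain IPv4 socket, nor with
	 * an IPv6 socket bound to a v4-mapped address, so such pairs are never
	 * conflicts regardless of the reuse flags.
	 */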
191 	if (ipv6_only_sock(sk2)) {
192 		if (sk->sk_family == AF_INET)
193 			return false;
194 
195 #if IS_ENABLED(CONFIG_IPV6)
196 		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
197 			return false;
198 #endif
199 	}
200 
201 	return inet_bind_conflict(sk, sk2, uid, relax,
202 				  reuseport_cb_ok, reuseport_ok);
203 }
204 
205 static bool inet_bhash2_conflict(const struct sock *sk,
206 				 const struct inet_bind2_bucket *tb2,
207 				 kuid_t uid,
208 				 bool relax, bool reuseport_cb_ok,
209 				 bool reuseport_ok)
210 {
211 	struct sock *sk2;
212 
213 	sk_for_each_bound(sk2, &tb2->owners) {
214 		if (__inet_bhash2_conflict(sk, sk2, uid, relax,
215 					   reuseport_cb_ok, reuseport_ok))
216 			return true;
217 	}
218 
219 	return false;
220 }
221 
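/* Walk every socket bound to the port owned by @__tb: iterate each
 * per-address bhash2 bucket chained off the bhash bucket, then every
 * owner in that bucket.
 */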
222 #define sk_for_each_bound_bhash(__sk, __tb2, __tb)			\
223 	hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node)	\
224 		sk_for_each_bound((__sk), &(__tb2)->owners)
225 
226 /* This should be called only when the tb and tb2 hashbuckets' locks are held */
227 static int inet_csk_bind_conflict(const struct sock *sk,
228 				  const struct inet_bind_bucket *tb,
229 				  const struct inet_bind2_bucket *tb2, /* may be null */
230 				  bool relax, bool reuseport_ok)
231 {
232 	struct sock_reuseport *reuseport_cb;
233 	kuid_t uid = sk_uid(sk);
234 	bool reuseport_cb_ok;
235 	struct sock *sk2;
236 
237 	rcu_read_lock();
238 	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
239 	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
240 	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
241 	rcu_read_unlock();
242 
243 	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
244 	 * ipv4) should have been checked already. We need to do these two
245 	 * checks separately because their spinlocks have to be acquired/released
246 	 * independently of each other, to prevent possible deadlocks
247 	 */
248 	if (inet_use_hash2_on_bind(sk))
249 		return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax,
250 						   reuseport_cb_ok, reuseport_ok);
251 
252 	/* Unlike other sk lookup places we do not check
253 	 * for sk_net here, since _all_ the socks listed
254 	 * in tb->owners and tb2->owners list belong
255 	 * to the same net - the one this bucket belongs to.
256 	 */
257 	sk_for_each_bound_bhash(sk2, tb2, tb) {
258 		if (!inet_bind_conflict(sk, sk2, uid, relax, reuseport_cb_ok, reuseport_ok))
259 			continue;
260 
261 		if (inet_rcv_saddr_equal(sk, sk2, true))
262 			return true;
263 	}
264 
265 	return false;
266 }
267 
268 /* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
269  * INADDR_ANY (if ipv4) socket.
270  *
271  * Caller must hold bhash hashbucket lock with local bh disabled, to protect
272  * against concurrent binds on the port for addr any
273  */
274 static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
275 					  bool relax, bool reuseport_ok)
276 {
277 	const struct net *net = sock_net(sk);
278 	struct sock_reuseport *reuseport_cb;
279 	struct inet_bind_hashbucket *head2;
280 	struct inet_bind2_bucket *tb2;
281 	kuid_t uid = sk_uid(sk);
282 	bool conflict = false;
283 	bool reuseport_cb_ok;
284 
285 	rcu_read_lock();
286 	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
287 	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
288 	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
289 	rcu_read_unlock();
290 
291 	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);
292 
293 	spin_lock(&head2->lock);
294 
295 	inet_bind_bucket_for_each(tb2, &head2->chain) {
296 		if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
297 			continue;
298 
299 		if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,	reuseport_ok))
300 			continue;
301 
302 		conflict = true;
303 		break;
304 	}
305 
306 	spin_unlock(&head2->lock);
307 
308 	return conflict;
309 }
310 
311 /*
312  * Find an open port number for the socket.  Returns with the
313  * inet_bind_hashbucket locks held if successful.
314  */
315 static struct inet_bind_hashbucket *
316 inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
317 			struct inet_bind2_bucket **tb2_ret,
318 			struct inet_bind_hashbucket **head2_ret, int *port_ret)
319 {
320 	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
321 	int i, low, high, attempt_half, port, l3mdev;
322 	struct inet_bind_hashbucket *head, *head2;
323 	struct net *net = sock_net(sk);
324 	struct inet_bind2_bucket *tb2;
325 	struct inet_bind_bucket *tb;
326 	u32 remaining, offset;
327 	bool relax = false;
328 
329 	l3mdev = inet_sk_bound_l3mdev(sk);
330 ports_exhausted:
331 	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
332 other_half_scan:
333 	inet_sk_get_local_port_range(sk, &low, &high);
334 	high++; /* [32768, 60999] -> [32768, 61000[ */
335 	if (high - low < 4)
336 		attempt_half = 0;
337 	if (attempt_half) {
338 		int half = low + (((high - low) >> 2) << 1);
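		/* i.e. half = low + 2 * ((high - low) / 4): roughly the middle
		 * of the range, with the offset from @low rounded down to an
		 * even value so each half keeps whole odd/even port pairs.
		 */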
339 
340 		if (attempt_half == 1)
341 			high = half;
342 		else
343 			low = half;
344 	}
345 	remaining = high - low;
346 	if (likely(remaining > 1))
347 		remaining &= ~1U;
348 
349 	offset = get_random_u32_below(remaining);
350 	/* __inet_hash_connect() favors ports having @low parity
351 	 * We do the opposite to not pollute connect() users.
352 	 */
353 	offset |= 1U;
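	/* The scan below therefore starts on an odd offset and steps by two,
	 * covering one parity class per pass; "offset--" after the loop makes
	 * it even and the other class is tried via other_parity_scan.
	 */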
354 
355 other_parity_scan:
356 	port = low + offset;
357 	for (i = 0; i < remaining; i += 2, port += 2) {
358 		if (unlikely(port >= high))
359 			port -= remaining;
360 		if (inet_is_local_reserved_port(net, port))
361 			continue;
362 		head = &hinfo->bhash[inet_bhashfn(net, port,
363 						  hinfo->bhash_size)];
364 		spin_lock_bh(&head->lock);
365 		if (inet_use_hash2_on_bind(sk)) {
366 			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
367 				goto next_port;
368 		}
369 
370 		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
371 		spin_lock(&head2->lock);
372 		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
373 		inet_bind_bucket_for_each(tb, &head->chain)
374 			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
375 				if (!inet_csk_bind_conflict(sk, tb, tb2,
376 							    relax, false))
377 					goto success;
378 				spin_unlock(&head2->lock);
379 				goto next_port;
380 			}
381 		tb = NULL;
382 		goto success;
383 next_port:
384 		spin_unlock_bh(&head->lock);
385 		cond_resched();
386 	}
387 
388 	offset--;
389 	if (!(offset & 1))
390 		goto other_parity_scan;
391 
392 	if (attempt_half == 1) {
393 		/* OK we now try the upper half of the range */
394 		attempt_half = 2;
395 		goto other_half_scan;
396 	}
397 
398 	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
399 		/* We still have a chance to connect to different destinations */
400 		relax = true;
401 		goto ports_exhausted;
402 	}
403 	return NULL;
404 success:
405 	*port_ret = port;
406 	*tb_ret = tb;
407 	*tb2_ret = tb2;
408 	*head2_ret = head2;
409 	return head;
410 }
411 
412 static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
413 				     const struct sock *sk)
414 {
415 	if (tb->fastreuseport <= 0)
416 		return 0;
417 	if (!sk->sk_reuseport)
418 		return 0;
419 	if (rcu_access_pointer(sk->sk_reuseport_cb))
420 		return 0;
421 	if (!uid_eq(tb->fastuid, sk_uid(sk)))
422 		return 0;
423 	/* We only need to check the rcv_saddr if this tb was once marked
424 	 * without fastreuseport and then was reset, as we can only know that
425 	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
426 	 * owners list.
427 	 */
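	/* Roughly: FASTREUSEPORT_ANY means every socket currently in the
	 * bucket got here through this fast path, so the uid check above is
	 * sufficient; FASTREUSEPORT_STRICT (set in inet_csk_update_fastreuse())
	 * additionally requires the cached fast_*rcv_saddr to match below.
	 */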
428 	if (tb->fastreuseport == FASTREUSEPORT_ANY)
429 		return 1;
430 #if IS_ENABLED(CONFIG_IPV6)
431 	if (tb->fast_sk_family == AF_INET6)
432 		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
433 					    inet6_rcv_saddr(sk),
434 					    tb->fast_rcv_saddr,
435 					    sk->sk_rcv_saddr,
436 					    tb->fast_ipv6_only,
437 					    ipv6_only_sock(sk), true, false);
438 #endif
439 	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
440 				    ipv6_only_sock(sk), true, false);
441 }
442 
443 void inet_csk_update_fastreuse(const struct sock *sk,
444 			       struct inet_bind_bucket *tb,
445 			       struct inet_bind2_bucket *tb2)
446 {
447 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
448 
449 	if (hlist_empty(&tb->bhash2)) {
450 		tb->fastreuse = reuse;
451 		if (sk->sk_reuseport) {
452 			tb->fastreuseport = FASTREUSEPORT_ANY;
453 			tb->fastuid = sk_uid(sk);
454 			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
455 			tb->fast_ipv6_only = ipv6_only_sock(sk);
456 			tb->fast_sk_family = sk->sk_family;
457 #if IS_ENABLED(CONFIG_IPV6)
458 			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
459 #endif
460 		} else {
461 			tb->fastreuseport = 0;
462 		}
463 	} else {
464 		if (!reuse)
465 			tb->fastreuse = 0;
466 		if (sk->sk_reuseport) {
467 			/* We didn't match or we don't have fastreuseport set on
468 			 * the tb, but we have sk_reuseport set on this socket
469 			 * and we know that there are no bind conflicts with
470 			 * this socket in this tb, so reset our tb's reuseport
471 			 * settings so that any subsequent sockets that match
472 			 * our current socket will be put on the fast path.
473 			 *
474 			 * If we reset we need to set FASTREUSEPORT_STRICT so we
475 			 * do extra checking for all subsequent sk_reuseport
476 			 * socks.
477 			 */
478 			if (!sk_reuseport_match(tb, sk)) {
479 				tb->fastreuseport = FASTREUSEPORT_STRICT;
480 				tb->fastuid = sk_uid(sk);
481 				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
482 				tb->fast_ipv6_only = ipv6_only_sock(sk);
483 				tb->fast_sk_family = sk->sk_family;
484 #if IS_ENABLED(CONFIG_IPV6)
485 				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
486 #endif
487 			}
488 		} else {
489 			tb->fastreuseport = 0;
490 		}
491 	}
492 
493 	tb2->fastreuse = tb->fastreuse;
494 	tb2->fastreuseport = tb->fastreuseport;
495 }
496 
497 /* Obtain a reference to a local port for the given sock,
498  * if snum is zero it means select any available local port.
499  * We try to allocate an odd port (and leave even ports for connect())
500  */
501 int inet_csk_get_port(struct sock *sk, unsigned short snum)
502 {
503 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
504 	bool found_port = false, check_bind_conflict = true;
505 	bool bhash_created = false, bhash2_created = false;
506 	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
507 	int ret = -EADDRINUSE, port = snum, l3mdev;
508 	struct inet_bind_hashbucket *head, *head2;
509 	struct inet_bind2_bucket *tb2 = NULL;
510 	struct inet_bind_bucket *tb = NULL;
511 	bool head2_lock_acquired = false;
512 	struct net *net = sock_net(sk);
513 
514 	l3mdev = inet_sk_bound_l3mdev(sk);
515 
516 	if (!port) {
517 		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
518 		if (!head)
519 			return ret;
520 
521 		head2_lock_acquired = true;
522 
523 		if (tb && tb2)
524 			goto success;
525 		found_port = true;
526 	} else {
527 		head = &hinfo->bhash[inet_bhashfn(net, port,
528 						  hinfo->bhash_size)];
529 		spin_lock_bh(&head->lock);
530 		inet_bind_bucket_for_each(tb, &head->chain)
531 			if (inet_bind_bucket_match(tb, net, port, l3mdev))
532 				break;
533 	}
534 
535 	if (!tb) {
536 		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
537 					     head, port, l3mdev);
538 		if (!tb)
539 			goto fail_unlock;
540 		bhash_created = true;
541 	}
542 
543 	if (!found_port) {
544 		if (!hlist_empty(&tb->bhash2)) {
545 			if (sk->sk_reuse == SK_FORCE_REUSE ||
546 			    (tb->fastreuse > 0 && reuse) ||
547 			    sk_reuseport_match(tb, sk))
548 				check_bind_conflict = false;
549 		}
550 
551 		if (check_bind_conflict && inet_use_hash2_on_bind(sk)) {
552 			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
553 				goto fail_unlock;
554 		}
555 
556 		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
557 		spin_lock(&head2->lock);
558 		head2_lock_acquired = true;
559 		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
560 	}
561 
562 	if (!tb2) {
563 		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
564 					       net, head2, tb, sk);
565 		if (!tb2)
566 			goto fail_unlock;
567 		bhash2_created = true;
568 	}
569 
570 	if (!found_port && check_bind_conflict) {
571 		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
572 			goto fail_unlock;
573 	}
574 
575 success:
576 	inet_csk_update_fastreuse(sk, tb, tb2);
577 
578 	if (!inet_csk(sk)->icsk_bind_hash)
579 		inet_bind_hash(sk, tb, tb2, port);
580 	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
581 	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
582 	ret = 0;
583 
584 fail_unlock:
585 	if (ret) {
586 		if (bhash2_created)
587 			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, tb2);
588 		if (bhash_created)
589 			inet_bind_bucket_destroy(tb);
590 	}
591 	if (head2_lock_acquired)
592 		spin_unlock(&head2->lock);
593 	spin_unlock_bh(&head->lock);
594 	return ret;
595 }
596 EXPORT_SYMBOL_GPL(inet_csk_get_port);
597 
598 /*
599  * Wait for an incoming connection, avoid race conditions. This must be called
600  * with the socket locked.
601  */
602 static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
603 {
604 	struct inet_connection_sock *icsk = inet_csk(sk);
605 	DEFINE_WAIT(wait);
606 	int err;
607 
608 	/*
609 	 * True wake-one mechanism for incoming connections: only
610 	 * one process gets woken up, not the 'whole herd'.
611 	 * Since we do not 'race & poll' for established sockets
612 	 * anymore, the common case will execute the loop only once.
613 	 *
614 	 * Subtle issue: "add_wait_queue_exclusive()" will be added
615 	 * after any current non-exclusive waiters, and we know that
616 	 * it will always _stay_ after any new non-exclusive waiters
617 	 * because all non-exclusive waiters are added at the
618 	 * beginning of the wait-queue. As such, it's ok to "drop"
619 	 * our exclusiveness temporarily when we get woken up without
620 	 * having to remove and re-insert us on the wait queue.
621 	 */
622 	for (;;) {
623 		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
624 					  TASK_INTERRUPTIBLE);
625 		release_sock(sk);
626 		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
627 			timeo = schedule_timeout(timeo);
628 		sched_annotate_sleep();
629 		lock_sock(sk);
630 		err = 0;
631 		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
632 			break;
633 		err = -EINVAL;
634 		if (sk->sk_state != TCP_LISTEN)
635 			break;
636 		err = sock_intr_errno(timeo);
637 		if (signal_pending(current))
638 			break;
639 		err = -EAGAIN;
640 		if (!timeo)
641 			break;
642 	}
643 	finish_wait(sk_sleep(sk), &wait);
644 	return err;
645 }
646 
647 /*
648  * This will accept the next outstanding connection.
649  */
650 struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
651 {
652 	struct inet_connection_sock *icsk = inet_csk(sk);
653 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
654 	struct request_sock *req;
655 	struct sock *newsk;
656 	int error;
657 
658 	lock_sock(sk);
659 
660 	/* We need to make sure that this socket is listening,
661 	 * and that it has something pending.
662 	 */
663 	error = -EINVAL;
664 	if (sk->sk_state != TCP_LISTEN)
665 		goto out_err;
666 
667 	/* Find already established connection */
668 	if (reqsk_queue_empty(queue)) {
669 		long timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
670 
671 		/* If this is a non blocking socket don't sleep */
672 		error = -EAGAIN;
673 		if (!timeo)
674 			goto out_err;
675 
676 		error = inet_csk_wait_for_connect(sk, timeo);
677 		if (error)
678 			goto out_err;
679 	}
680 	req = reqsk_queue_remove(queue, sk);
681 	arg->is_empty = reqsk_queue_empty(queue);
682 	newsk = req->sk;
683 
684 	if (sk->sk_protocol == IPPROTO_TCP &&
685 	    tcp_rsk(req)->tfo_listener) {
686 		spin_lock_bh(&queue->fastopenq.lock);
687 		if (tcp_rsk(req)->tfo_listener) {
688 			/* We are still waiting for the final ACK from 3WHS
689 			 * so can't free req now. Instead, we set req->sk to
690 			 * NULL to signify that the child socket is taken
691 			 * so reqsk_fastopen_remove() will free the req
692 			 * when 3WHS finishes (or is aborted).
693 			 */
694 			req->sk = NULL;
695 			req = NULL;
696 		}
697 		spin_unlock_bh(&queue->fastopenq.lock);
698 	}
699 
700 	release_sock(sk);
701 
702 	if (req)
703 		reqsk_put(req);
704 
705 	inet_init_csk_locks(newsk);
706 	return newsk;
707 
708 out_err:
709 	release_sock(sk);
710 	arg->err = error;
711 	return NULL;
712 }
713 EXPORT_SYMBOL(inet_csk_accept);
714 
715 /*
716  * Using different timers for retransmit, delayed acks and probes
717  * We may wish to use just one timer maintaining a list of expire jiffies
718  * to optimize.
719  */
720 void inet_csk_init_xmit_timers(struct sock *sk,
721 			       void (*retransmit_handler)(struct timer_list *t),
722 			       void (*delack_handler)(struct timer_list *t),
723 			       void (*keepalive_handler)(struct timer_list *t))
724 {
725 	struct inet_connection_sock *icsk = inet_csk(sk);
726 
727 	timer_setup(&sk->tcp_retransmit_timer, retransmit_handler, 0);
728 	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
729 	timer_setup(&icsk->icsk_keepalive_timer, keepalive_handler, 0);
730 	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
731 }
732 
733 void inet_csk_clear_xmit_timers(struct sock *sk)
734 {
735 	struct inet_connection_sock *icsk = inet_csk(sk);
736 
737 	smp_store_release(&icsk->icsk_pending, 0);
738 	smp_store_release(&icsk->icsk_ack.pending, 0);
739 
740 	sk_stop_timer(sk, &sk->tcp_retransmit_timer);
741 	sk_stop_timer(sk, &icsk->icsk_delack_timer);
742 	sk_stop_timer(sk, &icsk->icsk_keepalive_timer);
743 }
744 
745 void inet_csk_clear_xmit_timers_sync(struct sock *sk)
746 {
747 	struct inet_connection_sock *icsk = inet_csk(sk);
748 
749 	/* ongoing timer handlers need to acquire socket lock. */
750 	sock_not_owned_by_me(sk);
751 
752 	smp_store_release(&icsk->icsk_pending, 0);
753 	smp_store_release(&icsk->icsk_ack.pending, 0);
754 
755 	sk_stop_timer_sync(sk, &sk->tcp_retransmit_timer);
756 	sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
757 	sk_stop_timer_sync(sk, &icsk->icsk_keepalive_timer);
758 }
759 
760 struct dst_entry *inet_csk_route_req(const struct sock *sk,
761 				     struct flowi4 *fl4,
762 				     const struct request_sock *req)
763 {
764 	const struct inet_request_sock *ireq = inet_rsk(req);
765 	struct net *net = read_pnet(&ireq->ireq_net);
766 	struct ip_options_rcu *opt;
767 	struct rtable *rt;
768 
769 	rcu_read_lock();
770 	opt = rcu_dereference(ireq->ireq_opt);
771 
772 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
773 			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
774 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
775 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
776 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
777 			   htons(ireq->ir_num), sk_uid(sk));
778 	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
779 	rt = ip_route_output_flow(net, fl4, sk);
780 	if (IS_ERR(rt))
781 		goto no_route;
782 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
783 		goto route_err;
784 	rcu_read_unlock();
785 	return &rt->dst;
786 
787 route_err:
788 	ip_rt_put(rt);
789 no_route:
790 	rcu_read_unlock();
791 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
792 	return NULL;
793 }
794 
795 struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
796 					    struct sock *newsk,
797 					    const struct request_sock *req)
798 {
799 	const struct inet_request_sock *ireq = inet_rsk(req);
800 	struct net *net = read_pnet(&ireq->ireq_net);
801 	struct inet_sock *newinet = inet_sk(newsk);
802 	struct ip_options_rcu *opt;
803 	struct flowi4 *fl4;
804 	struct rtable *rt;
805 
806 	opt = rcu_dereference(ireq->ireq_opt);
807 	fl4 = &newinet->cork.fl.u.ip4;
808 
809 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
810 			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
811 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
812 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
813 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
814 			   htons(ireq->ir_num), sk_uid(sk));
815 	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
816 	rt = ip_route_output_flow(net, fl4, sk);
817 	if (IS_ERR(rt))
818 		goto no_route;
819 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
820 		goto route_err;
821 	return &rt->dst;
822 
823 route_err:
824 	ip_rt_put(rt);
825 no_route:
826 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
827 	return NULL;
828 }
829 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
830 
831 /* Decide when to expire the request and when to resend SYN-ACK */
832 static void syn_ack_recalc(struct request_sock *req,
833 			   const int max_syn_ack_retries,
834 			   const u8 rskq_defer_accept,
835 			   int *expire, int *resend)
836 {
837 	if (!rskq_defer_accept) {
838 		*expire = req->num_timeout >= max_syn_ack_retries;
839 		*resend = 1;
840 		return;
841 	}
842 	*expire = req->num_timeout >= max_syn_ack_retries &&
843 		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
844 	/* Do not resend while waiting for data after ACK,
845 	 * start resending at the end of the deferring period to give
846 	 * a last chance for data or an ACK to create an established socket.
847 	 */
848 	*resend = !inet_rsk(req)->acked ||
849 		  req->num_timeout >= rskq_defer_accept - 1;
850 }
851 
852 static struct request_sock *
853 reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
854 		   bool attach_listener)
855 {
856 	struct request_sock *req;
857 
858 	req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
859 	if (!req)
860 		return NULL;
861 	req->rsk_listener = NULL;
862 	if (attach_listener) {
863 		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
864 			kmem_cache_free(ops->slab, req);
865 			return NULL;
866 		}
867 		req->rsk_listener = sk_listener;
868 	}
869 	req->rsk_ops = ops;
870 	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
871 	sk_node_init(&req_to_sk(req)->sk_node);
872 	sk_tx_queue_clear(req_to_sk(req));
873 	req->saved_syn = NULL;
874 	req->syncookie = 0;
875 	req->num_timeout = 0;
876 	req->num_retrans = 0;
877 	req->sk = NULL;
878 	refcount_set(&req->rsk_refcnt, 0);
879 
880 	return req;
881 }
882 #define reqsk_alloc(...)	alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))
883 
884 struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
885 				      struct sock *sk_listener,
886 				      bool attach_listener)
887 {
888 	struct request_sock *req = reqsk_alloc(ops, sk_listener,
889 					       attach_listener);
890 
891 	if (req) {
892 		struct inet_request_sock *ireq = inet_rsk(req);
893 
894 		ireq->ireq_opt = NULL;
895 #if IS_ENABLED(CONFIG_IPV6)
896 		ireq->pktopts = NULL;
897 #endif
898 		atomic64_set(&ireq->ir_cookie, 0);
899 		ireq->ireq_state = TCP_NEW_SYN_RECV;
900 		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
901 		ireq->ireq_family = sk_listener->sk_family;
902 	}
903 
904 	return req;
905 }
906 EXPORT_SYMBOL(inet_reqsk_alloc);
907 
908 void __reqsk_free(struct request_sock *req)
909 {
910 	req->rsk_ops->destructor(req);
911 	if (req->rsk_listener)
912 		sock_put(req->rsk_listener);
913 	kfree(req->saved_syn);
914 	kmem_cache_free(req->rsk_ops->slab, req);
915 }
916 EXPORT_SYMBOL_GPL(__reqsk_free);
917 
918 static struct request_sock *inet_reqsk_clone(struct request_sock *req,
919 					     struct sock *sk)
920 {
921 	struct sock *req_sk, *nreq_sk;
922 	struct request_sock *nreq;
923 
924 	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
925 	if (!nreq) {
926 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
927 
928 		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
929 		sock_put(sk);
930 		return NULL;
931 	}
932 
933 	req_sk = req_to_sk(req);
934 	nreq_sk = req_to_sk(nreq);
935 
936 	memcpy(nreq_sk, req_sk,
937 	       offsetof(struct sock, sk_dontcopy_begin));
938 	unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
939 		      req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
940 		      /* alloc is larger than struct, see above */);
941 
942 	sk_node_init(&nreq_sk->sk_node);
943 	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
944 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
945 	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
946 #endif
947 	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
948 
949 	nreq->rsk_listener = sk;
950 
951 	/* We need not acquire fastopenq->lock
952 	 * because the child socket is locked in inet_csk_listen_stop().
953 	 */
954 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
955 		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);
956 
957 	return nreq;
958 }
959 
960 static void reqsk_queue_migrated(struct request_sock_queue *queue,
961 				 const struct request_sock *req)
962 {
963 	if (req->num_timeout == 0)
964 		atomic_inc(&queue->young);
965 	atomic_inc(&queue->qlen);
966 }
967 
968 static void reqsk_migrate_reset(struct request_sock *req)
969 {
970 	req->saved_syn = NULL;
971 #if IS_ENABLED(CONFIG_IPV6)
972 	inet_rsk(req)->ipv6_opt = NULL;
973 	inet_rsk(req)->pktopts = NULL;
974 #else
975 	inet_rsk(req)->ireq_opt = NULL;
976 #endif
977 }
978 
979 /* return true if req was found in the ehash table */
980 static bool reqsk_queue_unlink(struct request_sock *req)
981 {
982 	struct sock *sk = req_to_sk(req);
983 	bool found = false;
984 
985 	if (sk_hashed(sk)) {
986 		struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk);
987 		spinlock_t *lock;
988 
989 		lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
990 		spin_lock(lock);
991 		found = __sk_nulls_del_node_init_rcu(sk);
992 		spin_unlock(lock);
993 	}
994 
995 	return found;
996 }
997 
998 static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
999 					struct request_sock *req,
1000 					bool from_timer)
1001 {
1002 	bool unlinked = reqsk_queue_unlink(req);
1003 
1004 	if (!from_timer && timer_delete_sync(&req->rsk_timer))
1005 		reqsk_put(req);
1006 
1007 	if (unlinked) {
1008 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
1009 		reqsk_put(req);
1010 	}
1011 
1012 	return unlinked;
1013 }
1014 
1015 bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
1016 {
1017 	return __inet_csk_reqsk_queue_drop(sk, req, false);
1018 }
1019 
1020 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
1021 {
1022 	inet_csk_reqsk_queue_drop(sk, req);
1023 	reqsk_put(req);
1024 }
1025 EXPORT_IPV6_MOD(inet_csk_reqsk_queue_drop_and_put);
1026 
1027 static void reqsk_timer_handler(struct timer_list *t)
1028 {
1029 	struct request_sock *req = timer_container_of(req, t, rsk_timer);
1030 	struct request_sock *nreq = NULL, *oreq = req;
1031 	struct sock *sk_listener = req->rsk_listener;
1032 	struct inet_connection_sock *icsk;
1033 	struct request_sock_queue *queue;
1034 	struct net *net;
1035 	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
1036 
1037 	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
1038 		struct sock *nsk;
1039 
1040 		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
1041 		if (!nsk)
1042 			goto drop;
1043 
1044 		nreq = inet_reqsk_clone(req, nsk);
1045 		if (!nreq)
1046 			goto drop;
1047 
1048 		/* The new timer for the cloned req can decrease the 2
1049 		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
1050 		 * hold another count to prevent use-after-free and
1051 		 * call reqsk_put() just before return.
1052 		 */
1053 		refcount_set(&nreq->rsk_refcnt, 2 + 1);
1054 		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1055 		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
1056 
1057 		req = nreq;
1058 		sk_listener = nsk;
1059 	}
1060 
1061 	icsk = inet_csk(sk_listener);
1062 	net = sock_net(sk_listener);
1063 	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
1064 		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
1065 	/* Normally all the openreqs are young and become mature
1066 	 * (i.e. converted to an established socket) before the first timeout.
1067 	 * If synack was not acknowledged for 1 second, it means
1068 	 * one of the following things: synack was lost, ack was lost,
1069 	 * rtt is high or nobody planned to ack (i.e. synflood).
1070 	 * When server is a bit loaded, queue is populated with old
1071 	 * open requests, reducing effective size of queue.
1072 	 * When server is well loaded, queue size reduces to zero
1073 	 * after several minutes of work. It is not synflood,
1074 	 * it is normal operation. The solution is pruning
1075 	 * too old entries overriding normal timeout, when
1076 	 * situation becomes dangerous.
1077 	 *
1078 	 * Essentially, we reserve half of room for young
1079 	 * embrions; and abort old ones without pity, if old
1080 	 * ones are about to clog our table.
1081 	 */
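	/* Worked example (assuming the default of 5 SYN+ACK retries):
	 * sk_max_ack_backlog = 128, qlen = 80, 30 young reqs.  Pruning is on
	 * because 2 * 80 > 128; young starts at 2 * 30 = 60, so one retry is
	 * shaved (5 -> 4), then young doubles to 120 > 80 and the loop stops.
	 * max_syn_ack_retries is never reduced below 2.
	 */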
1082 	queue = &icsk->icsk_accept_queue;
1083 	qlen = reqsk_queue_len(queue);
1084 	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
1085 		int young = reqsk_queue_len_young(queue) << 1;
1086 
1087 		while (max_syn_ack_retries > 2) {
1088 			if (qlen < young)
1089 				break;
1090 			max_syn_ack_retries--;
1091 			young <<= 1;
1092 		}
1093 	}
1094 
1095 	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
1096 		       &expire, &resend);
1097 	tcp_syn_ack_timeout(req);
1098 
1099 	if (!expire &&
1100 	    (!resend ||
1101 	     !tcp_rtx_synack(sk_listener, req) ||
1102 	     inet_rsk(req)->acked)) {
1103 		if (req->num_retrans > 1 && tcp_rsk(req)->accecn_ok)
1104 			tcp_rsk(req)->accecn_fail_mode |= TCP_ACCECN_ACE_FAIL_SEND;
1105 		if (req->num_timeout++ == 0)
1106 			atomic_dec(&queue->young);
1107 		mod_timer(&req->rsk_timer, jiffies + tcp_reqsk_timeout(req));
1108 
1109 		if (!nreq)
1110 			return;
1111 
1112 		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
1113 			/* delete timer */
1114 			__inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
1115 			goto no_ownership;
1116 		}
1117 
1118 		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
1119 		reqsk_migrate_reset(oreq);
1120 		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
1121 		reqsk_put(oreq);
1122 
1123 		reqsk_put(nreq);
1124 		return;
1125 	}
1126 
1127 	/* Even if we can clone the req, we may not need to retransmit any more
1128 	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
1129 	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
1130 	 */
1131 	if (nreq) {
1132 		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
1133 no_ownership:
1134 		reqsk_migrate_reset(nreq);
1135 		reqsk_queue_removed(queue, nreq);
1136 		__reqsk_free(nreq);
1137 	}
1138 
1139 drop:
1140 	__inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
1141 	reqsk_put(oreq);
1142 }
1143 
1144 static bool reqsk_queue_hash_req(struct request_sock *req)
1145 {
1146 	bool found_dup_sk = false;
1147 
1148 	if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
1149 		return false;
1150 
1151 	/* The timer needs to be setup after a successful insertion. */
1152 	req->timeout = tcp_timeout_init((struct sock *)req);
1153 	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1154 	mod_timer(&req->rsk_timer, jiffies + req->timeout);
1155 
1156 	/* before letting lookups find us, make sure all req fields
1157 	 * are committed to memory and refcnt initialized.
1158 	 */
1159 	smp_wmb();
1160 	refcount_set(&req->rsk_refcnt, 2 + 1);
1161 	return true;
1162 }
1163 
1164 bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req)
1165 {
1166 	if (!reqsk_queue_hash_req(req))
1167 		return false;
1168 
1169 	inet_csk_reqsk_queue_added(sk);
1170 	return true;
1171 }
1172 
1173 static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
1174 			   const gfp_t priority)
1175 {
1176 	struct inet_connection_sock *icsk = inet_csk(newsk);
1177 
1178 	if (!icsk->icsk_ulp_ops)
1179 		return;
1180 
1181 	icsk->icsk_ulp_ops->clone(req, newsk, priority);
1182 }
1183 
1184 /**
1185  *	inet_csk_clone_lock - clone an inet socket, and lock its clone
1186  *	@sk: the socket to clone
1187  *	@req: request_sock
1188  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1189  *
1190  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1191  */
1192 struct sock *inet_csk_clone_lock(const struct sock *sk,
1193 				 const struct request_sock *req,
1194 				 const gfp_t priority)
1195 {
1196 	struct sock *newsk = sk_clone_lock(sk, priority);
1197 	struct inet_connection_sock *newicsk;
1198 	const struct inet_request_sock *ireq;
1199 	struct inet_sock *newinet;
1200 
1201 	if (!newsk)
1202 		return NULL;
1203 
1204 	newicsk = inet_csk(newsk);
1205 	newinet = inet_sk(newsk);
1206 	ireq = inet_rsk(req);
1207 
1208 	newicsk->icsk_bind_hash = NULL;
1209 	newicsk->icsk_bind2_hash = NULL;
1210 
1211 	newinet->inet_dport = ireq->ir_rmt_port;
1212 	newinet->inet_num = ireq->ir_num;
1213 	newinet->inet_sport = htons(ireq->ir_num);
1214 
1215 	newsk->sk_bound_dev_if = ireq->ir_iif;
1216 
1217 	newsk->sk_daddr = ireq->ir_rmt_addr;
1218 	newsk->sk_rcv_saddr = ireq->ir_loc_addr;
1219 	newinet->inet_saddr = ireq->ir_loc_addr;
1220 
1221 #if IS_ENABLED(CONFIG_IPV6)
1222 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1223 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1224 #endif
1225 
1226 	/* listeners have SOCK_RCU_FREE, not the children */
1227 	sock_reset_flag(newsk, SOCK_RCU_FREE);
1228 
1229 	inet_sk(newsk)->mc_list = NULL;
1230 
1231 	newsk->sk_mark = inet_rsk(req)->ir_mark;
1232 	atomic64_set(&newsk->sk_cookie,
1233 		     atomic64_read(&inet_rsk(req)->ir_cookie));
1234 
1235 	newicsk->icsk_retransmits = 0;
1236 	newicsk->icsk_backoff	  = 0;
1237 	newicsk->icsk_probes_out  = 0;
1238 	newicsk->icsk_probes_tstamp = 0;
1239 
1240 	/* Deinitialize accept_queue to trap illegal accesses. */
1241 	memset(&newicsk->icsk_accept_queue, 0,
1242 	       sizeof(newicsk->icsk_accept_queue));
1243 
1244 	inet_sk_set_state(newsk, TCP_SYN_RECV);
1245 
1246 	inet_clone_ulp(req, newsk, priority);
1247 
1248 	security_inet_csk_clone(newsk, req);
1249 
1250 	return newsk;
1251 }
1252 
1253 /*
1254  * At this point, there should be no process reference to this
1255  * socket, and thus no user references at all.  Therefore we
1256  * can assume the socket waitqueue is inactive and nobody will
1257  * try to jump onto it.
1258  */
1259 void inet_csk_destroy_sock(struct sock *sk)
1260 {
1261 	WARN_ON(sk->sk_state != TCP_CLOSE);
1262 	WARN_ON(!sock_flag(sk, SOCK_DEAD));
1263 
1264 	/* It cannot be in hash table! */
1265 	WARN_ON(!sk_unhashed(sk));
1266 
1267 	/* If inet_sk(sk)->inet_num is not 0, the socket must be bound */
1268 	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
1269 
1270 	sk->sk_prot->destroy(sk);
1271 
1272 	sk_stream_kill_queues(sk);
1273 
1274 	xfrm_sk_free_policy(sk);
1275 
1276 	tcp_orphan_count_dec();
1277 
1278 	sock_put(sk);
1279 }
1280 EXPORT_SYMBOL(inet_csk_destroy_sock);
1281 
1282 void inet_csk_prepare_for_destroy_sock(struct sock *sk)
1283 {
1284 	/* The below has to be done to allow calling inet_csk_destroy_sock */
1285 	sock_set_flag(sk, SOCK_DEAD);
1286 	tcp_orphan_count_inc();
1287 }
1288 
1289 /* This function allows to force a closure of a socket after the call to
1290  * tcp_create_openreq_child().
1291  */
1292 void inet_csk_prepare_forced_close(struct sock *sk)
1293 	__releases(&sk->sk_lock.slock)
1294 {
1295 	/* sk_clone_lock locked the socket and set refcnt to 2 */
1296 	bh_unlock_sock(sk);
1297 	sock_put(sk);
1298 	inet_csk_prepare_for_destroy_sock(sk);
1299 	inet_sk(sk)->inet_num = 0;
1300 }
1301 EXPORT_SYMBOL(inet_csk_prepare_forced_close);
1302 
1303 static int inet_ulp_can_listen(const struct sock *sk)
1304 {
1305 	const struct inet_connection_sock *icsk = inet_csk(sk);
1306 
1307 	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
1308 		return -EINVAL;
1309 
1310 	return 0;
1311 }
1312 
1313 static void reqsk_queue_alloc(struct request_sock_queue *queue)
1314 {
1315 	queue->fastopenq.rskq_rst_head = NULL;
1316 	queue->fastopenq.rskq_rst_tail = NULL;
1317 	queue->fastopenq.qlen = 0;
1318 
1319 	queue->rskq_accept_head = NULL;
1320 }
1321 
1322 int inet_csk_listen_start(struct sock *sk)
1323 {
1324 	struct inet_connection_sock *icsk = inet_csk(sk);
1325 	struct inet_sock *inet = inet_sk(sk);
1326 	int err;
1327 
1328 	err = inet_ulp_can_listen(sk);
1329 	if (unlikely(err))
1330 		return err;
1331 
1332 	reqsk_queue_alloc(&icsk->icsk_accept_queue);
1333 
1334 	sk->sk_ack_backlog = 0;
1335 	inet_csk_delack_init(sk);
1336 
1337 	/* There is a race window here: we announce ourselves listening,
1338 	 * but this transition is still not validated by get_port().
1339 	 * It is OK, because this socket enters the hash table only
1340 	 * after validation is complete.
1341 	 */
1342 	inet_sk_state_store(sk, TCP_LISTEN);
1343 	err = sk->sk_prot->get_port(sk, inet->inet_num);
1344 	if (!err) {
1345 		inet->inet_sport = htons(inet->inet_num);
1346 
1347 		sk_dst_reset(sk);
1348 		err = sk->sk_prot->hash(sk);
1349 
1350 		if (likely(!err))
1351 			return 0;
1352 	}
1353 
1354 	inet_sk_set_state(sk, TCP_CLOSE);
1355 	return err;
1356 }
1357 
1358 static void inet_child_forget(struct sock *sk, struct request_sock *req,
1359 			      struct sock *child)
1360 {
1361 	sk->sk_prot->disconnect(child, O_NONBLOCK);
1362 
1363 	sock_orphan(child);
1364 
1365 	tcp_orphan_count_inc();
1366 
1367 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
1368 		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
1369 		BUG_ON(sk != req->rsk_listener);
1370 
1371 		/* Paranoid, to prevent race condition if
1372 		 * an inbound pkt destined for child is
1373 		 * blocked by sock lock in tcp_v4_rcv().
1374 		 * Also to satisfy an assertion in
1375 		 * tcp_v4_destroy_sock().
1376 		 */
1377 		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
1378 	}
1379 	inet_csk_destroy_sock(child);
1380 }
1381 
1382 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
1383 				      struct request_sock *req,
1384 				      struct sock *child)
1385 {
1386 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1387 
1388 	spin_lock(&queue->rskq_lock);
1389 	if (unlikely(sk->sk_state != TCP_LISTEN)) {
1390 		inet_child_forget(sk, req, child);
1391 		child = NULL;
1392 	} else {
1393 		req->sk = child;
1394 		req->dl_next = NULL;
1395 		if (queue->rskq_accept_head == NULL)
1396 			WRITE_ONCE(queue->rskq_accept_head, req);
1397 		else
1398 			queue->rskq_accept_tail->dl_next = req;
1399 		queue->rskq_accept_tail = req;
1400 		sk_acceptq_added(sk);
1401 	}
1402 	spin_unlock(&queue->rskq_lock);
1403 	return child;
1404 }
1405 EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
1406 
1407 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
1408 					 struct request_sock *req, bool own_req)
1409 {
1410 	if (own_req) {
1411 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
1412 		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
1413 
1414 		if (sk != req->rsk_listener) {
1415 			/* another listening sk has been selected,
1416 			 * migrate the req to it.
1417 			 */
1418 			struct request_sock *nreq;
1419 
1420 			/* hold a refcnt for the nreq->rsk_listener
1421 			 * which is assigned in inet_reqsk_clone()
1422 			 */
1423 			sock_hold(sk);
1424 			nreq = inet_reqsk_clone(req, sk);
1425 			if (!nreq) {
1426 				inet_child_forget(sk, req, child);
1427 				goto child_put;
1428 			}
1429 
1430 			refcount_set(&nreq->rsk_refcnt, 1);
1431 			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
1432 				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
1433 				reqsk_migrate_reset(req);
1434 				reqsk_put(req);
1435 				return child;
1436 			}
1437 
1438 			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
1439 			reqsk_migrate_reset(nreq);
1440 			__reqsk_free(nreq);
1441 		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
1442 			return child;
1443 		}
1444 	}
1445 	/* Too bad, another child took ownership of the request, undo. */
1446 child_put:
1447 	bh_unlock_sock(child);
1448 	sock_put(child);
1449 	return NULL;
1450 }
1451 
1452 /*
1453  *	This routine closes sockets which have been at least partially
1454  *	opened, but not yet accepted.
1455  */
1456 void inet_csk_listen_stop(struct sock *sk)
1457 {
1458 	struct inet_connection_sock *icsk = inet_csk(sk);
1459 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
1460 	struct request_sock *next, *req;
1461 
1462 	/* Following specs, it would be better either to send FIN
1463 	 * (and enter FIN-WAIT-1, it is normal close)
1464 	 * or to send active reset (abort).
1465 	 * Certainly, it is pretty dangerous while synflood, but it is
1466 	 * bad justification for our negligence 8)
1467 	 * To be honest, we are not able to make either
1468 	 * of the variants now.			--ANK
1469 	 */
1470 	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
1471 		struct sock *child = req->sk, *nsk;
1472 		struct request_sock *nreq;
1473 
1474 		local_bh_disable();
1475 		bh_lock_sock(child);
1476 		WARN_ON(sock_owned_by_user(child));
1477 		sock_hold(child);
1478 
1479 		nsk = reuseport_migrate_sock(sk, child, NULL);
1480 		if (nsk) {
1481 			nreq = inet_reqsk_clone(req, nsk);
1482 			if (nreq) {
1483 				refcount_set(&nreq->rsk_refcnt, 1);
1484 
1485 				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
1486 					__NET_INC_STATS(sock_net(nsk),
1487 							LINUX_MIB_TCPMIGRATEREQSUCCESS);
1488 					reqsk_migrate_reset(req);
1489 				} else {
1490 					__NET_INC_STATS(sock_net(nsk),
1491 							LINUX_MIB_TCPMIGRATEREQFAILURE);
1492 					reqsk_migrate_reset(nreq);
1493 					__reqsk_free(nreq);
1494 				}
1495 
1496 				/* inet_csk_reqsk_queue_add() has already
1497 				 * called inet_child_forget() on failure case.
1498 				 */
1499 				goto skip_child_forget;
1500 			}
1501 		}
1502 
1503 		inet_child_forget(sk, req, child);
1504 skip_child_forget:
1505 		reqsk_put(req);
1506 		bh_unlock_sock(child);
1507 		local_bh_enable();
1508 		sock_put(child);
1509 
1510 		cond_resched();
1511 	}
1512 	if (queue->fastopenq.rskq_rst_head) {
1513 		/* Free all the reqs queued in rskq_rst_head. */
1514 		spin_lock_bh(&queue->fastopenq.lock);
1515 		req = queue->fastopenq.rskq_rst_head;
1516 		queue->fastopenq.rskq_rst_head = NULL;
1517 		spin_unlock_bh(&queue->fastopenq.lock);
1518 		while (req != NULL) {
1519 			next = req->dl_next;
1520 			reqsk_put(req);
1521 			req = next;
1522 		}
1523 	}
1524 	WARN_ON_ONCE(sk->sk_ack_backlog);
1525 }
1526 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
1527 
1528 static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
1529 {
1530 	const struct inet_sock *inet = inet_sk(sk);
1531 	struct flowi4 *fl4;
1532 	struct rtable *rt;
1533 
1534 	rcu_read_lock();
1535 	fl4 = &fl->u.ip4;
1536 	inet_sk_init_flowi4(inet, fl4);
1537 	rt = ip_route_output_flow(sock_net(sk), fl4, sk);
1538 	if (IS_ERR(rt))
1539 		rt = NULL;
1540 	if (rt)
1541 		sk_setup_caps(sk, &rt->dst);
1542 	rcu_read_unlock();
1543 
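	/* If the lookup failed, rt is NULL and &rt->dst evaluates to NULL as
	 * well (dst is the first member of struct rtable), which is what the
	 * callers check for.
	 */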
1544 	return &rt->dst;
1545 }
1546 
1547 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
1548 {
1549 	struct dst_entry *dst = __sk_dst_check(sk, 0);
1550 	struct inet_sock *inet = inet_sk(sk);
1551 
1552 	if (!dst) {
1553 		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1554 		if (!dst)
1555 			goto out;
1556 	}
1557 	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
1558 
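	/* update_pmtu() may have invalidated the cached route, so validate it
	 * again and rebuild it if it is gone.
	 */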
1559 	dst = __sk_dst_check(sk, 0);
1560 	if (!dst)
1561 		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1562 out:
1563 	return dst;
1564 }
1565