1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Support for INET connection oriented protocols.
8 *
9 * Authors: See the TCP sources
10 */
11
12 #include <linux/module.h>
13 #include <linux/jhash.h>
14
15 #include <net/inet_connection_sock.h>
16 #include <net/inet_hashtables.h>
17 #include <net/inet_timewait_sock.h>
18 #include <net/ip.h>
19 #include <net/route.h>
20 #include <net/tcp_states.h>
21 #include <net/xfrm.h>
22 #include <net/tcp.h>
23 #include <net/tcp_ecn.h>
24 #include <net/sock_reuseport.h>
25 #include <net/addrconf.h>
26
27 #if IS_ENABLED(CONFIG_IPV6)
28 /* match_sk*_wildcard == true: IPV6_ADDR_ANY matches any IPv6 address,
29 * and also any IPv4 address
30 * unless the socket is IPv6-only
31 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
32 * IPV6_ADDR_ANY only matches IPV6_ADDR_ANY,
33 * and 0.0.0.0 only matches 0.0.0.0
34 */
35 static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
36 const struct in6_addr *sk2_rcv_saddr6,
37 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
38 bool sk1_ipv6only, bool sk2_ipv6only,
39 bool match_sk1_wildcard,
40 bool match_sk2_wildcard)
41 {
42 int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
43 int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
44
45 /* if both are mapped, treat as IPv4 */
46 if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
47 if (!sk2_ipv6only) {
48 if (sk1_rcv_saddr == sk2_rcv_saddr)
49 return true;
50 return (match_sk1_wildcard && !sk1_rcv_saddr) ||
51 (match_sk2_wildcard && !sk2_rcv_saddr);
52 }
53 return false;
54 }
55
56 if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
57 return true;
58
59 if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
60 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
61 return true;
62
63 if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
64 !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
65 return true;
66
67 if (sk2_rcv_saddr6 &&
68 ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
69 return true;
70
71 return false;
72 }
73 #endif
74
75 /* match_sk*_wildcard == true: 0.0.0.0 matches any IPv4 address
76 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
77 * 0.0.0.0 only matches 0.0.0.0
78 */
79 static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
80 bool sk2_ipv6only, bool match_sk1_wildcard,
81 bool match_sk2_wildcard)
82 {
83 if (!sk2_ipv6only) {
84 if (sk1_rcv_saddr == sk2_rcv_saddr)
85 return true;
86 return (match_sk1_wildcard && !sk1_rcv_saddr) ||
87 (match_sk2_wildcard && !sk2_rcv_saddr);
88 }
89 return false;
90 }
91
92 bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
93 bool match_wildcard)
94 {
95 #if IS_ENABLED(CONFIG_IPV6)
96 if (sk->sk_family == AF_INET6)
97 return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
98 inet6_rcv_saddr(sk2),
99 sk->sk_rcv_saddr,
100 sk2->sk_rcv_saddr,
101 ipv6_only_sock(sk),
102 ipv6_only_sock(sk2),
103 match_wildcard,
104 match_wildcard);
105 #endif
106 return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
107 ipv6_only_sock(sk2), match_wildcard,
108 match_wildcard);
109 }
110
111 bool inet_rcv_saddr_any(const struct sock *sk)
112 {
113 #if IS_ENABLED(CONFIG_IPV6)
114 if (sk->sk_family == AF_INET6)
115 return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
116 #endif
117 return !sk->sk_rcv_saddr;
118 }
119
120 /**
121 * inet_sk_get_local_port_range - fetch ephemeral ports range
122 * @sk: socket
123 * @low: pointer to low port
124 * @high: pointer to high port
125 *
126 * Fetch the netns port range (/proc/sys/net/ipv4/ip_local_port_range).
127 * The range can be overridden if the socket has the IP_LOCAL_PORT_RANGE option set.
128 * Returns true if IP_LOCAL_PORT_RANGE was set on this socket.
129 */
130 bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
131 {
132 int lo, hi, sk_lo, sk_hi;
133 bool local_range = false;
134 u32 sk_range;
135
136 inet_get_local_port_range(sock_net(sk), &lo, &hi);
137
138 sk_range = READ_ONCE(inet_sk(sk)->local_port_range);
139 if (unlikely(sk_range)) {
140 sk_lo = sk_range & 0xffff;
141 sk_hi = sk_range >> 16;
142
143 if (lo <= sk_lo && sk_lo <= hi)
144 lo = sk_lo;
145 if (lo <= sk_hi && sk_hi <= hi)
146 hi = sk_hi;
147 local_range = true;
148 }
149
150 *low = lo;
151 *high = hi;
152 return local_range;
153 }
154 EXPORT_SYMBOL(inet_sk_get_local_port_range);
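/* Illustrative userspace sketch (not part of this file): the packed u32 read
 * above is what the IP_LOCAL_PORT_RANGE socket option stores, with the low
 * bound in the lower 16 bits and the high bound in the upper 16 bits.
 * Assuming an already created TCP socket fd, a caller could narrow its
 * ephemeral range roughly like this:
 *
 *	uint32_t range = (60000U << 16) | 50000;	// use ports 50000..60000
 *	setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
 *
 * A value of 0 clears the per-socket override; bounds outside the netns
 * ip_local_port_range are clamped to it by the function above.
 */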
155
156 static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
157 kuid_t uid, bool relax,
158 bool reuseport_cb_ok, bool reuseport_ok)
159 {
160 int bound_dev_if2;
161
162 if (sk == sk2)
163 return false;
164
165 bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
166
167 if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
168 sk->sk_bound_dev_if == bound_dev_if2) {
169 if (sk->sk_reuse && sk2->sk_reuse &&
170 sk2->sk_state != TCP_LISTEN) {
171 if (!relax || (!reuseport_ok && sk->sk_reuseport &&
172 sk2->sk_reuseport && reuseport_cb_ok &&
173 (sk2->sk_state == TCP_TIME_WAIT ||
174 uid_eq(uid, sk_uid(sk2)))))
175 return true;
176 } else if (!reuseport_ok || !sk->sk_reuseport ||
177 !sk2->sk_reuseport || !reuseport_cb_ok ||
178 (sk2->sk_state != TCP_TIME_WAIT &&
179 !uid_eq(uid, sk_uid(sk2)))) {
180 return true;
181 }
182 }
183 return false;
184 }
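/* Worked illustration of the rules above (commentary, not from the original
 * file): with relax == true and no device bindings, two SO_REUSEADDR sockets
 * on the same port do not conflict as long as the already-bound one is not
 * listening. If the old socket is listening, or either side lacks
 * SO_REUSEADDR, the bind only succeeds when reuseport_ok holds, both sockets
 * set SO_REUSEPORT with the same uid (or the old one is in TCP_TIME_WAIT),
 * and no closed socket lingers in the reuseport group (reuseport_cb_ok).
 */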
185
186 static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
187 kuid_t uid, bool relax,
188 bool reuseport_cb_ok, bool reuseport_ok)
189 {
190 if (ipv6_only_sock(sk2)) {
191 if (sk->sk_family == AF_INET)
192 return false;
193
194 #if IS_ENABLED(CONFIG_IPV6)
195 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
196 return false;
197 #endif
198 }
199
200 return inet_bind_conflict(sk, sk2, uid, relax,
201 reuseport_cb_ok, reuseport_ok);
202 }
203
204 static bool inet_bhash2_conflict(const struct sock *sk,
205 const struct inet_bind2_bucket *tb2,
206 kuid_t uid,
207 bool relax, bool reuseport_cb_ok,
208 bool reuseport_ok)
209 {
210 struct sock *sk2;
211
212 sk_for_each_bound(sk2, &tb2->owners) {
213 if (__inet_bhash2_conflict(sk, sk2, uid, relax,
214 reuseport_cb_ok, reuseport_ok))
215 return true;
216 }
217
218 return false;
219 }
220
221 #define sk_for_each_bound_bhash(__sk, __tb2, __tb) \
222 hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node) \
223 sk_for_each_bound((__sk), &(__tb2)->owners)
224
225 /* This should be called only when the tb and tb2 hashbuckets' locks are held */
226 static int inet_csk_bind_conflict(const struct sock *sk,
227 const struct inet_bind_bucket *tb,
228 const struct inet_bind2_bucket *tb2, /* may be null */
229 bool relax, bool reuseport_ok)
230 {
231 struct sock_reuseport *reuseport_cb;
232 kuid_t uid = sk_uid(sk);
233 bool reuseport_cb_ok;
234 struct sock *sk2;
235
236 rcu_read_lock();
237 reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
238 /* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
239 reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
240 rcu_read_unlock();
241
242 /* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
243 * ipv4) should have been checked already. We need to do these two
244 * checks separately because their spinlocks have to be acquired/released
245 * independently of each other, to prevent possible deadlocks
246 */
247 if (inet_use_hash2_on_bind(sk))
248 return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax,
249 reuseport_cb_ok, reuseport_ok);
250
251 /* Unlike other sk lookup places we do not check
252 * for sk_net here, since _all_ the socks listed
253 * in tb->owners and tb2->owners list belong
254 * to the same net - the one this bucket belongs to.
255 */
256 sk_for_each_bound_bhash(sk2, tb2, tb) {
257 if (!inet_bind_conflict(sk, sk2, uid, relax, reuseport_cb_ok, reuseport_ok))
258 continue;
259
260 if (inet_rcv_saddr_equal(sk, sk2, true))
261 return true;
262 }
263
264 return false;
265 }
266
267 /* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
268 * INADDR_ANY (if ipv4) socket.
269 *
270 * Caller must hold bhash hashbucket lock with local bh disabled, to protect
271 * against concurrent binds on the port for addr any
272 */
273 static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
274 bool relax, bool reuseport_ok)
275 {
276 const struct net *net = sock_net(sk);
277 struct sock_reuseport *reuseport_cb;
278 struct inet_bind_hashbucket *head2;
279 struct inet_bind2_bucket *tb2;
280 kuid_t uid = sk_uid(sk);
281 bool conflict = false;
282 bool reuseport_cb_ok;
283
284 rcu_read_lock();
285 reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
286 /* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
287 reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
288 rcu_read_unlock();
289
290 head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);
291
292 spin_lock(&head2->lock);
293
294 inet_bind_bucket_for_each(tb2, &head2->chain) {
295 if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
296 continue;
297
298 if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
299 continue;
300
301 conflict = true;
302 break;
303 }
304
305 spin_unlock(&head2->lock);
306
307 return conflict;
308 }
309
310 /*
311 * Find an open port number for the socket. Returns with the
312 * inet_bind_hashbucket locks held if successful.
313 */
314 static struct inet_bind_hashbucket *
315 inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
316 struct inet_bind2_bucket **tb2_ret,
317 struct inet_bind_hashbucket **head2_ret, int *port_ret)
318 {
319 struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
320 int i, low, high, attempt_half, port, l3mdev;
321 struct inet_bind_hashbucket *head, *head2;
322 struct net *net = sock_net(sk);
323 struct inet_bind2_bucket *tb2;
324 struct inet_bind_bucket *tb;
325 u32 remaining, offset;
326 bool relax = false;
327
328 l3mdev = inet_sk_bound_l3mdev(sk);
329 ports_exhausted:
330 attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
331 other_half_scan:
332 inet_sk_get_local_port_range(sk, &low, &high);
333 high++; /* [32768, 60999] -> [32768, 61000[ */
334 if (high - low < 4)
335 attempt_half = 0;
336 if (attempt_half) {
337 int half = low + (((high - low) >> 2) << 1);
338
339 if (attempt_half == 1)
340 high = half;
341 else
342 low = half;
343 }
344 remaining = high - low;
345 if (likely(remaining > 1))
346 remaining &= ~1U;
347
348 offset = get_random_u32_below(remaining);
349 /* __inet_hash_connect() favors ports having @low parity
350 * We do the opposite to not pollute connect() users.
351 */
352 offset |= 1U;
353
354 other_parity_scan:
355 port = low + offset;
356 for (i = 0; i < remaining; i += 2, port += 2) {
357 if (unlikely(port >= high))
358 port -= remaining;
359 if (inet_is_local_reserved_port(net, port))
360 continue;
361 head = &hinfo->bhash[inet_bhashfn(net, port,
362 hinfo->bhash_size)];
363 spin_lock_bh(&head->lock);
364 if (inet_use_hash2_on_bind(sk)) {
365 if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
366 goto next_port;
367 }
368
369 head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
370 spin_lock(&head2->lock);
371 tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
372 inet_bind_bucket_for_each(tb, &head->chain)
373 if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
374 if (!inet_csk_bind_conflict(sk, tb, tb2,
375 relax, false))
376 goto success;
377 spin_unlock(&head2->lock);
378 goto next_port;
379 }
380 tb = NULL;
381 goto success;
382 next_port:
383 spin_unlock_bh(&head->lock);
384 cond_resched();
385 }
386
387 offset--;
388 if (!(offset & 1))
389 goto other_parity_scan;
390
391 if (attempt_half == 1) {
392 /* OK we now try the upper half of the range */
393 attempt_half = 2;
394 goto other_half_scan;
395 }
396
397 if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
398 /* We still have a chance to connect to different destinations */
399 relax = true;
400 goto ports_exhausted;
401 }
402 return NULL;
403 success:
404 *port_ret = port;
405 *tb_ret = tb;
406 *tb2_ret = tb2;
407 *head2_ret = head2;
408 return head;
409 }
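/* Worked example of the scan above (commentary, not from the original file):
 * with the default range [32768, 60999], high becomes 61000 and
 * half = 32768 + (((61000 - 32768) >> 2) << 1) = 46884, so a SK_CAN_REUSE
 * socket tries [32768, 46884) before falling back to the upper half. The
 * random offset is forced odd (offset |= 1), which makes this autobind path
 * prefer odd port numbers while __inet_hash_connect() prefers even ones, so
 * bind(0) users and connect() users mostly draw from disjoint pools.
 */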
410
411 static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
412 const struct sock *sk)
413 {
414 if (tb->fastreuseport <= 0)
415 return 0;
416 if (!sk->sk_reuseport)
417 return 0;
418 if (rcu_access_pointer(sk->sk_reuseport_cb))
419 return 0;
420 if (!uid_eq(tb->fastuid, sk_uid(sk)))
421 return 0;
422 /* We only need to check the rcv_saddr if this tb was once marked
423 * without fastreuseport and then was reset, as we can only know that
424 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
425 * owners list.
426 */
427 if (tb->fastreuseport == FASTREUSEPORT_ANY)
428 return 1;
429 #if IS_ENABLED(CONFIG_IPV6)
430 if (tb->fast_sk_family == AF_INET6)
431 return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
432 inet6_rcv_saddr(sk),
433 tb->fast_rcv_saddr,
434 sk->sk_rcv_saddr,
435 tb->fast_ipv6_only,
436 ipv6_only_sock(sk), true, false);
437 #endif
438 return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
439 ipv6_only_sock(sk), true, false);
440 }
441
442 void inet_csk_update_fastreuse(const struct sock *sk,
443 struct inet_bind_bucket *tb,
444 struct inet_bind2_bucket *tb2)
445 {
446 bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
447
448 if (hlist_empty(&tb->bhash2)) {
449 tb->fastreuse = reuse;
450 if (sk->sk_reuseport) {
451 tb->fastreuseport = FASTREUSEPORT_ANY;
452 tb->fastuid = sk_uid(sk);
453 tb->fast_rcv_saddr = sk->sk_rcv_saddr;
454 tb->fast_ipv6_only = ipv6_only_sock(sk);
455 tb->fast_sk_family = sk->sk_family;
456 #if IS_ENABLED(CONFIG_IPV6)
457 tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
458 #endif
459 } else {
460 tb->fastreuseport = 0;
461 }
462 } else {
463 if (!reuse)
464 tb->fastreuse = 0;
465 if (sk->sk_reuseport) {
466 /* We didn't match or we don't have fastreuseport set on
467 * the tb, but we have sk_reuseport set on this socket
468 * and we know that there are no bind conflicts with
469 * this socket in this tb, so reset our tb's reuseport
470 * settings so that any subsequent sockets that match
471 * our current socket will be put on the fast path.
472 *
473 * If we reset we need to set FASTREUSEPORT_STRICT so we
474 * do extra checking for all subsequent sk_reuseport
475 * socks.
476 */
477 if (!sk_reuseport_match(tb, sk)) {
478 tb->fastreuseport = FASTREUSEPORT_STRICT;
479 tb->fastuid = sk_uid(sk);
480 tb->fast_rcv_saddr = sk->sk_rcv_saddr;
481 tb->fast_ipv6_only = ipv6_only_sock(sk);
482 tb->fast_sk_family = sk->sk_family;
483 #if IS_ENABLED(CONFIG_IPV6)
484 tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
485 #endif
486 }
487 } else {
488 tb->fastreuseport = 0;
489 }
490 }
491
492 tb2->fastreuse = tb->fastreuse;
493 tb2->fastreuseport = tb->fastreuseport;
494 }
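/* Hedged usage note (commentary, not from the original file): the fields
 * cached here feed sk_reuseport_match() and the fastreuse test in
 * inet_csk_get_port(). For example, if the first socket on a port sets
 * SO_REUSEPORT, the bucket is marked FASTREUSEPORT_ANY and a later bind by
 * the same uid with SO_REUSEPORT skips the full conflict scan. Once a
 * socket that does not match joins the bucket, it is downgraded to
 * FASTREUSEPORT_STRICT and subsequent reuseport binds re-check the bound
 * address as well.
 */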
495
496 /* Obtain a reference to a local port for the given sock,
497 * if snum is zero it means select any available local port.
498 * We try to allocate an odd port (and leave even ports for connect())
499 */
500 int inet_csk_get_port(struct sock *sk, unsigned short snum)
501 {
502 bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
503 bool found_port = false, check_bind_conflict = true;
504 bool bhash_created = false, bhash2_created = false;
505 struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
506 int ret = -EADDRINUSE, port = snum, l3mdev;
507 struct inet_bind_hashbucket *head, *head2;
508 struct inet_bind2_bucket *tb2 = NULL;
509 struct inet_bind_bucket *tb = NULL;
510 bool head2_lock_acquired = false;
511 struct net *net = sock_net(sk);
512
513 l3mdev = inet_sk_bound_l3mdev(sk);
514
515 if (!port) {
516 head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
517 if (!head)
518 return ret;
519
520 head2_lock_acquired = true;
521
522 if (tb && tb2)
523 goto success;
524 found_port = true;
525 } else {
526 head = &hinfo->bhash[inet_bhashfn(net, port,
527 hinfo->bhash_size)];
528 spin_lock_bh(&head->lock);
529 inet_bind_bucket_for_each(tb, &head->chain)
530 if (inet_bind_bucket_match(tb, net, port, l3mdev))
531 break;
532 }
533
534 if (!tb) {
535 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
536 head, port, l3mdev);
537 if (!tb)
538 goto fail_unlock;
539 bhash_created = true;
540 }
541
542 if (!found_port) {
543 if (!hlist_empty(&tb->bhash2)) {
544 if (sk->sk_reuse == SK_FORCE_REUSE ||
545 (tb->fastreuse > 0 && reuse) ||
546 sk_reuseport_match(tb, sk))
547 check_bind_conflict = false;
548 }
549
550 if (check_bind_conflict && inet_use_hash2_on_bind(sk)) {
551 if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
552 goto fail_unlock;
553 }
554
555 head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
556 spin_lock(&head2->lock);
557 head2_lock_acquired = true;
558 tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
559 }
560
561 if (!tb2) {
562 tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
563 net, head2, tb, sk);
564 if (!tb2)
565 goto fail_unlock;
566 bhash2_created = true;
567 }
568
569 if (!found_port && check_bind_conflict) {
570 if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
571 goto fail_unlock;
572 }
573
574 success:
575 inet_csk_update_fastreuse(sk, tb, tb2);
576
577 if (!inet_csk(sk)->icsk_bind_hash)
578 inet_bind_hash(sk, tb, tb2, port);
579 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
580 WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
581 ret = 0;
582
583 fail_unlock:
584 if (ret) {
585 if (bhash2_created)
586 inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, tb2);
587 if (bhash_created)
588 inet_bind_bucket_destroy(tb);
589 }
590 if (head2_lock_acquired)
591 spin_unlock(&head2->lock);
592 spin_unlock_bh(&head->lock);
593 return ret;
594 }
595 EXPORT_SYMBOL_GPL(inet_csk_get_port);
596
597 /*
598 * Wait for an incoming connection, avoid race conditions. This must be called
599 * with the socket locked.
600 */
601 static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
602 {
603 struct inet_connection_sock *icsk = inet_csk(sk);
604 DEFINE_WAIT(wait);
605 int err;
606
607 /*
608 * True wake-one mechanism for incoming connections: only
609 * one process gets woken up, not the 'whole herd'.
610 * Since we do not 'race & poll' for established sockets
611 * anymore, the common case will execute the loop only once.
612 *
613 * Subtle issue: "add_wait_queue_exclusive()" will be added
614 * after any current non-exclusive waiters, and we know that
615 * it will always _stay_ after any new non-exclusive waiters
616 * because all non-exclusive waiters are added at the
617 * beginning of the wait-queue. As such, it's ok to "drop"
618 * our exclusiveness temporarily when we get woken up without
619 * having to remove and re-insert us on the wait queue.
620 */
621 for (;;) {
622 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
623 TASK_INTERRUPTIBLE);
624 release_sock(sk);
625 if (reqsk_queue_empty(&icsk->icsk_accept_queue))
626 timeo = schedule_timeout(timeo);
627 sched_annotate_sleep();
628 lock_sock(sk);
629 err = 0;
630 if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
631 break;
632 err = -EINVAL;
633 if (sk->sk_state != TCP_LISTEN)
634 break;
635 err = sock_intr_errno(timeo);
636 if (signal_pending(current))
637 break;
638 err = -EAGAIN;
639 if (!timeo)
640 break;
641 }
642 finish_wait(sk_sleep(sk), &wait);
643 return err;
644 }
645
646 /*
647 * This will accept the next outstanding connection.
648 */
649 struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
650 {
651 struct inet_connection_sock *icsk = inet_csk(sk);
652 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
653 struct request_sock *req;
654 struct sock *newsk;
655 int error;
656
657 lock_sock(sk);
658
659 /* We need to make sure that this socket is listening,
660 * and that it has something pending.
661 */
662 error = -EINVAL;
663 if (sk->sk_state != TCP_LISTEN)
664 goto out_err;
665
666 /* Find already established connection */
667 if (reqsk_queue_empty(queue)) {
668 long timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
669
670 /* If this is a non blocking socket don't sleep */
671 error = -EAGAIN;
672 if (!timeo)
673 goto out_err;
674
675 error = inet_csk_wait_for_connect(sk, timeo);
676 if (error)
677 goto out_err;
678 }
679 req = reqsk_queue_remove(queue, sk);
680 arg->is_empty = reqsk_queue_empty(queue);
681 newsk = req->sk;
682
683 if (sk->sk_protocol == IPPROTO_TCP &&
684 tcp_rsk(req)->tfo_listener) {
685 spin_lock_bh(&queue->fastopenq.lock);
686 if (tcp_rsk(req)->tfo_listener) {
687 /* We are still waiting for the final ACK from 3WHS
688 * so can't free req now. Instead, we set req->sk to
689 * NULL to signify that the child socket is taken
690 * so reqsk_fastopen_remove() will free the req
691 * when 3WHS finishes (or is aborted).
692 */
693 req->sk = NULL;
694 req = NULL;
695 }
696 spin_unlock_bh(&queue->fastopenq.lock);
697 }
698
699 release_sock(sk);
700
701 if (req)
702 reqsk_put(req);
703
704 inet_init_csk_locks(newsk);
705 return newsk;
706
707 out_err:
708 release_sock(sk);
709 arg->err = error;
710 return NULL;
711 }
712
713 /*
714 * Using different timers for retransmit, delayed acks and probes
715 * We may wish to use just one timer maintaining a list of expire jiffies
716 * to optimize.
717 */
718 void inet_csk_init_xmit_timers(struct sock *sk,
719 void (*retransmit_handler)(struct timer_list *t),
720 void (*delack_handler)(struct timer_list *t),
721 void (*keepalive_handler)(struct timer_list *t))
722 {
723 struct inet_connection_sock *icsk = inet_csk(sk);
724
725 timer_setup(&sk->tcp_retransmit_timer, retransmit_handler, 0);
726 timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
727 timer_setup(&icsk->icsk_keepalive_timer, keepalive_handler, 0);
728 icsk->icsk_pending = icsk->icsk_ack.pending = 0;
729 }
730
731 void inet_csk_clear_xmit_timers(struct sock *sk)
732 {
733 struct inet_connection_sock *icsk = inet_csk(sk);
734
735 smp_store_release(&icsk->icsk_pending, 0);
736 smp_store_release(&icsk->icsk_ack.pending, 0);
737
738 sk_stop_timer(sk, &sk->tcp_retransmit_timer);
739 sk_stop_timer(sk, &icsk->icsk_delack_timer);
740 sk_stop_timer(sk, &icsk->icsk_keepalive_timer);
741 }
742
743 void inet_csk_clear_xmit_timers_sync(struct sock *sk)
744 {
745 struct inet_connection_sock *icsk = inet_csk(sk);
746
747 /* ongoing timer handlers need to acquire socket lock. */
748 sock_not_owned_by_me(sk);
749
750 smp_store_release(&icsk->icsk_pending, 0);
751 smp_store_release(&icsk->icsk_ack.pending, 0);
752
753 sk_stop_timer_sync(sk, &sk->tcp_retransmit_timer);
754 sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
755 sk_stop_timer_sync(sk, &icsk->icsk_keepalive_timer);
756 }
757
758 struct dst_entry *inet_csk_route_req(const struct sock *sk,
759 struct flowi4 *fl4,
760 const struct request_sock *req)
761 {
762 const struct inet_request_sock *ireq = inet_rsk(req);
763 struct net *net = read_pnet(&ireq->ireq_net);
764 struct ip_options_rcu *opt;
765 struct rtable *rt;
766
767 rcu_read_lock();
768 opt = rcu_dereference(ireq->ireq_opt);
769
770 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
771 ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
772 sk->sk_protocol, inet_sk_flowi_flags(sk),
773 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
774 ireq->ir_loc_addr, ireq->ir_rmt_port,
775 htons(ireq->ir_num), sk_uid(sk));
776 security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
777 rt = ip_route_output_flow(net, fl4, sk);
778 if (IS_ERR(rt))
779 goto no_route;
780 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
781 goto route_err;
782 rcu_read_unlock();
783 return &rt->dst;
784
785 route_err:
786 ip_rt_put(rt);
787 no_route:
788 rcu_read_unlock();
789 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
790 return NULL;
791 }
792
793 struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
794 struct sock *newsk,
795 const struct request_sock *req)
796 {
797 const struct inet_request_sock *ireq = inet_rsk(req);
798 struct net *net = read_pnet(&ireq->ireq_net);
799 struct inet_sock *newinet = inet_sk(newsk);
800 struct ip_options_rcu *opt;
801 struct flowi4 *fl4;
802 struct rtable *rt;
803
804 opt = rcu_dereference(ireq->ireq_opt);
805 fl4 = &newinet->cork.fl.u.ip4;
806
807 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
808 ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
809 sk->sk_protocol, inet_sk_flowi_flags(sk),
810 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
811 ireq->ir_loc_addr, ireq->ir_rmt_port,
812 htons(ireq->ir_num), sk_uid(sk));
813 security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
814 rt = ip_route_output_flow(net, fl4, sk);
815 if (IS_ERR(rt))
816 goto no_route;
817 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
818 goto route_err;
819 return &rt->dst;
820
821 route_err:
822 ip_rt_put(rt);
823 no_route:
824 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
825 return NULL;
826 }
827 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
828
829 /* Decide when to expire the request and when to resend SYN-ACK */
830 static void syn_ack_recalc(struct request_sock *req,
831 const int max_syn_ack_retries,
832 const u8 rskq_defer_accept,
833 int *expire, int *resend)
834 {
835 if (!rskq_defer_accept) {
836 *expire = req->num_timeout >= max_syn_ack_retries;
837 *resend = 1;
838 return;
839 }
840 *expire = req->num_timeout >= max_syn_ack_retries &&
841 (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
842 /* Do not resend while waiting for data after ACK,
843 * start to resend on end of deferring period to give
844 * last chance for data or ACK to create established socket.
845 */
846 *resend = !inet_rsk(req)->acked ||
847 req->num_timeout >= rskq_defer_accept - 1;
848 }
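/* Worked example (commentary, not from the original file): with
 * max_syn_ack_retries = 5 and TCP_DEFER_ACCEPT translated into
 * rskq_defer_accept = 3, a request whose SYN+ACK was ACKed but which has not
 * yet carried data gets no retransmits while num_timeout is 0 or 1, starts
 * resending again once num_timeout reaches 2 (defer_accept - 1), and only
 * expires once num_timeout reaches both limits, i.e. 5 here.
 */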
849
850 static struct request_sock *
851 reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
852 bool attach_listener)
853 {
854 struct request_sock *req;
855
856 req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
857 if (!req)
858 return NULL;
859 req->rsk_listener = NULL;
860 if (attach_listener) {
861 if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
862 kmem_cache_free(ops->slab, req);
863 return NULL;
864 }
865 req->rsk_listener = sk_listener;
866 }
867 req->rsk_ops = ops;
868 req_to_sk(req)->sk_prot = sk_listener->sk_prot;
869 sk_node_init(&req_to_sk(req)->sk_node);
870 sk_tx_queue_clear(req_to_sk(req));
871 req->saved_syn = NULL;
872 req->syncookie = 0;
873 req->num_timeout = 0;
874 req->num_retrans = 0;
875 req->sk = NULL;
876 refcount_set(&req->rsk_refcnt, 0);
877
878 return req;
879 }
880 #define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))
881
882 struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
883 struct sock *sk_listener,
884 bool attach_listener)
885 {
886 struct request_sock *req = reqsk_alloc(ops, sk_listener,
887 attach_listener);
888
889 if (req) {
890 struct inet_request_sock *ireq = inet_rsk(req);
891
892 ireq->ireq_opt = NULL;
893 #if IS_ENABLED(CONFIG_IPV6)
894 ireq->pktopts = NULL;
895 #endif
896 atomic64_set(&ireq->ir_cookie, 0);
897 ireq->ireq_state = TCP_NEW_SYN_RECV;
898 write_pnet(&ireq->ireq_net, sock_net(sk_listener));
899 ireq->ireq_family = sk_listener->sk_family;
900 }
901
902 return req;
903 }
904 EXPORT_SYMBOL(inet_reqsk_alloc);
905
906 void __reqsk_free(struct request_sock *req)
907 {
908 req->rsk_ops->destructor(req);
909 if (req->rsk_listener)
910 sock_put(req->rsk_listener);
911 kfree(req->saved_syn);
912 kmem_cache_free(req->rsk_ops->slab, req);
913 }
914 EXPORT_SYMBOL_GPL(__reqsk_free);
915
916 static struct request_sock *inet_reqsk_clone(struct request_sock *req,
917 struct sock *sk)
918 {
919 struct sock *req_sk, *nreq_sk;
920 struct request_sock *nreq;
921
922 nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
923 if (!nreq) {
924 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
925
926 /* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
927 sock_put(sk);
928 return NULL;
929 }
930
931 req_sk = req_to_sk(req);
932 nreq_sk = req_to_sk(nreq);
933
934 memcpy(nreq_sk, req_sk,
935 offsetof(struct sock, sk_dontcopy_begin));
936 unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
937 req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
938 /* alloc is larger than struct, see above */);
939
940 sk_node_init(&nreq_sk->sk_node);
941 nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
942 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
943 nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
944 #endif
945 nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
946
947 nreq->rsk_listener = sk;
948
949 /* We need not acquire fastopenq->lock
950 * because the child socket is locked in inet_csk_listen_stop().
951 */
952 if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
953 rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);
954
955 return nreq;
956 }
957
958 static void reqsk_queue_migrated(struct request_sock_queue *queue,
959 const struct request_sock *req)
960 {
961 if (req->num_timeout == 0)
962 atomic_inc(&queue->young);
963 atomic_inc(&queue->qlen);
964 }
965
966 static void reqsk_migrate_reset(struct request_sock *req)
967 {
968 req->saved_syn = NULL;
969 #if IS_ENABLED(CONFIG_IPV6)
970 inet_rsk(req)->ipv6_opt = NULL;
971 inet_rsk(req)->pktopts = NULL;
972 #else
973 inet_rsk(req)->ireq_opt = NULL;
974 #endif
975 }
976
977 /* return true if req was found in the ehash table */
978 static bool reqsk_queue_unlink(struct request_sock *req)
979 {
980 struct sock *sk = req_to_sk(req);
981 bool found = false;
982
983 if (sk_hashed(sk)) {
984 struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk);
985 spinlock_t *lock;
986
987 lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
988 spin_lock(lock);
989 found = __sk_nulls_del_node_init_rcu(sk);
990 spin_unlock(lock);
991 }
992
993 return found;
994 }
995
996 static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
997 struct request_sock *req,
998 bool from_timer)
999 {
1000 bool unlinked = reqsk_queue_unlink(req);
1001
1002 if (!from_timer && timer_delete_sync(&req->rsk_timer))
1003 reqsk_put(req);
1004
1005 if (unlinked) {
1006 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
1007 reqsk_put(req);
1008 }
1009
1010 return unlinked;
1011 }
1012
1013 bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
1014 {
1015 return __inet_csk_reqsk_queue_drop(sk, req, false);
1016 }
1017
1018 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
1019 {
1020 inet_csk_reqsk_queue_drop(sk, req);
1021 reqsk_put(req);
1022 }
1023
1024 static void reqsk_timer_handler(struct timer_list *t)
1025 {
1026 struct request_sock *req = timer_container_of(req, t, rsk_timer);
1027 struct request_sock *nreq = NULL, *oreq = req;
1028 struct sock *sk_listener = req->rsk_listener;
1029 struct inet_connection_sock *icsk;
1030 struct request_sock_queue *queue;
1031 struct net *net;
1032 int max_syn_ack_retries, qlen, expire = 0, resend = 0;
1033
1034 if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
1035 struct sock *nsk;
1036
1037 nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
1038 if (!nsk)
1039 goto drop;
1040
1041 nreq = inet_reqsk_clone(req, nsk);
1042 if (!nreq)
1043 goto drop;
1044
1045 /* The new timer for the cloned req can decrease the 2
1046 * by calling inet_csk_reqsk_queue_drop_and_put(), so
1047 * hold another count to prevent use-after-free and
1048 * call reqsk_put() just before return.
1049 */
1050 refcount_set(&nreq->rsk_refcnt, 2 + 1);
1051 timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1052 reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
1053
1054 req = nreq;
1055 sk_listener = nsk;
1056 }
1057
1058 icsk = inet_csk(sk_listener);
1059 net = sock_net(sk_listener);
1060 max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
1061 READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
1062 /* Normally all the openreqs are young and become mature
1063 * (i.e. converted to established socket) for first timeout.
1064 * If synack was not acknowledged for 1 second, it means
1065 * one of the following things: synack was lost, ack was lost,
1066 * rtt is high or nobody planned to ack (i.e. synflood).
1067 * When server is a bit loaded, queue is populated with old
1068 * open requests, reducing effective size of queue.
1069 * When server is well loaded, queue size reduces to zero
1070 * after several minutes of work. It is not synflood,
1071 * it is normal operation. The solution is pruning
1072 * too old entries overriding normal timeout, when
1073 * situation becomes dangerous.
1074 *
1075 * Essentially, we reserve half of the room for young
1076 * embryos, and abort old ones without pity if old
1077 * ones are about to clog our table.
1078 */
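/* A hedged numeric illustration of the pruning below (not from the original
 * comment): with sk_max_ack_backlog = 128 the check fires once qlen exceeds
 * 64. If 96 requests are queued and only 16 of them are still young
 * (young = 32 after the shift), the while loop runs twice (96 >= 32, then
 * 96 >= 64) and max_syn_ack_retries drops by 2, so the oldest embryonic
 * requests are abandoned two retransmissions earlier than usual.
 */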
1079 queue = &icsk->icsk_accept_queue;
1080 qlen = reqsk_queue_len(queue);
1081 if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
1082 int young = reqsk_queue_len_young(queue) << 1;
1083
1084 while (max_syn_ack_retries > 2) {
1085 if (qlen < young)
1086 break;
1087 max_syn_ack_retries--;
1088 young <<= 1;
1089 }
1090 }
1091
1092 syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
1093 &expire, &resend);
1094 tcp_syn_ack_timeout(req);
1095
1096 if (!expire &&
1097 (!resend ||
1098 !tcp_rtx_synack(sk_listener, req) ||
1099 inet_rsk(req)->acked)) {
1100 if (req->num_retrans > 1 && tcp_rsk(req)->accecn_ok)
1101 tcp_rsk(req)->accecn_fail_mode |= TCP_ACCECN_ACE_FAIL_SEND;
1102 if (req->num_timeout++ == 0)
1103 atomic_dec(&queue->young);
1104 mod_timer(&req->rsk_timer, jiffies + tcp_reqsk_timeout(req));
1105
1106 if (!nreq)
1107 return;
1108
1109 if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
1110 /* delete timer */
1111 __inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
1112 goto no_ownership;
1113 }
1114
1115 __NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
1116 reqsk_migrate_reset(oreq);
1117 reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
1118 reqsk_put(oreq);
1119
1120 reqsk_put(nreq);
1121 return;
1122 }
1123
1124 /* Even if we can clone the req, we may not need to retransmit any more
1125 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
1126 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
1127 */
1128 if (nreq) {
1129 __NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
1130 no_ownership:
1131 reqsk_migrate_reset(nreq);
1132 reqsk_queue_removed(queue, nreq);
1133 __reqsk_free(nreq);
1134 }
1135
1136 drop:
1137 __inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
1138 reqsk_put(oreq);
1139 }
1140
1141 static bool reqsk_queue_hash_req(struct request_sock *req)
1142 {
1143 bool found_dup_sk = false;
1144
1145 if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
1146 return false;
1147
1148 /* The timer needs to be setup after a successful insertion. */
1149 req->timeout = tcp_timeout_init((struct sock *)req);
1150 timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1151 mod_timer(&req->rsk_timer, jiffies + req->timeout);
1152
1153 /* before letting lookups find us, make sure all req fields
1154 * are committed to memory and refcnt initialized.
1155 */
1156 smp_wmb();
1157 refcount_set(&req->rsk_refcnt, 2 + 1);
1158 return true;
1159 }
1160
1161 bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req)
1162 {
1163 if (!reqsk_queue_hash_req(req))
1164 return false;
1165
1166 inet_csk_reqsk_queue_added(sk);
1167 return true;
1168 }
1169
1170 static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
1171 const gfp_t priority)
1172 {
1173 struct inet_connection_sock *icsk = inet_csk(newsk);
1174
1175 if (!icsk->icsk_ulp_ops)
1176 return;
1177
1178 icsk->icsk_ulp_ops->clone(req, newsk, priority);
1179 }
1180
1181 /**
1182 * inet_csk_clone_lock - clone an inet socket, and lock its clone
1183 * @sk: the socket to clone
1184 * @req: request_sock
1185 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1186 *
1187 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1188 */
1189 struct sock *inet_csk_clone_lock(const struct sock *sk,
1190 const struct request_sock *req,
1191 const gfp_t priority)
1192 {
1193 struct sock *newsk = sk_clone_lock(sk, priority);
1194 struct inet_connection_sock *newicsk;
1195 const struct inet_request_sock *ireq;
1196 struct inet_sock *newinet;
1197
1198 if (!newsk)
1199 return NULL;
1200
1201 newicsk = inet_csk(newsk);
1202 newinet = inet_sk(newsk);
1203 ireq = inet_rsk(req);
1204
1205 newicsk->icsk_bind_hash = NULL;
1206 newicsk->icsk_bind2_hash = NULL;
1207
1208 newinet->inet_dport = ireq->ir_rmt_port;
1209 newinet->inet_num = ireq->ir_num;
1210 newinet->inet_sport = htons(ireq->ir_num);
1211
1212 newsk->sk_bound_dev_if = ireq->ir_iif;
1213
1214 newsk->sk_daddr = ireq->ir_rmt_addr;
1215 newsk->sk_rcv_saddr = ireq->ir_loc_addr;
1216 newinet->inet_saddr = ireq->ir_loc_addr;
1217
1218 #if IS_ENABLED(CONFIG_IPV6)
1219 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1220 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1221 #endif
1222
1223 /* listeners have SOCK_RCU_FREE, not the children */
1224 sock_reset_flag(newsk, SOCK_RCU_FREE);
1225
1226 inet_sk(newsk)->mc_list = NULL;
1227
1228 newsk->sk_mark = inet_rsk(req)->ir_mark;
1229 atomic64_set(&newsk->sk_cookie,
1230 atomic64_read(&inet_rsk(req)->ir_cookie));
1231
1232 newicsk->icsk_retransmits = 0;
1233 newicsk->icsk_backoff = 0;
1234 newicsk->icsk_probes_out = 0;
1235 newicsk->icsk_probes_tstamp = 0;
1236
1237 /* Deinitialize accept_queue to trap illegal accesses. */
1238 memset(&newicsk->icsk_accept_queue, 0,
1239 sizeof(newicsk->icsk_accept_queue));
1240
1241 inet_sk_set_state(newsk, TCP_SYN_RECV);
1242
1243 inet_clone_ulp(req, newsk, priority);
1244
1245 security_inet_csk_clone(newsk, req);
1246
1247 return newsk;
1248 }
1249
1250 /*
1251 * At this point, there should be no process reference to this
1252 * socket, and thus no user references at all. Therefore we
1253 * can assume the socket waitqueue is inactive and nobody will
1254 * try to jump onto it.
1255 */
1256 void inet_csk_destroy_sock(struct sock *sk)
1257 {
1258 WARN_ON(sk->sk_state != TCP_CLOSE);
1259 WARN_ON(!sock_flag(sk, SOCK_DEAD));
1260
1261 /* It cannot be in hash table! */
1262 WARN_ON(!sk_unhashed(sk));
1263
1264 /* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
1265 WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
1266
1267 sk->sk_prot->destroy(sk);
1268
1269 sk_stream_kill_queues(sk);
1270
1271 xfrm_sk_free_policy(sk);
1272
1273 tcp_orphan_count_dec();
1274
1275 sock_put(sk);
1276 }
1277 EXPORT_SYMBOL(inet_csk_destroy_sock);
1278
1279 void inet_csk_prepare_for_destroy_sock(struct sock *sk)
1280 {
1281 /* The below has to be done to allow calling inet_csk_destroy_sock */
1282 sock_set_flag(sk, SOCK_DEAD);
1283 tcp_orphan_count_inc();
1284 }
1285
1286 /* This function allows to force a closure of a socket after the call to
1287 * tcp_create_openreq_child().
1288 */
1289 void inet_csk_prepare_forced_close(struct sock *sk)
1290 __releases(&sk->sk_lock.slock)
1291 {
1292 /* sk_clone_lock locked the socket and set refcnt to 2 */
1293 bh_unlock_sock(sk);
1294 sock_put(sk);
1295 inet_csk_prepare_for_destroy_sock(sk);
1296 inet_sk(sk)->inet_num = 0;
1297 }
1298 EXPORT_SYMBOL(inet_csk_prepare_forced_close);
1299
1300 static int inet_ulp_can_listen(const struct sock *sk)
1301 {
1302 const struct inet_connection_sock *icsk = inet_csk(sk);
1303
1304 if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
1305 return -EINVAL;
1306
1307 return 0;
1308 }
1309
1310 static void reqsk_queue_alloc(struct request_sock_queue *queue)
1311 {
1312 queue->fastopenq.rskq_rst_head = NULL;
1313 queue->fastopenq.rskq_rst_tail = NULL;
1314 queue->fastopenq.qlen = 0;
1315
1316 queue->rskq_accept_head = NULL;
1317 }
1318
1319 int inet_csk_listen_start(struct sock *sk)
1320 {
1321 struct inet_connection_sock *icsk = inet_csk(sk);
1322 struct inet_sock *inet = inet_sk(sk);
1323 int err;
1324
1325 err = inet_ulp_can_listen(sk);
1326 if (unlikely(err))
1327 return err;
1328
1329 reqsk_queue_alloc(&icsk->icsk_accept_queue);
1330
1331 sk->sk_ack_backlog = 0;
1332 inet_csk_delack_init(sk);
1333
1334 /* There is a race window here: we announce ourselves listening,
1335 * but this transition is still not validated by get_port().
1336 * It is OK, because this socket enters the hash table only
1337 * after validation is complete.
1338 */
1339 inet_sk_state_store(sk, TCP_LISTEN);
1340 err = sk->sk_prot->get_port(sk, inet->inet_num);
1341 if (!err) {
1342 inet->inet_sport = htons(inet->inet_num);
1343
1344 sk_dst_reset(sk);
1345 err = sk->sk_prot->hash(sk);
1346
1347 if (likely(!err))
1348 return 0;
1349 }
1350
1351 inet_sk_set_state(sk, TCP_CLOSE);
1352 return err;
1353 }
1354
1355 static void inet_child_forget(struct sock *sk, struct request_sock *req,
1356 struct sock *child)
1357 {
1358 sk->sk_prot->disconnect(child, O_NONBLOCK);
1359
1360 sock_orphan(child);
1361
1362 tcp_orphan_count_inc();
1363
1364 if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
1365 BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
1366 BUG_ON(sk != req->rsk_listener);
1367
1368 /* Paranoid, to prevent race condition if
1369 * an inbound pkt destined for child is
1370 * blocked by sock lock in tcp_v4_rcv().
1371 * Also to satisfy an assertion in
1372 * tcp_v4_destroy_sock().
1373 */
1374 RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
1375 }
1376 inet_csk_destroy_sock(child);
1377 }
1378
1379 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
1380 struct request_sock *req,
1381 struct sock *child)
1382 {
1383 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1384
1385 spin_lock(&queue->rskq_lock);
1386 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1387 inet_child_forget(sk, req, child);
1388 child = NULL;
1389 } else {
1390 req->sk = child;
1391 req->dl_next = NULL;
1392 if (queue->rskq_accept_head == NULL)
1393 WRITE_ONCE(queue->rskq_accept_head, req);
1394 else
1395 queue->rskq_accept_tail->dl_next = req;
1396 queue->rskq_accept_tail = req;
1397 sk_acceptq_added(sk);
1398 }
1399 spin_unlock(&queue->rskq_lock);
1400 return child;
1401 }
1402 EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
1403
1404 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
1405 struct request_sock *req, bool own_req)
1406 {
1407 if (own_req) {
1408 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
1409 reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
1410
1411 if (sk != req->rsk_listener) {
1412 /* another listening sk has been selected,
1413 * migrate the req to it.
1414 */
1415 struct request_sock *nreq;
1416
1417 /* hold a refcnt for the nreq->rsk_listener
1418 * which is assigned in inet_reqsk_clone()
1419 */
1420 sock_hold(sk);
1421 nreq = inet_reqsk_clone(req, sk);
1422 if (!nreq) {
1423 inet_child_forget(sk, req, child);
1424 goto child_put;
1425 }
1426
1427 refcount_set(&nreq->rsk_refcnt, 1);
1428 if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
1429 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
1430 reqsk_migrate_reset(req);
1431 reqsk_put(req);
1432 return child;
1433 }
1434
1435 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
1436 reqsk_migrate_reset(nreq);
1437 __reqsk_free(nreq);
1438 } else if (inet_csk_reqsk_queue_add(sk, req, child)) {
1439 return child;
1440 }
1441 }
1442 /* Too bad, another child took ownership of the request, undo. */
1443 child_put:
1444 bh_unlock_sock(child);
1445 sock_put(child);
1446 return NULL;
1447 }
1448
1449 /*
1450 * This routine closes sockets which have been at least partially
1451 * opened, but not yet accepted.
1452 */
1453 void inet_csk_listen_stop(struct sock *sk)
1454 {
1455 struct inet_connection_sock *icsk = inet_csk(sk);
1456 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
1457 struct request_sock *next, *req;
1458
1459 /* Following specs, it would be better either to send FIN
1460 * (and enter FIN-WAIT-1, it is normal close)
1461 * or to send active reset (abort).
1462 * Certainly, it is pretty dangerous while synflood, but it is
1463 * bad justification for our negligence 8)
1464 * To be honest, we are not able to make either
1465 * of the variants now. --ANK
1466 */
1467 while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
1468 struct sock *child = req->sk, *nsk;
1469 struct request_sock *nreq;
1470
1471 local_bh_disable();
1472 bh_lock_sock(child);
1473 WARN_ON(sock_owned_by_user(child));
1474 sock_hold(child);
1475
1476 nsk = reuseport_migrate_sock(sk, child, NULL);
1477 if (nsk) {
1478 nreq = inet_reqsk_clone(req, nsk);
1479 if (nreq) {
1480 refcount_set(&nreq->rsk_refcnt, 1);
1481
1482 rcu_read_lock();
1483 if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
1484 __NET_INC_STATS(sock_net(nsk),
1485 LINUX_MIB_TCPMIGRATEREQSUCCESS);
1486 reqsk_migrate_reset(req);
1487 READ_ONCE(nsk->sk_data_ready)(nsk);
1488 } else {
1489 __NET_INC_STATS(sock_net(nsk),
1490 LINUX_MIB_TCPMIGRATEREQFAILURE);
1491 reqsk_migrate_reset(nreq);
1492 __reqsk_free(nreq);
1493 }
1494 rcu_read_unlock();
1495
1496 /* inet_csk_reqsk_queue_add() has already
1497 * called inet_child_forget() on failure case.
1498 */
1499 goto skip_child_forget;
1500 }
1501 }
1502
1503 inet_child_forget(sk, req, child);
1504 skip_child_forget:
1505 reqsk_put(req);
1506 bh_unlock_sock(child);
1507 local_bh_enable();
1508 sock_put(child);
1509
1510 cond_resched();
1511 }
1512 if (queue->fastopenq.rskq_rst_head) {
1513 /* Free all the reqs queued in rskq_rst_head. */
1514 spin_lock_bh(&queue->fastopenq.lock);
1515 req = queue->fastopenq.rskq_rst_head;
1516 queue->fastopenq.rskq_rst_head = NULL;
1517 spin_unlock_bh(&queue->fastopenq.lock);
1518 while (req != NULL) {
1519 next = req->dl_next;
1520 reqsk_put(req);
1521 req = next;
1522 }
1523 }
1524 WARN_ON_ONCE(sk->sk_ack_backlog);
1525 }
1526
1527 static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
1528 {
1529 const struct inet_sock *inet = inet_sk(sk);
1530 struct flowi4 *fl4;
1531 struct rtable *rt;
1532
1533 rcu_read_lock();
1534 fl4 = &fl->u.ip4;
1535 inet_sk_init_flowi4(inet, fl4);
1536 rt = ip_route_output_flow(sock_net(sk), fl4, sk);
1537 if (IS_ERR(rt))
1538 rt = NULL;
1539 if (rt)
1540 sk_setup_caps(sk, &rt->dst);
1541 rcu_read_unlock();
1542
1543 return &rt->dst;
1544 }
1545
1546 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
1547 {
1548 struct dst_entry *dst = __sk_dst_check(sk, 0);
1549 struct inet_sock *inet = inet_sk(sk);
1550
1551 if (!dst) {
1552 dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1553 if (!dst)
1554 goto out;
1555 }
1556 dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
1557
1558 dst = __sk_dst_check(sk, 0);
1559 if (!dst)
1560 dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1561 out:
1562 return dst;
1563 }
1564