// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_sk*_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses
 *				if IPv6 only, and any IPv4 addresses
 *				if not IPv6 only
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 *				and 0.0.0.0 equals to 0.0.0.0 only
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				 const struct in6_addr *sk2_rcv_saddr6,
				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk1_ipv6only, bool sk2_ipv6only,
				 bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
		if (!sk2_ipv6only) {
			if (sk1_rcv_saddr == sk2_rcv_saddr)
				return true;
			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
				(match_sk2_wildcard && !sk2_rcv_saddr);
		}
		return false;
	}

	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
		return true;

	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return true;

	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return true;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
		return true;

	return false;
}
#endif

/* match_sk*_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				0.0.0.0 only equals to 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk2_ipv6only, bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_rcv_saddr == sk2_rcv_saddr)
			return true;
		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
			(match_sk2_wildcard && !sk2_rcv_saddr);
	}
	return false;
}

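/* Dispatch to the IPv4 or IPv6 comparison above depending on sk's family.
 * match_wildcard applies to both sockets' addresses.
 */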
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			  bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
					    inet6_rcv_saddr(sk2),
					    sk->sk_rcv_saddr,
					    sk2->sk_rcv_saddr,
					    ipv6_only_sock(sk),
					    ipv6_only_sock(sk2),
					    match_wildcard,
					    match_wildcard);
#endif
	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
				    ipv6_only_sock(sk2), match_wildcard,
				    match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);

bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
	return !sk->sk_rcv_saddr;
}

/**
 * inet_sk_get_local_port_range - fetch ephemeral ports range
 * @sk: socket
 * @low: pointer to low port
 * @high: pointer to high port
 *
 * Fetch netns port range (/proc/sys/net/ipv4/ip_local_port_range)
 * Range can be overridden if socket got IP_LOCAL_PORT_RANGE option.
 * Returns true if IP_LOCAL_PORT_RANGE was set on this socket.
 */
bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
{
	int lo, hi, sk_lo, sk_hi;
	bool local_range = false;
	u32 sk_range;

	inet_get_local_port_range(sock_net(sk), &lo, &hi);

	sk_range = READ_ONCE(inet_sk(sk)->local_port_range);
	if (unlikely(sk_range)) {
		sk_lo = sk_range & 0xffff;
		sk_hi = sk_range >> 16;

		if (lo <= sk_lo && sk_lo <= hi)
			lo = sk_lo;
		if (lo <= sk_hi && sk_hi <= hi)
			hi = sk_hi;
		local_range = true;
	}

	*low = lo;
	*high = hi;
	return local_range;
}
EXPORT_SYMBOL(inet_sk_get_local_port_range);

static bool inet_use_bhash2_on_bind(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);

		return addr_type != IPV6_ADDR_ANY &&
			addr_type != IPV6_ADDR_MAPPED;
	}
#endif
	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
}

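/* Roughly: sockets bound to different (non-zero) devices never conflict;
 * otherwise a conflict on the same port is avoided only via SO_REUSEADDR
 * (both sockets, with sk2 not listening) or SO_REUSEPORT (both sockets,
 * same uid or sk2 in TIME_WAIT).  @relax and @reuseport_ok control how
 * permissive those exceptions are.
 */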
static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
			       kuid_t sk_uid, bool relax,
			       bool reuseport_cb_ok, bool reuseport_ok)
{
	int bound_dev_if2;

	if (sk == sk2)
		return false;

	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);

	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
	    sk->sk_bound_dev_if == bound_dev_if2) {
		if (sk->sk_reuse && sk2->sk_reuse &&
		    sk2->sk_state != TCP_LISTEN) {
			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
				       sk2->sk_reuseport && reuseport_cb_ok &&
				       (sk2->sk_state == TCP_TIME_WAIT ||
					uid_eq(sk_uid, sock_i_uid(sk2)))))
				return true;
		} else if (!reuseport_ok || !sk->sk_reuseport ||
			   !sk2->sk_reuseport || !reuseport_cb_ok ||
			   (sk2->sk_state != TCP_TIME_WAIT &&
			    !uid_eq(sk_uid, sock_i_uid(sk2)))) {
			return true;
		}
	}
	return false;
}

static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
				   kuid_t sk_uid, bool relax,
				   bool reuseport_cb_ok, bool reuseport_ok)
{
	if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
		return false;

	return inet_bind_conflict(sk, sk2, sk_uid, relax,
				  reuseport_cb_ok, reuseport_ok);
}

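/* Check @sk for bind conflicts against every socket bound into this
 * (address, port) bucket, including timewait sockets on the deathrow list.
 */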
static bool inet_bhash2_conflict(const struct sock *sk,
				 const struct inet_bind2_bucket *tb2,
				 kuid_t sk_uid,
				 bool relax, bool reuseport_cb_ok,
				 bool reuseport_ok)
{
	struct inet_timewait_sock *tw2;
	struct sock *sk2;

	sk_for_each_bound_bhash2(sk2, &tb2->owners) {
		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
					   reuseport_cb_ok, reuseport_ok))
			return true;
	}

	twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
		sk2 = (struct sock *)tw2;

		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
					   reuseport_cb_ok, reuseport_ok))
			return true;
	}

	return false;
}

/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  const struct inet_bind2_bucket *tb2, /* may be null */
				  bool relax, bool reuseport_ok)
{
	bool reuseport_cb_ok;
	struct sock_reuseport *reuseport_cb;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners and tb2->owners list belong
	 * to the same net - the one this bucket belongs to.
	 */

	if (!inet_use_bhash2_on_bind(sk)) {
		struct sock *sk2;

		sk_for_each_bound(sk2, &tb->owners)
			if (inet_bind_conflict(sk, sk2, uid, relax,
					       reuseport_cb_ok, reuseport_ok) &&
			    inet_rcv_saddr_equal(sk, sk2, true))
				return true;

		return false;
	}

	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
	 * ipv4) should have been checked already. We need to do these two
	 * checks separately because their spinlocks have to be acquired/released
	 * independently of each other, to prevent possible deadlocks
	 */
	return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
					   reuseport_ok);
}

/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
 * INADDR_ANY (if ipv4) socket.
 *
 * Caller must hold bhash hashbucket lock with local bh disabled, to protect
 * against concurrent binds on the port for addr any
 */
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
					  bool relax, bool reuseport_ok)
{
	kuid_t uid = sock_i_uid((struct sock *)sk);
	const struct net *net = sock_net(sk);
	struct sock_reuseport *reuseport_cb;
	struct inet_bind_hashbucket *head2;
	struct inet_bind2_bucket *tb2;
	bool reuseport_cb_ok;

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);

	spin_lock(&head2->lock);

	inet_bind_bucket_for_each(tb2, &head2->chain)
		if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
			break;

	if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
					reuseport_ok)) {
		spin_unlock(&head2->lock);
		return true;
	}

	spin_unlock(&head2->lock);
	return false;
}

/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket locks held if successful.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
			struct inet_bind2_bucket **tb2_ret,
			struct inet_bind_hashbucket **head2_ret, int *port_ret)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	int i, low, high, attempt_half, port, l3mdev;
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	bool relax = false;

	l3mdev = inet_sk_bound_l3mdev(sk);
ports_exhausted:
	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_sk_get_local_port_range(sk, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000[ */
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = get_random_u32_below(remaining);
	/* __inet_hash_connect() favors ports having @low parity
	 * We do the opposite to not pollute connect() users.
	 */
	offset |= 1U;

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		if (inet_use_bhash2_on_bind(sk)) {
			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
				goto next_port;
		}

		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
		spin_lock(&head2->lock);
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (!inet_csk_bind_conflict(sk, tb, tb2,
							    relax, false))
					goto success;
				spin_unlock(&head2->lock);
				goto next_port;
			}
		tb = NULL;
		goto success;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
		/* OK we now try the upper half of the range */
		attempt_half = 2;
		goto other_half_scan;
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
		/* We still have a chance to connect to different destinations */
		relax = true;
		goto ports_exhausted;
	}
	return NULL;
success:
	*port_ret = port;
	*tb_ret = tb;
	*tb2_ret = tb2;
	*head2_ret = head2;
	return head;
}

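/* Fast path for SO_REUSEPORT binds: return 1 when @sk can join @tb based
 * purely on the fastreuseport state cached in the bucket (same uid and,
 * for FASTREUSEPORT_STRICT, a matching rcv_saddr), so the full conflict
 * scan can be skipped.
 */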
static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
				     struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);

	if (tb->fastreuseport <= 0)
		return 0;
	if (!sk->sk_reuseport)
		return 0;
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		return 0;
	if (!uid_eq(tb->fastuid, uid))
		return 0;
	/* We only need to check the rcv_saddr if this tb was once marked
	 * without fastreuseport and then was reset, as we can only know that
	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
	 * owners list.
	 */
	if (tb->fastreuseport == FASTREUSEPORT_ANY)
		return 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tb->fast_sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
					    inet6_rcv_saddr(sk),
					    tb->fast_rcv_saddr,
					    sk->sk_rcv_saddr,
					    tb->fast_ipv6_only,
					    ipv6_only_sock(sk), true, false);
#endif
	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
				    ipv6_only_sock(sk), true, false);
}

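/* Record (or invalidate) the fastreuse/fastreuseport hints on @tb as @sk
 * joins the bucket, so that later bind() calls can skip the conflict scan.
 */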
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->owners)) {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = FASTREUSEPORT_ANY;
			tb->fastuid = uid;
			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
			tb->fast_ipv6_only = ipv6_only_sock(sk);
			tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
		} else {
			tb->fastreuseport = 0;
		}
	} else {
		if (!reuse)
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			/* We didn't match or we don't have fastreuseport set on
			 * the tb, but we have sk_reuseport set on this socket
			 * and we know that there are no bind conflicts with
			 * this socket in this tb, so reset our tb's reuseport
			 * settings so that any subsequent sockets that match
			 * our current socket will be put on the fast path.
			 *
			 * If we reset we need to set FASTREUSEPORT_STRICT so we
			 * do extra checking for all subsequent sk_reuseport
			 * socks.
			 */
			if (!sk_reuseport_match(tb, sk)) {
				tb->fastreuseport = FASTREUSEPORT_STRICT;
				tb->fastuid = uid;
				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
				tb->fast_ipv6_only = ipv6_only_sock(sk);
				tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
			}
		} else {
			tb->fastreuseport = 0;
		}
	}
}

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	bool found_port = false, check_bind_conflict = true;
	bool bhash_created = false, bhash2_created = false;
	int ret = -EADDRINUSE, port = snum, l3mdev;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2 = NULL;
	struct inet_bind_bucket *tb = NULL;
	bool head2_lock_acquired = false;
	struct net *net = sock_net(sk);

	l3mdev = inet_sk_bound_l3mdev(sk);

	if (!port) {
		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
		if (!head)
			return ret;

		head2_lock_acquired = true;

		if (tb && tb2)
			goto success;
		found_port = true;
	} else {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
	}

	if (!tb) {
		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
					     head, port, l3mdev);
		if (!tb)
			goto fail_unlock;
		bhash_created = true;
	}

	if (!found_port) {
		if (!hlist_empty(&tb->owners)) {
			if (sk->sk_reuse == SK_FORCE_REUSE ||
			    (tb->fastreuse > 0 && reuse) ||
			    sk_reuseport_match(tb, sk))
				check_bind_conflict = false;
		}

		if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
				goto fail_unlock;
		}

		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
		spin_lock(&head2->lock);
		head2_lock_acquired = true;
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	}

	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
					       net, head2, port, l3mdev, sk);
		if (!tb2)
			goto fail_unlock;
		bhash2_created = true;
	}

	if (!found_port && check_bind_conflict) {
		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
			goto fail_unlock;
	}

success:
	inet_csk_update_fastreuse(tb, sk);

	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, tb2, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
	ret = 0;

fail_unlock:
	if (ret) {
		if (bhash_created)
			inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
		if (bhash2_created)
			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
						  tb2);
	}
	if (head2_lock_acquired)
		spin_unlock(&head2->lock);
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}

out:
	release_sock(sk);
	if (newsk && mem_cgroup_sockets_enabled) {
		int amt = 0;

		/* atomically get the memory usage, set and charge the
		 * newsk->sk_memcg.
		 */
		lock_sock(newsk);

		mem_cgroup_sk_alloc(newsk);
		if (newsk->sk_memcg) {
			/* The socket has not been accepted yet, no need
			 * to look at newsk->sk_wmem_queued.
			 */
			amt = sk_mem_pages(newsk->sk_forward_alloc +
					   atomic_read(&newsk->sk_rmem_alloc));
		}

		if (amt)
			mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
						GFP_KERNEL | __GFP_NOFAIL);

		release_sock(newsk);
	}
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *t),
			       void (*delack_handler)(struct timer_list *t),
			       void (*keepalive_handler)(struct timer_list *t))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
	timer_setup(&sk->sk_timer, keepalive_handler, 0);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

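/* Build the IPv4 route used when replying to @req (the SYN-ACK), honoring
 * any source-route (SRR) option recorded from the incoming SYN.
 */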
struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

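/* Same lookup as above, but for a freshly created child socket: the flow
 * is written into the child's inet cork so its first transmissions can
 * reuse it.
 */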
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	opt = rcu_dereference(ireq->ireq_opt);
	fl4 = &newinet->cork.fl.u.ip4;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

/* Decide when to expire the request and when to resend SYN-ACK */
static void syn_ack_recalc(struct request_sock *req,
			   const int max_syn_ack_retries,
			   const u8 rskq_defer_accept,
			   int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= max_syn_ack_retries;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= max_syn_ack_retries &&
		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
	/* Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

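/* Clone @req onto listener @sk for SO_REUSEPORT migration.  The caller
 * passes in a reference on @sk, which is consumed here on failure.
 */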
static struct request_sock *inet_reqsk_clone(struct request_sock *req,
					     struct sock *sk)
{
	struct sock *req_sk, *nreq_sk;
	struct request_sock *nreq;

	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!nreq) {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
		sock_put(sk);
		return NULL;
	}

	req_sk = req_to_sk(req);
	nreq_sk = req_to_sk(nreq);

	memcpy(nreq_sk, req_sk,
	       offsetof(struct sock, sk_dontcopy_begin));
	memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
	       req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));

	sk_node_init(&nreq_sk->sk_node);
	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;

	nreq->rsk_listener = sk;

	/* We need not acquire fastopenq->lock
	 * because the child socket is locked in inet_csk_listen_stop().
	 */
	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);

	return nreq;
}

static void reqsk_queue_migrated(struct request_sock_queue *queue,
				 const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static void reqsk_migrate_reset(struct request_sock *req)
{
	req->saved_syn = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	inet_rsk(req)->ipv6_opt = NULL;
	inet_rsk(req)->pktopts = NULL;
#else
	inet_rsk(req)->ireq_opt = NULL;
#endif
}

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{
	struct sock *sk = req_to_sk(req);
	bool found = false;

	if (sk_hashed(sk)) {
		struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(sk);
		spin_unlock(lock);
	}
	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	bool unlinked = reqsk_queue_unlink(req);

	if (unlinked) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
	return unlinked;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

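/* SYN-ACK retransmission timer.  If the listener has left TCP_LISTEN,
 * try to migrate the request to another SO_REUSEPORT listener first,
 * then decide whether to retransmit the SYN-ACK or drop the request.
 */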
static void reqsk_timer_handler(struct timer_list *t)
{
	struct request_sock *req = from_timer(req, t, rsk_timer);
	struct request_sock *nreq = NULL, *oreq = req;
	struct sock *sk_listener = req->rsk_listener;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	struct net *net;
	int max_syn_ack_retries, qlen, expire = 0, resend = 0;

	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
		struct sock *nsk;

		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
		if (!nsk)
			goto drop;

		nreq = inet_reqsk_clone(req, nsk);
		if (!nreq)
			goto drop;

		/* The new timer for the cloned req can decrease the 2
		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
		 * hold another count to prevent use-after-free and
		 * call reqsk_put() just before return.
		 */
		refcount_set(&nreq->rsk_refcnt, 2 + 1);
		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);

		req = nreq;
		sk_listener = nsk;
	}

	icsk = inet_csk(sk_listener);
	net = sock_net(sk_listener);
	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	queue = &icsk->icsk_accept_queue;
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (max_syn_ack_retries > 2) {
			if (qlen < young)
				break;
			max_syn_ack_retries--;
			young <<= 1;
		}
	}
	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));

		if (!nreq)
			return;

		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
			/* delete timer */
			inet_csk_reqsk_queue_drop(sk_listener, nreq);
			goto no_ownership;
		}

		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
		reqsk_migrate_reset(oreq);
		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
		reqsk_put(oreq);

		reqsk_put(nreq);
		return;
	}

	/* Even if we can clone the req, we may not need to retransmit any more
	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
	 */
	if (nreq) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
no_ownership:
		reqsk_migrate_reset(nreq);
		reqsk_queue_removed(queue, nreq);
		__reqsk_free(nreq);
	}

drop:
	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
}

static void reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{
	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
	mod_timer(&req->rsk_timer, jiffies + timeout);

	inet_ehash_insert(req_to_sk(req), NULL, NULL);
	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	refcount_set(&req->rsk_refcnt, 2 + 1);
}

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	reqsk_queue_hash_req(req, timeout);
	inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

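/* Let the listener's upper layer protocol (ULP), if one is attached,
 * clone its state onto the new child socket.
 */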
static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
			   const gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(newsk);

	if (!icsk->icsk_ulp_ops)
		return;

	icsk->icsk_ulp_ops->clone(req, newsk, priority);
}

/**
 * inet_csk_clone_lock - clone an inet socket, and lock its clone
 * @sk: the socket to clone
 * @req: request_sock
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		inet_sk_set_state(newsk, TCP_SYN_RECV);
		newicsk->icsk_bind_hash = NULL;
		newicsk->icsk_bind2_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);

		/* listeners have SOCK_RCU_FREE, not the children */
		sock_reset_flag(newsk, SOCK_RCU_FREE);

		inet_sk(newsk)->mc_list = NULL;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff = 0;
		newicsk->icsk_probes_out = 0;
		newicsk->icsk_probes_tstamp = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		inet_clone_ulp(req, newsk, priority);

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If inet_sk(sk)->inet_num is not 0, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	this_cpu_dec(*sk->sk_prot->orphan_count);

	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows us to force the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);
	inet_csk_prepare_for_destroy_sock(sk);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

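/* Refuse listen() when a ULP without a ->clone() operation is attached,
 * since its state could not be carried over to accepted children.
 */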
static int inet_ulp_can_listen(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
		return -EINVAL;

	return 0;
}

int inet_csk_listen_start(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err;

	err = inet_ulp_can_listen(sk);
	if (unlikely(err))
		return err;

	reqsk_queue_alloc(&icsk->icsk_accept_queue);

	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	inet_sk_state_store(sk, TCP_LISTEN);
	err = sk->sk_prot->get_port(sk, inet->inet_num);
	if (!err) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		err = sk->sk_prot->hash(sk);

		if (likely(!err))
			return 0;
	}

	inet_sk_set_state(sk, TCP_CLOSE);
	return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{
	sk->sk_prot->disconnect(child, O_NONBLOCK);

	sock_orphan(child);

	this_cpu_inc(*sk->sk_prot->orphan_count);

	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
		BUG_ON(sk != req->rsk_listener);

		/* Paranoid, to prevent race condition if
		 * an inbound pkt destined for child is
		 * blocked by sock lock in tcp_v4_rcv().
		 * Also to satisfy an assertion in
		 * tcp_v4_destroy_sock().
		 */
		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
	}
	inet_csk_destroy_sock(child);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

	spin_lock(&queue->rskq_lock);
	if (unlikely(sk->sk_state != TCP_LISTEN)) {
		inet_child_forget(sk, req, child);
		child = NULL;
	} else {
		req->sk = child;
		req->dl_next = NULL;
		if (queue->rskq_accept_head == NULL)
			WRITE_ONCE(queue->rskq_accept_head, req);
		else
			queue->rskq_accept_tail->dl_next = req;
		queue->rskq_accept_tail = req;
		sk_acceptq_added(sk);
	}
	spin_unlock(&queue->rskq_lock);
	return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

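/* Final step of the 3WHS for a new child: drop @req from the listener's
 * queues and queue @child for accept(), cloning the request onto @sk first
 * if it was migrated to a different listener.  Returns @child, or NULL
 * when ownership of the request was lost or queueing failed.
 */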
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{
	if (own_req) {
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);

		if (sk != req->rsk_listener) {
			/* another listening sk has been selected,
			 * migrate the req to it.
			 */
			struct request_sock *nreq;

			/* hold a refcnt for the nreq->rsk_listener
			 * which is assigned in inet_reqsk_clone()
			 */
			sock_hold(sk);
			nreq = inet_reqsk_clone(req, sk);
			if (!nreq) {
				inet_child_forget(sk, req, child);
				goto child_put;
			}

			refcount_set(&nreq->rsk_refcnt, 1);
			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
				reqsk_migrate_reset(req);
				reqsk_put(req);
				return child;
			}

			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
			reqsk_migrate_reset(nreq);
			__reqsk_free(nreq);
		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
			return child;
		}
	}
	/* Too bad, another child took ownership of the request, undo. */
child_put:
	bh_unlock_sock(child);
	sock_put(child);
	return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *next, *req;

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk, *nsk;
		struct request_sock *nreq;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		nsk = reuseport_migrate_sock(sk, child, NULL);
		if (nsk) {
			nreq = inet_reqsk_clone(req, nsk);
			if (nreq) {
				refcount_set(&nreq->rsk_refcnt, 1);

				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
					__NET_INC_STATS(sock_net(nsk),
							LINUX_MIB_TCPMIGRATEREQSUCCESS);
					reqsk_migrate_reset(req);
				} else {
					__NET_INC_STATS(sock_net(nsk),
							LINUX_MIB_TCPMIGRATEREQFAILURE);
					reqsk_migrate_reset(nreq);
					__reqsk_free(nreq);
				}

				/* inet_csk_reqsk_queue_add() has already
				 * called inet_child_forget() on failure case.
				 */
				goto skip_child_forget;
			}
		}

		inet_child_forget(sk, req, child);
skip_child_forget:
		reqsk_put(req);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		cond_resched();
	}
	if (queue->fastopenq.rskq_rst_head) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq.lock);
		req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while (req != NULL) {
			next = req->dl_next;
			reqsk_put(req);
			req = next;
		}
	}
	WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

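/* Re-resolve the IPv4 route for an established socket (honoring any
 * source-route option) and cache it on the socket via sk_setup_caps().
 */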
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);