// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_sk*_wildcard == true:  IPV6_ADDR_ANY equals any IPv6 address
 *				if IPv6 only, and any IPv4 address
 *				if not IPv6 only
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				IPV6_ADDR_ANY only equals IPV6_ADDR_ANY,
 *				and 0.0.0.0 only equals 0.0.0.0
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				 const struct in6_addr *sk2_rcv_saddr6,
				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk1_ipv6only, bool sk2_ipv6only,
				 bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
		if (!sk2_ipv6only) {
			if (sk1_rcv_saddr == sk2_rcv_saddr)
				return true;
			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
				(match_sk2_wildcard && !sk2_rcv_saddr);
		}
		return false;
	}

	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
		return true;

	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return true;

	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return true;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
		return true;

	return false;
}
#endif

/* match_sk*_wildcard == true:  0.0.0.0 equals any IPv4 address
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				0.0.0.0 only equals 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk2_ipv6only, bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_rcv_saddr == sk2_rcv_saddr)
			return true;
		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
			(match_sk2_wildcard && !sk2_rcv_saddr);
	}
	return false;
}

bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			  bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
					    inet6_rcv_saddr(sk2),
					    sk->sk_rcv_saddr,
					    sk2->sk_rcv_saddr,
					    ipv6_only_sock(sk),
					    ipv6_only_sock(sk2),
					    match_wildcard,
					    match_wildcard);
#endif
	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
				    ipv6_only_sock(sk2), match_wildcard,
				    match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);

bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
	return !sk->sk_rcv_saddr;
}

/**
 * inet_sk_get_local_port_range - fetch ephemeral ports range
 * @sk: socket
 * @low: pointer to low port
 * @high: pointer to high port
 *
 * Fetch netns port range (/proc/sys/net/ipv4/ip_local_port_range)
 * Range can be overridden if socket got IP_LOCAL_PORT_RANGE option.
 * Returns true if IP_LOCAL_PORT_RANGE was set on this socket.
 */
bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
{
	int lo, hi, sk_lo, sk_hi;
	bool local_range = false;
	u32 sk_range;

	inet_get_local_port_range(sock_net(sk), &lo, &hi);

	sk_range = READ_ONCE(inet_sk(sk)->local_port_range);
	if (unlikely(sk_range)) {
		sk_lo = sk_range & 0xffff;
		sk_hi = sk_range >> 16;

		if (lo <= sk_lo && sk_lo <= hi)
			lo = sk_lo;
		if (lo <= sk_hi && sk_hi <= hi)
			hi = sk_hi;
		local_range = true;
	}

	*low = lo;
	*high = hi;
	return local_range;
}
EXPORT_SYMBOL(inet_sk_get_local_port_range);

static bool inet_use_bhash2_on_bind(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);

		if (addr_type == IPV6_ADDR_ANY)
			return false;

		if (addr_type != IPV6_ADDR_MAPPED)
			return true;
	}
#endif
	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
}

static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
			       kuid_t sk_uid, bool relax,
			       bool reuseport_cb_ok, bool reuseport_ok)
{
	int bound_dev_if2;

	if (sk == sk2)
		return false;

	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);

	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
	    sk->sk_bound_dev_if == bound_dev_if2) {
		if (sk->sk_reuse && sk2->sk_reuse &&
		    sk2->sk_state != TCP_LISTEN) {
			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
				       sk2->sk_reuseport && reuseport_cb_ok &&
				       (sk2->sk_state == TCP_TIME_WAIT ||
					uid_eq(sk_uid, sock_i_uid(sk2)))))
				return true;
		} else if (!reuseport_ok || !sk->sk_reuseport ||
			   !sk2->sk_reuseport || !reuseport_cb_ok ||
			   (sk2->sk_state != TCP_TIME_WAIT &&
			    !uid_eq(sk_uid, sock_i_uid(sk2)))) {
			return true;
		}
	}
	return false;
}
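
/* If sk2 is bound v6-only, an IPv4 socket (or one bound to a v4-mapped
 * address) can never conflict with it; filter those pairs out before
 * falling through to the generic inet_bind_conflict() check.
 */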
static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
				   kuid_t sk_uid, bool relax,
				   bool reuseport_cb_ok, bool reuseport_ok)
{
	if (ipv6_only_sock(sk2)) {
		if (sk->sk_family == AF_INET)
			return false;

#if IS_ENABLED(CONFIG_IPV6)
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			return false;
#endif
	}

	return inet_bind_conflict(sk, sk2, sk_uid, relax,
				  reuseport_cb_ok, reuseport_ok);
}

static bool inet_bhash2_conflict(const struct sock *sk,
				 const struct inet_bind2_bucket *tb2,
				 kuid_t sk_uid,
				 bool relax, bool reuseport_cb_ok,
				 bool reuseport_ok)
{
	struct sock *sk2;

	sk_for_each_bound(sk2, &tb2->owners) {
		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
					   reuseport_cb_ok, reuseport_ok))
			return true;
	}

	return false;
}

#define sk_for_each_bound_bhash(__sk, __tb2, __tb)			\
	hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node)	\
		sk_for_each_bound((__sk), &(__tb2)->owners)

/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  const struct inet_bind2_bucket *tb2, /* may be null */
				  bool relax, bool reuseport_ok)
{
	kuid_t uid = sock_i_uid((struct sock *)sk);
	struct sock_reuseport *reuseport_cb;
	bool reuseport_cb_ok;
	struct sock *sk2;

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
	 * ipv4) should have been checked already. We need to do these two
	 * checks separately because their spinlocks have to be acquired/released
	 * independently of each other, to prevent possible deadlocks
	 */
	if (inet_use_bhash2_on_bind(sk))
		return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax,
						   reuseport_cb_ok, reuseport_ok);

	/* Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners and tb2->owners list belong
	 * to the same net - the one this bucket belongs to.
	 */
	sk_for_each_bound_bhash(sk2, tb2, tb) {
		if (!inet_bind_conflict(sk, sk2, uid, relax, reuseport_cb_ok, reuseport_ok))
			continue;

		if (inet_rcv_saddr_equal(sk, sk2, true))
			return true;
	}

	return false;
}

/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
 * INADDR_ANY (if ipv4) socket.
 *
 * Caller must hold bhash hashbucket lock with local bh disabled, to protect
 * against concurrent binds on the port for addr any
 */
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
					  bool relax, bool reuseport_ok)
{
	kuid_t uid = sock_i_uid((struct sock *)sk);
	const struct net *net = sock_net(sk);
	struct sock_reuseport *reuseport_cb;
	struct inet_bind_hashbucket *head2;
	struct inet_bind2_bucket *tb2;
	bool conflict = false;
	bool reuseport_cb_ok;

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);

	spin_lock(&head2->lock);

	inet_bind_bucket_for_each(tb2, &head2->chain) {
		if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
			continue;

		if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
			continue;

		conflict = true;
		break;
	}

	spin_unlock(&head2->lock);

	return conflict;
}

/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket locks held if successful.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
			struct inet_bind2_bucket **tb2_ret,
			struct inet_bind_hashbucket **head2_ret, int *port_ret)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	int i, low, high, attempt_half, port, l3mdev;
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	bool relax = false;

	l3mdev = inet_sk_bound_l3mdev(sk);
ports_exhausted:
	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_sk_get_local_port_range(sk, &low, &high);
	high++;	/* [32768, 60999] -> [32768, 61000[ */
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = get_random_u32_below(remaining);
	/* __inet_hash_connect() favors ports having @low parity
	 * We do the opposite to not pollute connect() users.
	 */
	offset |= 1U;
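
	/* Scan ports of the opposite parity to @low first; once those are
	 * exhausted, offset-- below flips the parity and the scan restarts
	 * here to cover the remaining ports.
	 */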
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		if (inet_use_bhash2_on_bind(sk)) {
			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
				goto next_port;
		}

		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
		spin_lock(&head2->lock);
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (!inet_csk_bind_conflict(sk, tb, tb2,
							    relax, false))
					goto success;
				spin_unlock(&head2->lock);
				goto next_port;
			}
		tb = NULL;
		goto success;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
		/* OK we now try the upper half of the range */
		attempt_half = 2;
		goto other_half_scan;
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
		/* We still have a chance to connect to different destinations */
		relax = true;
		goto ports_exhausted;
	}
	return NULL;
success:
	*port_ret = port;
	*tb_ret = tb;
	*tb2_ret = tb2;
	*head2_ret = head2;
	return head;
}

static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
				     struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);

	if (tb->fastreuseport <= 0)
		return 0;
	if (!sk->sk_reuseport)
		return 0;
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		return 0;
	if (!uid_eq(tb->fastuid, uid))
		return 0;
	/* We only need to check the rcv_saddr if this tb was once marked
	 * without fastreuseport and then was reset, as we can only know that
	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
	 * owners list.
	 */
	if (tb->fastreuseport == FASTREUSEPORT_ANY)
		return 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tb->fast_sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
					    inet6_rcv_saddr(sk),
					    tb->fast_rcv_saddr,
					    sk->sk_rcv_saddr,
					    tb->fast_ipv6_only,
					    ipv6_only_sock(sk), true, false);
#endif
	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
				    ipv6_only_sock(sk), true, false);
}

void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->bhash2)) {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = FASTREUSEPORT_ANY;
			tb->fastuid = uid;
			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
			tb->fast_ipv6_only = ipv6_only_sock(sk);
			tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
		} else {
			tb->fastreuseport = 0;
		}
	} else {
		if (!reuse)
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			/* We didn't match or we don't have fastreuseport set on
			 * the tb, but we have sk_reuseport set on this socket
			 * and we know that there are no bind conflicts with
			 * this socket in this tb, so reset our tb's reuseport
			 * settings so that any subsequent sockets that match
			 * our current socket will be put on the fast path.
			 *
			 * If we reset we need to set FASTREUSEPORT_STRICT so we
			 * do extra checking for all subsequent sk_reuseport
			 * socks.
			 */
			if (!sk_reuseport_match(tb, sk)) {
				tb->fastreuseport = FASTREUSEPORT_STRICT;
				tb->fastuid = uid;
				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
				tb->fast_ipv6_only = ipv6_only_sock(sk);
				tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
			}
		} else {
			tb->fastreuseport = 0;
		}
	}
}
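
/* Note: port allocation below works against two bucket tables.  bhash is
 * keyed by port only, while bhash2 is keyed by port and bound address,
 * which lets address-specific binds check for conflicts without walking
 * every socket bound to the port.
 */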
/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	bool found_port = false, check_bind_conflict = true;
	bool bhash_created = false, bhash2_created = false;
	int ret = -EADDRINUSE, port = snum, l3mdev;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2 = NULL;
	struct inet_bind_bucket *tb = NULL;
	bool head2_lock_acquired = false;
	struct net *net = sock_net(sk);

	l3mdev = inet_sk_bound_l3mdev(sk);

	if (!port) {
		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
		if (!head)
			return ret;

		head2_lock_acquired = true;

		if (tb && tb2)
			goto success;
		found_port = true;
	} else {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
	}

	if (!tb) {
		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
					     head, port, l3mdev);
		if (!tb)
			goto fail_unlock;
		bhash_created = true;
	}

	if (!found_port) {
		if (!hlist_empty(&tb->bhash2)) {
			if (sk->sk_reuse == SK_FORCE_REUSE ||
			    (tb->fastreuse > 0 && reuse) ||
			    sk_reuseport_match(tb, sk))
				check_bind_conflict = false;
		}

		if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
				goto fail_unlock;
		}

		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
		spin_lock(&head2->lock);
		head2_lock_acquired = true;
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	}

	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
					       net, head2, tb, sk);
		if (!tb2)
			goto fail_unlock;
		bhash2_created = true;
	}

	if (!found_port && check_bind_conflict) {
		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
			goto fail_unlock;
	}

success:
	inet_csk_update_fastreuse(tb, sk);

	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, tb2, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
	ret = 0;

fail_unlock:
	if (ret) {
		if (bhash2_created)
			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, tb2);
		if (bhash_created)
			inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
	}
	if (head2_lock_acquired)
		spin_unlock(&head2->lock);
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);

		/* If this is a non-blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	arg->is_empty = reqsk_queue_empty(queue);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}

out:
	release_sock(sk);
	if (newsk && mem_cgroup_sockets_enabled) {
		gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
		int amt = 0;

		/* atomically get the memory usage, set and charge the
		 * newsk->sk_memcg.
		 */
		lock_sock(newsk);

		mem_cgroup_sk_alloc(newsk);
		if (newsk->sk_memcg) {
			/* The socket has not been accepted yet, no need
			 * to look at newsk->sk_wmem_queued.
			 */
			amt = sk_mem_pages(newsk->sk_forward_alloc +
					   atomic_read(&newsk->sk_rmem_alloc));
		}

		if (amt)
			mem_cgroup_charge_skmem(newsk->sk_memcg, amt, gfp);
		kmem_cache_charge(newsk, gfp);

		release_sock(newsk);
	}
	if (req)
		reqsk_put(req);

	if (newsk)
		inet_init_csk_locks(newsk);

	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	arg->err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish to use just one timer maintaining a list of expiry jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *t),
			       void (*delack_handler)(struct timer_list *t),
			       void (*keepalive_handler)(struct timer_list *t))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
	timer_setup(&sk->sk_timer, keepalive_handler, 0);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_clear_xmit_timers_sync(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* ongoing timer handlers need to acquire socket lock. */
	sock_not_owned_by_me(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = 0;

	sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
	sk_stop_timer_sync(sk, &sk->sk_timer);
}

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);
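
	/* Build the flow from the request itself: the destination is the
	 * peer recorded in the request, or the first hop of a received IP
	 * source-route (SRR) option if one was stored.
	 */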
	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	opt = rcu_dereference(ireq->ireq_opt);
	fl4 = &newinet->cork.fl.u.ip4;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

/* Decide when to expire the request and when to resend SYN-ACK */
static void syn_ack_recalc(struct request_sock *req,
			   const int max_syn_ack_retries,
			   const u8 rskq_defer_accept,
			   int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= max_syn_ack_retries;
		*resend = 1;
		return;
	}
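	/* rskq_defer_accept comes from the TCP_DEFER_ACCEPT socket option:
	 * an already-ACKed request that still has no data is not expired
	 * until req->num_timeout also reaches rskq_defer_accept.
	 */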
	*expire = req->num_timeout >= max_syn_ack_retries &&
		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
	/* Do not resend while waiting for data after ACK,
	 * start to resend at the end of the deferring period to give
	 * a last chance for data or ACK to create an established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

static struct request_sock *
reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
		   bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->syncookie = 0;
	req->timeout = 0;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}
#define reqsk_alloc(...)	alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))

struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
				      struct sock *sk_listener,
				      bool attach_listener)
{
	struct request_sock *req = reqsk_alloc(ops, sk_listener,
					       attach_listener);

	if (req) {
		struct inet_request_sock *ireq = inet_rsk(req);

		ireq->ireq_opt = NULL;
#if IS_ENABLED(CONFIG_IPV6)
		ireq->pktopts = NULL;
#endif
		atomic64_set(&ireq->ir_cookie, 0);
		ireq->ireq_state = TCP_NEW_SYN_RECV;
		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
		ireq->ireq_family = sk_listener->sk_family;
		req->timeout = TCP_TIMEOUT_INIT;
	}

	return req;
}
EXPORT_SYMBOL(inet_reqsk_alloc);

static struct request_sock *inet_reqsk_clone(struct request_sock *req,
					     struct sock *sk)
{
	struct sock *req_sk, *nreq_sk;
	struct request_sock *nreq;

	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!nreq) {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
		sock_put(sk);
		return NULL;
	}

	req_sk = req_to_sk(req);
	nreq_sk = req_to_sk(nreq);

	memcpy(nreq_sk, req_sk,
	       offsetof(struct sock, sk_dontcopy_begin));
	unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
		      req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
		      /* alloc is larger than struct, see above */);

	sk_node_init(&nreq_sk->sk_node);
	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;

	nreq->rsk_listener = sk;

	/* We need not acquire fastopenq->lock
	 * because the child socket is locked in inet_csk_listen_stop().
	 */
	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);

	return nreq;
}

static void reqsk_queue_migrated(struct request_sock_queue *queue,
				 const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static void reqsk_migrate_reset(struct request_sock *req)
{
	req->saved_syn = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	inet_rsk(req)->ipv6_opt = NULL;
	inet_rsk(req)->pktopts = NULL;
#else
	inet_rsk(req)->ireq_opt = NULL;
#endif
}

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{
	struct sock *sk = req_to_sk(req);
	bool found = false;

	if (sk_hashed(sk)) {
		struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(sk);
		spin_unlock(lock);
	}
	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	bool unlinked = reqsk_queue_unlink(req);

	if (unlinked) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
	return unlinked;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(struct timer_list *t)
{
	struct request_sock *req = from_timer(req, t, rsk_timer);
	struct request_sock *nreq = NULL, *oreq = req;
	struct sock *sk_listener = req->rsk_listener;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	struct net *net;
	int max_syn_ack_retries, qlen, expire = 0, resend = 0;

	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
		struct sock *nsk;

		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
		if (!nsk)
			goto drop;

		nreq = inet_reqsk_clone(req, nsk);
		if (!nreq)
			goto drop;

		/* The new timer for the cloned req can decrease the 2
		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
		 * hold another count to prevent use-after-free and
		 * call reqsk_put() just before return.
		 */
		refcount_set(&nreq->rsk_refcnt, 2 + 1);
		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);

		req = nreq;
		sk_listener = nsk;
	}

	icsk = inet_csk(sk_listener);
	net = sock_net(sk_listener);
	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	queue = &icsk->icsk_accept_queue;
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (max_syn_ack_retries > 2) {
			if (qlen < young)
				break;
			max_syn_ack_retries--;
			young <<= 1;
		}
	}
	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));

		if (!nreq)
			return;

		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
			/* delete timer */
			inet_csk_reqsk_queue_drop(sk_listener, nreq);
			goto no_ownership;
		}

		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
		reqsk_migrate_reset(oreq);
		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
		reqsk_put(oreq);

		reqsk_put(nreq);
		return;
	}

	/* Even if we can clone the req, we may not need to retransmit any more
	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
	 */
	if (nreq) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
no_ownership:
		reqsk_migrate_reset(nreq);
		reqsk_queue_removed(queue, nreq);
		__reqsk_free(nreq);
	}

drop:
	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
}

static bool reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{
	bool found_dup_sk = false;

	if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
		return false;

	/* The timer needs to be setup after a successful insertion. */
	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
	mod_timer(&req->rsk_timer, jiffies + timeout);

	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
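	/* 2 + 1 (assumed split): one reference for the ehash entry, one for
	 * the req timer, plus one kept by the caller that queued the request.
	 */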
	refcount_set(&req->rsk_refcnt, 2 + 1);
	return true;
}

bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	if (!reqsk_queue_hash_req(req, timeout))
		return false;

	inet_csk_reqsk_queue_added(sk);
	return true;
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
			   const gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(newsk);

	if (!icsk->icsk_ulp_ops)
		return;

	icsk->icsk_ulp_ops->clone(req, newsk, priority);
}

/**
 * inet_csk_clone_lock - clone an inet socket, and lock its clone
 * @sk: the socket to clone
 * @req: request_sock
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		inet_sk_set_state(newsk, TCP_SYN_RECV);
		newicsk->icsk_bind_hash = NULL;
		newicsk->icsk_bind2_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);

		/* listeners have SOCK_RCU_FREE, not the children */
		sock_reset_flag(newsk, SOCK_RCU_FREE);

		inet_sk(newsk)->mc_list = NULL;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff = 0;
		newicsk->icsk_probes_out = 0;
		newicsk->icsk_probes_tstamp = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		inet_clone_ulp(req, newsk, priority);

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If inet_sk(sk)->inet_num is nonzero, the socket must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	this_cpu_dec(*sk->sk_prot->orphan_count);

	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows forcing the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);
	inet_csk_prepare_for_destroy_sock(sk);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

static int inet_ulp_can_listen(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
		return -EINVAL;

	return 0;
}

int inet_csk_listen_start(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err;

	err = inet_ulp_can_listen(sk);
	if (unlikely(err))
		return err;

	reqsk_queue_alloc(&icsk->icsk_accept_queue);

	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	inet_sk_state_store(sk, TCP_LISTEN);
	err = sk->sk_prot->get_port(sk, inet->inet_num);
	if (!err) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		err = sk->sk_prot->hash(sk);

		if (likely(!err))
			return 0;
	}

	inet_sk_set_state(sk, TCP_CLOSE);
	return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{
	sk->sk_prot->disconnect(child, O_NONBLOCK);

	sock_orphan(child);

	this_cpu_inc(*sk->sk_prot->orphan_count);

	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
		BUG_ON(sk != req->rsk_listener);

		/* Paranoid, to prevent race condition if
		 * an inbound pkt destined for child is
		 * blocked by sock lock in tcp_v4_rcv().
		 * Also to satisfy an assertion in
		 * tcp_v4_destroy_sock().
		 */
		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
	}
	inet_csk_destroy_sock(child);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

	spin_lock(&queue->rskq_lock);
	if (unlikely(sk->sk_state != TCP_LISTEN)) {
		inet_child_forget(sk, req, child);
		child = NULL;
	} else {
		req->sk = child;
		req->dl_next = NULL;
		if (queue->rskq_accept_head == NULL)
			WRITE_ONCE(queue->rskq_accept_head, req);
		else
			queue->rskq_accept_tail->dl_next = req;
		queue->rskq_accept_tail = req;
		sk_acceptq_added(sk);
	}
	spin_unlock(&queue->rskq_lock);
	return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{
	if (own_req) {
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);

		if (sk != req->rsk_listener) {
			/* another listening sk has been selected,
			 * migrate the req to it.
			 */
			struct request_sock *nreq;

			/* hold a refcnt for the nreq->rsk_listener
			 * which is assigned in inet_reqsk_clone()
			 */
			sock_hold(sk);
			nreq = inet_reqsk_clone(req, sk);
			if (!nreq) {
				inet_child_forget(sk, req, child);
				goto child_put;
			}

			refcount_set(&nreq->rsk_refcnt, 1);
			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
				reqsk_migrate_reset(req);
				reqsk_put(req);
				return child;
			}

			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
			reqsk_migrate_reset(nreq);
			__reqsk_free(nreq);
		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
			return child;
		}
	}
	/* Too bad, another child took ownership of the request, undo. */
child_put:
	bh_unlock_sock(child);
	sock_put(child);
	return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *next, *req;

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk, *nsk;
		struct request_sock *nreq;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		nsk = reuseport_migrate_sock(sk, child, NULL);
		if (nsk) {
			nreq = inet_reqsk_clone(req, nsk);
			if (nreq) {
				refcount_set(&nreq->rsk_refcnt, 1);

				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
					__NET_INC_STATS(sock_net(nsk),
							LINUX_MIB_TCPMIGRATEREQSUCCESS);
					reqsk_migrate_reset(req);
				} else {
					__NET_INC_STATS(sock_net(nsk),
							LINUX_MIB_TCPMIGRATEREQFAILURE);
					reqsk_migrate_reset(nreq);
					__reqsk_free(nreq);
				}

				/* inet_csk_reqsk_queue_add() has already
				 * called inet_child_forget() on failure case.
				 */
				goto skip_child_forget;
			}
		}

		inet_child_forget(sk, req, child);
skip_child_forget:
		reqsk_put(req);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		cond_resched();
	}
	if (queue->fastopenq.rskq_rst_head) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq.lock);
		req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while (req != NULL) {
			next = req->dl_next;
			reqsk_put(req);
			req = next;
		}
	}
	WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->inet_daddr;
	sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   ip_sock_rt_tos(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);