// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_sk*_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses
 *				if IPv6 only, and any IPv4 addresses
 *				if not IPv6 only
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 *				and 0.0.0.0 equals to 0.0.0.0 only
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				 const struct in6_addr *sk2_rcv_saddr6,
				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk1_ipv6only, bool sk2_ipv6only,
				 bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
		if (!sk2_ipv6only) {
			if (sk1_rcv_saddr == sk2_rcv_saddr)
				return true;
			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
				(match_sk2_wildcard && !sk2_rcv_saddr);
		}
		return false;
	}

	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
		return true;

	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return true;

	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return true;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
		return true;

	return false;
}
#endif

/* match_sk*_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				0.0.0.0 only equals to 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk2_ipv6only, bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_rcv_saddr == sk2_rcv_saddr)
			return true;
		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
			(match_sk2_wildcard && !sk2_rcv_saddr);
	}
	return false;
}

bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			  bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
					    inet6_rcv_saddr(sk2),
					    sk->sk_rcv_saddr,
					    sk2->sk_rcv_saddr,
					    ipv6_only_sock(sk),
					    ipv6_only_sock(sk2),
					    match_wildcard,
					    match_wildcard);
#endif
	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
				    ipv6_only_sock(sk2), match_wildcard,
				    match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);

bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
	return !sk->sk_rcv_saddr;
}

/**
 * inet_sk_get_local_port_range - fetch ephemeral ports range
 * @sk: socket
 * @low: pointer to low port
 * @high: pointer to high port
 *
 * Fetch netns port range (/proc/sys/net/ipv4/ip_local_port_range)
 * Range can be overridden if socket got IP_LOCAL_PORT_RANGE option.
 * Returns true if IP_LOCAL_PORT_RANGE was set on this socket.
 */
bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
{
	int lo, hi, sk_lo, sk_hi;
	bool local_range = false;
	u32 sk_range;

	inet_get_local_port_range(sock_net(sk), &lo, &hi);

	sk_range = READ_ONCE(inet_sk(sk)->local_port_range);
	if (unlikely(sk_range)) {
		sk_lo = sk_range & 0xffff;
		sk_hi = sk_range >> 16;

		if (lo <= sk_lo && sk_lo <= hi)
			lo = sk_lo;
		if (lo <= sk_hi && sk_hi <= hi)
			hi = sk_hi;
		local_range = true;
	}

	*low = lo;
	*high = hi;
	return local_range;
}
EXPORT_SYMBOL(inet_sk_get_local_port_range);

static bool inet_use_bhash2_on_bind(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);

		if (addr_type == IPV6_ADDR_ANY)
			return false;

		if (addr_type != IPV6_ADDR_MAPPED)
			return true;
	}
#endif
	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
}

static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
			       kuid_t sk_uid, bool relax,
			       bool reuseport_cb_ok, bool reuseport_ok)
{
	int bound_dev_if2;

	if (sk == sk2)
		return false;

	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);

	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
	    sk->sk_bound_dev_if == bound_dev_if2) {
		if (sk->sk_reuse && sk2->sk_reuse &&
		    sk2->sk_state != TCP_LISTEN) {
			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
				       sk2->sk_reuseport && reuseport_cb_ok &&
				       (sk2->sk_state == TCP_TIME_WAIT ||
					uid_eq(sk_uid, sock_i_uid(sk2)))))
				return true;
		} else if (!reuseport_ok || !sk->sk_reuseport ||
			   !sk2->sk_reuseport || !reuseport_cb_ok ||
			   (sk2->sk_state != TCP_TIME_WAIT &&
			    !uid_eq(sk_uid, sock_i_uid(sk2)))) {
			return true;
		}
	}
	return false;
}
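
/*
 * Illustrative sketch (editor's note, not part of the upstream source):
 * inet_sk_get_local_port_range() above lets a per-socket range narrow the
 * netns ephemeral range.  From userspace that per-socket range is set with
 * the IP_LOCAL_PORT_RANGE socket option; assuming its u32 value packs the
 * bounds as (high << 16) | low -- the same encoding decoded above -- a
 * caller could do roughly:
 *
 *	uint32_t range = (60000u << 16) | 50000u;	// ports 50000..60000
 *	setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
 *	// a later bind() to port 0 then picks from the narrowed range
 */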

static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
				   kuid_t sk_uid, bool relax,
				   bool reuseport_cb_ok, bool reuseport_ok)
{
	if (ipv6_only_sock(sk2)) {
		if (sk->sk_family == AF_INET)
			return false;

#if IS_ENABLED(CONFIG_IPV6)
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			return false;
#endif
	}

	return inet_bind_conflict(sk, sk2, sk_uid, relax,
				  reuseport_cb_ok, reuseport_ok);
}

static bool inet_bhash2_conflict(const struct sock *sk,
				 const struct inet_bind2_bucket *tb2,
				 kuid_t sk_uid,
				 bool relax, bool reuseport_cb_ok,
				 bool reuseport_ok)
{
	struct sock *sk2;

	sk_for_each_bound(sk2, &tb2->owners) {
		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
					   reuseport_cb_ok, reuseport_ok))
			return true;
	}

	return false;
}

#define sk_for_each_bound_bhash(__sk, __tb2, __tb)			\
	hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node)	\
		sk_for_each_bound((__sk), &(__tb2)->owners)

/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  const struct inet_bind2_bucket *tb2, /* may be null */
				  bool relax, bool reuseport_ok)
{
	kuid_t uid = sock_i_uid((struct sock *)sk);
	struct sock_reuseport *reuseport_cb;
	bool reuseport_cb_ok;
	struct sock *sk2;

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
	 * ipv4) should have been checked already. We need to do these two
	 * checks separately because their spinlocks have to be acquired/released
	 * independently of each other, to prevent possible deadlocks
	 */
	if (inet_use_bhash2_on_bind(sk))
		return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax,
						   reuseport_cb_ok, reuseport_ok);

	/* Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners and tb2->owners list belong
	 * to the same net - the one this bucket belongs to.
	 */
	sk_for_each_bound_bhash(sk2, tb2, tb) {
		if (!inet_bind_conflict(sk, sk2, uid, relax, reuseport_cb_ok, reuseport_ok))
			continue;

		if (inet_rcv_saddr_equal(sk, sk2, true))
			return true;
	}

	return false;
}

/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
 * INADDR_ANY (if ipv4) socket.
 *
 * Caller must hold bhash hashbucket lock with local bh disabled, to protect
 * against concurrent binds on the port for addr any
 */
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
					  bool relax, bool reuseport_ok)
{
	kuid_t uid = sock_i_uid((struct sock *)sk);
	const struct net *net = sock_net(sk);
	struct sock_reuseport *reuseport_cb;
	struct inet_bind_hashbucket *head2;
	struct inet_bind2_bucket *tb2;
	bool conflict = false;
	bool reuseport_cb_ok;

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);

	spin_lock(&head2->lock);

	inet_bind_bucket_for_each(tb2, &head2->chain) {
		if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
			continue;

		if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
			continue;

		conflict = true;
		break;
	}

	spin_unlock(&head2->lock);

	return conflict;
}

/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket locks held if successful.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
			struct inet_bind2_bucket **tb2_ret,
			struct inet_bind_hashbucket **head2_ret, int *port_ret)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	int i, low, high, attempt_half, port, l3mdev;
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	bool relax = false;

	l3mdev = inet_sk_bound_l3mdev(sk);
ports_exhausted:
	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_sk_get_local_port_range(sk, &low, &high);
	high++;	/* [32768, 60999] -> [32768, 61000[ */
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = get_random_u32_below(remaining);
	/* __inet_hash_connect() favors ports having @low parity
	 * We do the opposite to not pollute connect() users.
	 */
	offset |= 1U;

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		if (inet_use_bhash2_on_bind(sk)) {
			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
				goto next_port;
		}

		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
		spin_lock(&head2->lock);
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (!inet_csk_bind_conflict(sk, tb, tb2,
							    relax, false))
					goto success;
				spin_unlock(&head2->lock);
				goto next_port;
			}
		tb = NULL;
		goto success;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
		/* OK we now try the upper half of the range */
		attempt_half = 2;
		goto other_half_scan;
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
		/* We still have a chance to connect to different destinations */
		relax = true;
		goto ports_exhausted;
	}
	return NULL;
success:
	*port_ret = port;
	*tb_ret = tb;
	*tb2_ret = tb2;
	*head2_ret = head2;
	return head;
}

static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
				     struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);

	if (tb->fastreuseport <= 0)
		return 0;
	if (!sk->sk_reuseport)
		return 0;
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		return 0;
	if (!uid_eq(tb->fastuid, uid))
		return 0;
	/* We only need to check the rcv_saddr if this tb was once marked
	 * without fastreuseport and then was reset, as we can only know that
	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
	 * owners list.
	 */
	if (tb->fastreuseport == FASTREUSEPORT_ANY)
		return 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tb->fast_sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
					    inet6_rcv_saddr(sk),
					    tb->fast_rcv_saddr,
					    sk->sk_rcv_saddr,
					    tb->fast_ipv6_only,
					    ipv6_only_sock(sk), true, false);
#endif
	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
				    ipv6_only_sock(sk), true, false);
}

void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->bhash2)) {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = FASTREUSEPORT_ANY;
			tb->fastuid = uid;
			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
			tb->fast_ipv6_only = ipv6_only_sock(sk);
			tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
		} else {
			tb->fastreuseport = 0;
		}
	} else {
		if (!reuse)
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			/* We didn't match or we don't have fastreuseport set on
			 * the tb, but we have sk_reuseport set on this socket
			 * and we know that there are no bind conflicts with
			 * this socket in this tb, so reset our tb's reuseport
			 * settings so that any subsequent sockets that match
			 * our current socket will be put on the fast path.
			 *
			 * If we reset we need to set FASTREUSEPORT_STRICT so we
			 * do extra checking for all subsequent sk_reuseport
			 * socks.
			 */
			if (!sk_reuseport_match(tb, sk)) {
				tb->fastreuseport = FASTREUSEPORT_STRICT;
				tb->fastuid = uid;
				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
				tb->fast_ipv6_only = ipv6_only_sock(sk);
				tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
			}
		} else {
			tb->fastreuseport = 0;
		}
	}
}

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	bool found_port = false, check_bind_conflict = true;
	bool bhash_created = false, bhash2_created = false;
	int ret = -EADDRINUSE, port = snum, l3mdev;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2 = NULL;
	struct inet_bind_bucket *tb = NULL;
	bool head2_lock_acquired = false;
	struct net *net = sock_net(sk);

	l3mdev = inet_sk_bound_l3mdev(sk);

	if (!port) {
		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
		if (!head)
			return ret;

		head2_lock_acquired = true;

		if (tb && tb2)
			goto success;
		found_port = true;
	} else {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
	}

	if (!tb) {
		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
					     head, port, l3mdev);
		if (!tb)
			goto fail_unlock;
		bhash_created = true;
	}

	if (!found_port) {
		if (!hlist_empty(&tb->bhash2)) {
			if (sk->sk_reuse == SK_FORCE_REUSE ||
			    (tb->fastreuse > 0 && reuse) ||
			    sk_reuseport_match(tb, sk))
				check_bind_conflict = false;
		}

		if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
				goto fail_unlock;
		}

		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
		spin_lock(&head2->lock);
		head2_lock_acquired = true;
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	}

	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
					       net, head2, tb, sk);
		if (!tb2)
			goto fail_unlock;
		bhash2_created = true;
	}

	if (!found_port && check_bind_conflict) {
		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
			goto fail_unlock;
	}

success:
	inet_csk_update_fastreuse(tb, sk);

	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, tb2, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
	ret = 0;

fail_unlock:
	if (ret) {
		if (bhash2_created)
			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, tb2);
		if (bhash_created)
			inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
	}
	if (head2_lock_acquired)
		spin_unlock(&head2->lock);
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
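
/*
 * Illustrative sketch (editor's note, not part of the upstream source):
 * inet_csk_get_port() above prefers odd ports for binds to port 0 and
 * leaves even ports to __inet_hash_connect().  Assuming the default
 * ip_local_port_range, the effect can be observed from userspace roughly
 * like this:
 *
 *	struct sockaddr_in a = { .sin_family = AF_INET,
 *				 .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
 *				 .sin_port = 0 };		// port 0: autobind
 *	socklen_t alen = sizeof(a);
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *	getsockname(fd, (struct sockaddr *)&a, &alen);
 *	// ntohs(a.sin_port) is typically odd here, while ports picked by
 *	// connect() without a prior bind() are typically even.
 */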

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	arg->is_empty = reqsk_queue_empty(queue);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}

out:
	release_sock(sk);
	if (newsk && mem_cgroup_sockets_enabled) {
		gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
		int amt = 0;

		/* atomically get the memory usage, set and charge the
		 * newsk->sk_memcg.
		 */
		lock_sock(newsk);

		mem_cgroup_sk_alloc(newsk);
		if (newsk->sk_memcg) {
			/* The socket has not been accepted yet, no need
			 * to look at newsk->sk_wmem_queued.
			 */
			amt = sk_mem_pages(newsk->sk_forward_alloc +
					   atomic_read(&newsk->sk_rmem_alloc));
		}

		if (amt)
			mem_cgroup_charge_skmem(newsk->sk_memcg, amt, gfp);
		kmem_cache_charge(newsk, gfp);

		release_sock(newsk);
	}
	if (req)
		reqsk_put(req);

	if (newsk)
		inet_init_csk_locks(newsk);

	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	arg->err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *t),
			       void (*delack_handler)(struct timer_list *t),
			       void (*keepalive_handler)(struct timer_list *t))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
	timer_setup(&sk->sk_timer, keepalive_handler, 0);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	smp_store_release(&icsk->icsk_pending, 0);
	smp_store_release(&icsk->icsk_ack.pending, 0);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_clear_xmit_timers_sync(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* ongoing timer handlers need to acquire socket lock. */
	sock_not_owned_by_me(sk);

	smp_store_release(&icsk->icsk_pending, 0);
	smp_store_release(&icsk->icsk_ack.pending, 0);

	sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
	sk_stop_timer_sync(sk, &sk->sk_timer);
}

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ?
				opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	opt = rcu_dereference(ireq->ireq_opt);
	fl4 = &newinet->cork.fl.u.ip4;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

/* Decide when to expire the request and when to resend SYN-ACK */
static void syn_ack_recalc(struct request_sock *req,
			   const int max_syn_ack_retries,
			   const u8 rskq_defer_accept,
			   int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= max_syn_ack_retries;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= max_syn_ack_retries &&
		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
	/* Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

static struct request_sock *
reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
		   bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->syncookie = 0;
	req->timeout = 0;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}
#define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))

struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
				      struct sock *sk_listener,
				      bool attach_listener)
{
	struct request_sock *req = reqsk_alloc(ops, sk_listener,
					       attach_listener);

	if (req) {
		struct inet_request_sock *ireq = inet_rsk(req);

		ireq->ireq_opt = NULL;
#if IS_ENABLED(CONFIG_IPV6)
		ireq->pktopts = NULL;
#endif
		atomic64_set(&ireq->ir_cookie, 0);
		ireq->ireq_state = TCP_NEW_SYN_RECV;
		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
		ireq->ireq_family = sk_listener->sk_family;
		req->timeout = TCP_TIMEOUT_INIT;
	}

	return req;
}
EXPORT_SYMBOL(inet_reqsk_alloc);

static struct request_sock *inet_reqsk_clone(struct request_sock *req,
					     struct sock *sk)
{
	struct sock *req_sk, *nreq_sk;
	struct request_sock *nreq;

	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!nreq) {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
		sock_put(sk);
		return NULL;
	}

	req_sk = req_to_sk(req);
	nreq_sk = req_to_sk(nreq);

	memcpy(nreq_sk, req_sk,
	       offsetof(struct sock, sk_dontcopy_begin));
	unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
		      req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
		      /* alloc is larger than struct, see above */);

	sk_node_init(&nreq_sk->sk_node);
	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;

	nreq->rsk_listener = sk;

	/* We need not acquire fastopenq->lock
	 * because the child socket is locked in inet_csk_listen_stop().
	 */
	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);

	return nreq;
}

static void reqsk_queue_migrated(struct request_sock_queue *queue,
				 const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static void reqsk_migrate_reset(struct request_sock *req)
{
	req->saved_syn = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	inet_rsk(req)->ipv6_opt = NULL;
	inet_rsk(req)->pktopts = NULL;
#else
	inet_rsk(req)->ireq_opt = NULL;
#endif
}

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{
	struct sock *sk = req_to_sk(req);
	bool found = false;

	if (sk_hashed(sk)) {
		struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(sk);
		spin_unlock(lock);
	}

	return found;
}

static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
					struct request_sock *req,
					bool from_timer)
{
	bool unlinked = reqsk_queue_unlink(req);

	if (!from_timer && timer_delete_sync(&req->rsk_timer))
		reqsk_put(req);

	if (unlinked) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}

	return unlinked;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	return __inet_csk_reqsk_queue_drop(sk, req, false);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(struct timer_list *t)
{
	struct request_sock *req = from_timer(req, t, rsk_timer);
	struct request_sock *nreq = NULL, *oreq = req;
	struct sock *sk_listener = req->rsk_listener;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	struct net *net;
	int max_syn_ack_retries, qlen, expire = 0, resend = 0;

	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
		struct sock *nsk;

		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
		if (!nsk)
			goto drop;

		nreq = inet_reqsk_clone(req, nsk);
		if (!nreq)
			goto drop;

		/* The new timer for the cloned req can decrease the refcount
		 * of 2 by calling inet_csk_reqsk_queue_drop_and_put(), so
		 * hold another count to prevent use-after-free and
		 * call reqsk_put() just before return.
		 */
		refcount_set(&nreq->rsk_refcnt, 2 + 1);
		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);

		req = nreq;
		sk_listener = nsk;
	}

	icsk = inet_csk(sk_listener);
	net = sock_net(sk_listener);
	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	queue = &icsk->icsk_accept_queue;
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (max_syn_ack_retries > 2) {
			if (qlen < young)
				break;
			max_syn_ack_retries--;
			young <<= 1;
		}
	}
	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));

		if (!nreq)
			return;

		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
			/* delete timer */
			__inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
			goto no_ownership;
		}

		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
		reqsk_migrate_reset(oreq);
		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
		reqsk_put(oreq);

		reqsk_put(nreq);
		return;
	}

	/* Even if we can clone the req, we may need not retransmit any more
	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
	 */
	if (nreq) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
no_ownership:
		reqsk_migrate_reset(nreq);
		reqsk_queue_removed(queue, nreq);
		__reqsk_free(nreq);
	}

drop:
	__inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
	reqsk_put(oreq);
}
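
/*
 * Worked example (editor's note, not part of the upstream source): with
 * sk_max_ack_backlog == 128, the pruning above kicks in once qlen > 64.
 * If, say, qlen == 100 and only 10 requests are still "young", young << 1
 * starts at 20, and the loop keeps doubling it (20, 40, 80, all <= 100)
 * while decrementing max_syn_ack_retries, so -- assuming the default
 * tcp_synack_retries of 5 -- the retry budget for old embryos drops from
 * 5 to 2, while young requests keep getting SYN-ACK retransmissions.
 */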

static bool reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{
	bool found_dup_sk = false;

	if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
		return false;

	/* The timer needs to be setup after a successful insertion. */
	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
	mod_timer(&req->rsk_timer, jiffies + timeout);

	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	refcount_set(&req->rsk_refcnt, 2 + 1);
	return true;
}

bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	if (!reqsk_queue_hash_req(req, timeout))
		return false;

	inet_csk_reqsk_queue_added(sk);
	return true;
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
			   const gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(newsk);

	if (!icsk->icsk_ulp_ops)
		return;

	icsk->icsk_ulp_ops->clone(req, newsk, priority);
}

/**
 * inet_csk_clone_lock - clone an inet socket, and lock its clone
 * @sk: the socket to clone
 * @req: request_sock
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		inet_sk_set_state(newsk, TCP_SYN_RECV);
		newicsk->icsk_bind_hash = NULL;
		newicsk->icsk_bind2_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);

		/* listeners have SOCK_RCU_FREE, not the children */
		sock_reset_flag(newsk, SOCK_RCU_FREE);

		inet_sk(newsk)->mc_list = NULL;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff = 0;
		newicsk->icsk_probes_out = 0;
		newicsk->icsk_probes_tstamp = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		inet_clone_ulp(req, newsk, priority);

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If inet_sk(sk)->inet_num is not 0, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	this_cpu_dec(*sk->sk_prot->orphan_count);

	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows forcing a closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);
	inet_csk_prepare_for_destroy_sock(sk);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

static int inet_ulp_can_listen(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
		return -EINVAL;

	return 0;
}

int inet_csk_listen_start(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err;

	err = inet_ulp_can_listen(sk);
	if (unlikely(err))
		return err;

	reqsk_queue_alloc(&icsk->icsk_accept_queue);

	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	inet_sk_state_store(sk, TCP_LISTEN);
	err = sk->sk_prot->get_port(sk, inet->inet_num);
	if (!err) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		err = sk->sk_prot->hash(sk);

		if (likely(!err))
			return 0;
	}

	inet_sk_set_state(sk, TCP_CLOSE);
	return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{
	sk->sk_prot->disconnect(child, O_NONBLOCK);

	sock_orphan(child);

	this_cpu_inc(*sk->sk_prot->orphan_count);

	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
		BUG_ON(sk != req->rsk_listener);

		/* Paranoid, to prevent race condition if
		 * an inbound pkt destined for child is
		 * blocked by sock lock in tcp_v4_rcv().
		 * Also to satisfy an assertion in
		 * tcp_v4_destroy_sock().
		 */
		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
	}
	inet_csk_destroy_sock(child);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

	spin_lock(&queue->rskq_lock);
	if (unlikely(sk->sk_state != TCP_LISTEN)) {
		inet_child_forget(sk, req, child);
		child = NULL;
	} else {
		req->sk = child;
		req->dl_next = NULL;
		if (queue->rskq_accept_head == NULL)
			WRITE_ONCE(queue->rskq_accept_head, req);
		else
			queue->rskq_accept_tail->dl_next = req;
		queue->rskq_accept_tail = req;
		sk_acceptq_added(sk);
	}
	spin_unlock(&queue->rskq_lock);
	return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{
	if (own_req) {
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);

		if (sk != req->rsk_listener) {
			/* another listening sk has been selected,
			 * migrate the req to it.
			 */
			struct request_sock *nreq;

			/* hold a refcnt for the nreq->rsk_listener
			 * which is assigned in inet_reqsk_clone()
			 */
			sock_hold(sk);
			nreq = inet_reqsk_clone(req, sk);
			if (!nreq) {
				inet_child_forget(sk, req, child);
				goto child_put;
			}

			refcount_set(&nreq->rsk_refcnt, 1);
			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
				reqsk_migrate_reset(req);
				reqsk_put(req);
				return child;
			}

			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
			reqsk_migrate_reset(nreq);
			__reqsk_free(nreq);
		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
			return child;
		}
	}
	/* Too bad, another child took ownership of the request, undo. */
child_put:
	bh_unlock_sock(child);
	sock_put(child);
	return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *next, *req;

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk, *nsk;
		struct request_sock *nreq;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		nsk = reuseport_migrate_sock(sk, child, NULL);
		if (nsk) {
			nreq = inet_reqsk_clone(req, nsk);
			if (nreq) {
				refcount_set(&nreq->rsk_refcnt, 1);

				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
					__NET_INC_STATS(sock_net(nsk),
							LINUX_MIB_TCPMIGRATEREQSUCCESS);
					reqsk_migrate_reset(req);
				} else {
					__NET_INC_STATS(sock_net(nsk),
							LINUX_MIB_TCPMIGRATEREQFAILURE);
					reqsk_migrate_reset(nreq);
					__reqsk_free(nreq);
				}

				/* inet_csk_reqsk_queue_add() has already
				 * called inet_child_forget() on failure case.
				 */
				goto skip_child_forget;
			}
		}

		inet_child_forget(sk, req, child);
skip_child_forget:
		reqsk_put(req);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		cond_resched();
	}
	if (queue->fastopenq.rskq_rst_head) {
		/* Free all the reqs queued in rskq_rst_head.
		 */
		spin_lock_bh(&queue->fastopenq.lock);
		req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while (req != NULL) {
			next = req->dl_next;
			reqsk_put(req);
			req = next;
		}
	}
	WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->inet_daddr;
	sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   ip_sock_rt_tos(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);