/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static uma_zone_t tcptw_zone;
static int	maxtcptw;

/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including the
 * queue pointers in each tcptw structure, are protected using the global
 * tcbinfo lock, which must be held over queue iteration and modification.
 */
static TAILQ_HEAD(, tcptw)	twq_2msl;

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *);

static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (ipport_lastauto > ipport_firstauto)
		halfrange = (ipport_lastauto - ipport_firstauto) / 2;
	else
		halfrange = (ipport_firstauto - ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}

static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

static int	nolocaltimewait = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_RW,
    &nolocaltimewait, 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(tcptw_zone, tcptw_auto_size());
}

void
tcp_tw_init(void)
{

	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(tcptw_zone, maxtcptw);
	TAILQ_INIT(&twq_2msl);
}

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw *tw;
	struct inpcb *inp = tp->t_inpcb;
	int acknow;
	struct socket *so;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);	/* tcp_tw_2msl_reset(). */
	INP_LOCK_ASSERT(inp);

	if (nolocaltimewait && in_localip(inp->inp_faddr)) {
		tp = tcp_close(tp);
		if (tp != NULL)
			INP_UNLOCK(inp);
		return;
	}

	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_tw_2msl_scan(1);
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_UNLOCK(inp);
			return;
		}
	}
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss = tp->iss;
	tw->irs = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX: If this code is ever used for the FIN_WAIT_2 state as
	 * well, we may need a ts_recent from the last segment.
	 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: the soisdisconnected() call used to be made in
	 * tcp_discardcb(), and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	so = inp->inp_socket;
	soisdisconnected(so);
	tw->tw_cred = crhold(so->so_cred);
	SOCK_LOCK(so);
	tw->tw_so_options = so->so_options;
	SOCK_UNLOCK(so);
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = tw;
	inp->inp_vflag |= INP_TIMEWAIT;
	tcp_tw_2msl_reset(tw, 0);

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_vflag & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_vflag &= ~INP_SOCKREF;
		INP_UNLOCK(inp);
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_UNLOCK(inp);
}

#if 0
/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND 250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return (1);
	else
		return (0);
}
#endif

/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 */
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;
#ifdef INET6
	int isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#else
	const int isipv6 = 0;
#endif

	/* tcbinfo lock required for tcp_twclose(), tcp_tw_2msl_reset(). */
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(inp);

	/*
	 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
	 * still present.  This is undesirable, but temporarily necessary
	 * until we work out how to handle inpcbs whose timewait state has
	 * been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
		tcp_twrespond(tw, TH_ACK);
	goto drop;

	/*
	 * Generate a RST, dropping incoming segment.
	 * Make ACK acceptable to originator of segment.
	 * Don't bother to respond if destination was broadcast/multicast.
	 */
	if (m->m_flags & (M_BCAST|M_MCAST))
		goto drop;
	if (isipv6) {
#ifdef INET6
		struct ip6_hdr *ip6;

		/* IPv6 anycast check is done at tcp6_input() */
		ip6 = mtod(m, struct ip6_hdr *);
		if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
		    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
			goto drop;
#endif
	} else {
		struct ip *ip;

		ip = mtod(m, struct ip *);
		if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
		    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
		    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
		    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
			goto drop;
	}
	if (thflags & TH_ACK) {
		tcp_respond(NULL,
		    mtod(m, void *), th, m, 0, th->th_ack, TH_RST);
	} else {
		seq = th->th_seq + (thflags & TH_SYN ? 1 : 0);
		tcp_respond(NULL,
		    mtod(m, void *), th, m, seq, 0, TH_RST|TH_ACK);
	}
	INP_UNLOCK(inp);
	return (0);

drop:
	INP_UNLOCK(inp);
	m_freem(m);
	return (0);
}

void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->twtcp pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_vflag & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	INP_INFO_WLOCK_ASSERT(&tcbinfo);	/* tcp_tw_2msl_stop(). */
	INP_LOCK_ASSERT(inp);

	tw->tw_inpcb = NULL;
	tcp_tw_2msl_stop(tw);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: either we own a
		 * strong reference, which we now release, or we don't, in
		 * which case another reference exists (XXXRW: think about
		 * this more) and we need take no action.
		 */
		if (inp->inp_vflag & INP_SOCKREF) {
			inp->inp_vflag &= ~INP_SOCKREF;
			INP_UNLOCK(inp);
			ACCEPT_LOCK();
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_UNLOCK(inp);
		}
	} else {
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6PROTO)
			in6_pcbfree(inp);
		else
#endif
			in_pcbfree(inp);
	}
	tcpstat.tcps_closed++;
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	if (reuse)
		return;
	uma_zfree(tcptw_zone, tw);
}

int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
	struct tcphdr *th;
	struct mbuf *m;
	struct ip *ip = NULL;
	u_int hdrlen, optlen;
	int error;
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_isipv6;
#endif

	INP_LOCK_ASSERT(inp);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	} else
#endif
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYN's and this is not a RST.
565 */ 566 if (tw->t_recent && flags == TH_ACK) { 567 to.to_flags |= TOF_TS; 568 to.to_tsval = ticks + tw->ts_offset; 569 to.to_tsecr = tw->t_recent; 570 } 571 optlen = tcp_addoptions(&to, (u_char *)(th + 1)); 572 573 m->m_len = hdrlen + optlen; 574 m->m_pkthdr.len = m->m_len; 575 576 KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small")); 577 578 th->th_seq = htonl(tw->snd_nxt); 579 th->th_ack = htonl(tw->rcv_nxt); 580 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 581 th->th_flags = flags; 582 th->th_win = htons(tw->last_win); 583 584 #ifdef INET6 585 if (isipv6) { 586 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr), 587 sizeof(struct tcphdr) + optlen); 588 ip6->ip6_hlim = in6_selecthlim(inp, NULL); 589 error = ip6_output(m, inp->in6p_outputopts, NULL, 590 (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp); 591 } else 592 #endif 593 { 594 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 595 htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP)); 596 m->m_pkthdr.csum_flags = CSUM_TCP; 597 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 598 ip->ip_len = m->m_pkthdr.len; 599 if (path_mtu_discovery) 600 ip->ip_off |= IP_DF; 601 error = ip_output(m, inp->inp_options, NULL, 602 ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 603 NULL, inp); 604 } 605 if (flags & TH_ACK) 606 tcpstat.tcps_sndacks++; 607 else 608 tcpstat.tcps_sndctrl++; 609 tcpstat.tcps_sndtotal++; 610 return (error); 611 } 612 613 static void 614 tcp_tw_2msl_reset(struct tcptw *tw, int rearm) 615 { 616 617 INP_INFO_WLOCK_ASSERT(&tcbinfo); 618 INP_LOCK_ASSERT(tw->tw_inpcb); 619 if (rearm) 620 TAILQ_REMOVE(&twq_2msl, tw, tw_2msl); 621 tw->tw_time = ticks + 2 * tcp_msl; 622 TAILQ_INSERT_TAIL(&twq_2msl, tw, tw_2msl); 623 } 624 625 static void 626 tcp_tw_2msl_stop(struct tcptw *tw) 627 { 628 629 INP_INFO_WLOCK_ASSERT(&tcbinfo); 630 TAILQ_REMOVE(&twq_2msl, tw, tw_2msl); 631 } 632 633 struct tcptw * 634 tcp_tw_2msl_scan(int reuse) 635 { 636 struct tcptw *tw; 637 638 INP_INFO_WLOCK_ASSERT(&tcbinfo); 639 for (;;) { 640 tw = TAILQ_FIRST(&twq_2msl); 641 if (tw == NULL || (!reuse && tw->tw_time > ticks)) 642 break; 643 INP_LOCK(tw->tw_inpcb); 644 tcp_twclose(tw, reuse); 645 if (reuse) 646 return (tw); 647 } 648 return (NULL); 649 } 650