/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD$
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static uma_zone_t tcptw_zone;
static int maxtcptw;

/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including the
 * queue pointers in each tcptw structure, are protected using the global
 * tcbinfo lock, which must be held over queue iteration and modification.
 */
static TAILQ_HEAD(, tcptw) twq_2msl;

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *);

static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (ipport_lastauto > ipport_firstauto)
		halfrange = (ipport_lastauto - ipport_firstauto) / 2;
	else
		halfrange = (ipport_firstauto - ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}
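
/*
 * Worked example of the sizing above, assuming an ephemeral range of
 * 49152..65535 (the IANA dynamic port range; the configured portrange
 * sysctls may differ): halfrange = (65535 - 49152) / 2 = 8191, which is
 * then clamped to no fewer than 32 and no more than maxsockets / 5
 * entries.
 */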

static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, sizeof(int), req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

static int nolocaltimewait = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_RW,
    &nolocaltimewait, 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(tcptw_zone, tcptw_auto_size());
}

void
tcp_tw_init(void)
{

	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(tcptw_zone, maxtcptw);
	TAILQ_INIT(&twq_2msl);
}

/*
 * Move a TCP connection into TIME_WAIT state.
 *	tcbinfo is locked.
 *	inp is locked, and is unlocked before returning.
 */
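/*
 * If the tcptw zone is exhausted, the oldest TIME_WAIT entry is reclaimed
 * via tcp_tw_2msl_scan(1) so this connection can take its place; if even
 * that fails, the connection is closed immediately instead of entering
 * TIME_WAIT.  Likewise, when net.inet.tcp.nolocaltimewait is enabled and
 * the peer address is one of our own, no compressed TIME_WAIT entry is
 * created and the connection is simply closed.
 */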
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw *tw;
	struct inpcb *inp = tp->t_inpcb;
	int acknow;
	struct socket *so;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);	/* tcp_tw_2msl_reset(). */
	INP_LOCK_ASSERT(inp);

	if (nolocaltimewait && in_localip(inp->inp_faddr)) {
		tp = tcp_close(tp);
		if (tp != NULL)
			INP_UNLOCK(inp);
		return;
	}

	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_tw_2msl_scan(1);
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_UNLOCK(inp);
			return;
		}
	}
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss = tp->iss;
	tw->irs = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX: If this code is ever used for the FIN_WAIT_2 state as well,
	 * we may need the ts_recent from the last segment.
	 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: soisdisconnected() call used to be made in tcp_discardcb(),
	 * and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	so = inp->inp_socket;
	soisdisconnected(so);
	tw->tw_cred = crhold(so->so_cred);
	SOCK_LOCK(so);
	tw->tw_so_options = so->so_options;
	SOCK_UNLOCK(so);
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = tw;
	inp->inp_vflag |= INP_TIMEWAIT;
	tcp_tw_2msl_reset(tw, 0);

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_vflag & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_vflag &= ~INP_SOCKREF;
		INP_UNLOCK(inp);
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_UNLOCK(inp);
}

#if 0
/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND 250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return (1);
	else
		return (0);
}
#endif

/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 */
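/*
 * Segment handling while in TIME_WAIT, in order:
 *   - RST: dropped without a reply (RFC 1337).
 *   - SYN with a sequence number above rcv_nxt: the old TIME_WAIT state
 *     is destroyed so the caller can start over with a listening pcb.
 *   - Segments without ACK: dropped.
 *   - Duplicate FIN: the 2MSL timer is restarted.
 *   - Anything carrying data, or that is not a pure duplicate ACK, is
 *     answered with a bare ACK before the segment is dropped.
 */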
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;
#ifdef INET6
	int isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#else
	const int isipv6 = 0;
#endif

	/* tcbinfo lock required for tcp_twclose(), tcp_tw_2msl_reset(). */
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(inp);

	/*
	 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
	 * still present.  This is undesirable, but temporarily necessary
	 * until we work out how to handle inpcbs whose timewait state has
	 * been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
		tcp_twrespond(tw, TH_ACK);
	goto drop;

	/*
	 * Generate a RST, dropping incoming segment.
	 * Make ACK acceptable to originator of segment.
	 * Don't bother to respond if destination was broadcast/multicast.
	 */
	if (m->m_flags & (M_BCAST|M_MCAST))
		goto drop;
	if (isipv6) {
#ifdef INET6
		struct ip6_hdr *ip6;

		/* IPv6 anycast check is done at tcp6_input() */
		ip6 = mtod(m, struct ip6_hdr *);
		if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
		    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
			goto drop;
#endif
	} else {
		struct ip *ip;

		ip = mtod(m, struct ip *);
		if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
		    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
		    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
		    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
			goto drop;
	}
	if (thflags & TH_ACK) {
		tcp_respond(NULL,
		    mtod(m, void *), th, m, 0, th->th_ack, TH_RST);
	} else {
		seq = th->th_seq + (thflags & TH_SYN ? 1 : 0);
		tcp_respond(NULL,
		    mtod(m, void *), th, m, seq, 0, TH_RST|TH_ACK);
	}
	INP_UNLOCK(inp);
	return (0);

drop:
	INP_UNLOCK(inp);
	m_freem(m);
	return (0);
}

void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_vflag & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	INP_INFO_WLOCK_ASSERT(&tcbinfo);	/* tcp_tw_2msl_stop(). */
	INP_LOCK_ASSERT(inp);

	tw->tw_inpcb = NULL;
	tcp_tw_2msl_stop(tw);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: first, we own a
		 * strong reference, which we will now release, or we don't,
		 * in which case another reference exists (XXXRW: think
		 * about this more), and we don't need to take action.
		 */
		if (inp->inp_vflag & INP_SOCKREF) {
			inp->inp_vflag &= ~INP_SOCKREF;
			INP_UNLOCK(inp);
			ACCEPT_LOCK();
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_UNLOCK(inp);
		}
	} else {
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6PROTO)
			in6_pcbfree(inp);
		else
#endif
			in_pcbfree(inp);
	}
	tcpstat.tcps_closed++;
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	if (reuse)
		return;
	uma_zfree(tcptw_zone, tw);
}
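
/*
 * Build and send a control segment (normally a bare ACK) on behalf of a
 * connection in the compressed TIME_WAIT state, which no longer has a
 * tcpcb.  The TCP/IP headers are constructed by hand, a timestamp option
 * is appended when timestamp state was carried over from the original
 * connection, and the segment is handed to ip_output() or ip6_output().
 */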
int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
	struct tcphdr *th;
	struct mbuf *m;
	struct ip *ip = NULL;
	u_int hdrlen, optlen;
	int error;
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_isipv6;
#endif

	INP_LOCK_ASSERT(inp);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_create_mbuf_from_inpcb(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	} else
#endif
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYNs and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = ticks + tw->ts_offset;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

#ifdef INET6
	if (isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    sizeof(struct tcphdr) + optlen);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		ip->ip_len = m->m_pkthdr.len;
		if (path_mtu_discovery)
			ip->ip_off |= IP_DF;
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
	if (flags & TH_ACK)
		tcpstat.tcps_sndacks++;
	else
		tcpstat.tcps_sndctrl++;
	tcpstat.tcps_sndtotal++;
	return (error);
}
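
/*
 * The helpers below maintain the twq_2msl expiry queue.  Entries are
 * always appended with the same 2 * tcp_msl offset, so the queue stays
 * sorted by expiry time and tcp_tw_2msl_scan() only needs to look at the
 * head.  When 'reuse' is nonzero, the scan closes the oldest entry (if
 * there is one) and returns it to the caller for recycling; otherwise it
 * reaps every entry whose timer has expired and returns NULL.
 */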
static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(tw->tw_inpcb);
	if (rearm)
		TAILQ_REMOVE(&twq_2msl, tw, tw_2msl);
	tw->tw_time = ticks + 2 * tcp_msl;
	TAILQ_INSERT_TAIL(&twq_2msl, tw, tw_2msl);
}

static void
tcp_tw_2msl_stop(struct tcptw *tw)
{

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	TAILQ_REMOVE(&twq_2msl, tw, tw_2msl);
}

struct tcptw *
tcp_tw_2msl_scan(int reuse)
{
	struct tcptw *tw;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	for (;;) {
		tw = TAILQ_FIRST(&twq_2msl);
		if (tw == NULL || (!reuse && tw->tw_time > ticks))
			break;
		INP_LOCK(tw->tw_inpcb);
		tcp_twclose(tw, reuse);
		if (reuse)
			return (tw);
	}
	return (NULL);
}