/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD$
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#define	IPSEC
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>
#include <sys/md5.h>

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt, 0,
    "Default TCP Maximum Segment Size for IPv6");
#endif

#if 0
static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

int	tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
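/*
 * Usage note (added; not in the original file): the knobs above are
 * ordinary sysctls and can be inspected or changed at runtime with
 * sysctl(8), e.g.:
 *
 *	sysctl net.inet.tcp.rfc1323		(query the current value)
 *	sysctl net.inet.tcp.rfc1323=0		(disable RFC 1323 extensions)
 *
 * tcbhashsize is read-only (CTLFLAG_RD); it only reports the value taken
 * from the loader tunable net.inet.tcp.tcbhashsize in tcp_init() below.
 */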
/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
static int	tcp_inflight_enable = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int	tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int	tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");

static int	tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");

static int	tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");

static void	tcp_cleartaocache(void);
static struct inpcb *tcp_notify(struct inpcb *, int);
static void	tcp_discardcb(struct tcpcb *);

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif

/*
 * XXX
 * Callouts should be moved into struct tcp directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing purposes, which do not know about callouts.
 */
struct	tcpcb_mem {
	struct	tcpcb tcb;
	struct	callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
	struct	callout tcpcb_mem_2msl, tcpcb_mem_delack;
};

static uma_zone_t tcpcb_zone;
static uma_zone_t tcptw_zone;

/*
 * Tcp initialization
 */
void
tcp_init()
{
	int hashsize = TCBHASHSIZE;

	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
	    &tcbinfo.porthashmask);
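	/*
	 * Sizing note (added): hosts carrying very many connections can
	 * raise the hash size via the loader tunable fetched above, e.g.
	 * in /boot/loader.conf:
	 *
	 *	net.inet.tcp.tcbhashsize="4096"
	 *
	 * Values that are not a power of two are rejected and replaced
	 * with the safe default of 512.
	 */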
	tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR
	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcpcb_zone, maxsockets);
	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcptw_zone, maxsockets);
	tcp_timer_init();
	syncache_init();
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(inp, ip_ptr, tcp_ptr)
	struct inpcb *inp;
	void *ip_ptr;
	void *tcp_ptr;
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	} else
#endif
	{
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_hl = 5;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_x2 = 0;
	th->th_off = 5;
	th->th_flags = 0;
	th->th_win = 0;
	th->th_urp = 0;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(inp)
	struct inpcb *inp;
{
	struct mbuf *m;
	struct tcptemp *n;

	m = m_get(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (0);
	m->m_len = sizeof(struct tcptemp);
	n = mtod(m, struct tcptemp *);

	tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
	return (n);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
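/*
 * Example (added; a sketch of the keepalive path, whose exact call site
 * lives in tcp_timer.c and may differ in detail): the keepalive timer
 * builds a template with tcpip_maketemplate() and then forces a probe
 * with something like
 *
 *	tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t,
 *	    (struct mbuf *)NULL, tp->rcv_nxt, tp->snd_una - 1, 0);
 *
 * i.e. m == NULL and flags == 0, which elicits a bare ACK from the peer.
 */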
void
tcp_respond(tp, ipgen, th, m, ack, seq, flags)
	struct tcpcb *tp;
	void *ipgen;
	register struct tcphdr *th;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;
	struct route sro;
	struct ip *ip;
	struct tcphdr *nth;
#ifdef INET6
	struct route_in6 *ro6 = 0;
	struct route_in6 sro6;
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int ipflags = 0;

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

#ifdef INET6
	isipv6 = ((struct ip *)ipgen)->ip_v == 6;
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp) {
		if (!(flags & TH_RST)) {
			win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
#ifdef INET6
		if (isipv6)
			ro6 = &tp->t_inpcb->in6p_route;
		else
#endif /* INET6 */
		ro = &tp->t_inpcb->inp_route;
	} else {
#ifdef INET6
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else
#endif /* INET6 */
		{
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#ifdef INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
#ifdef INET6
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
		    tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else
#endif
	{
		tlen += sizeof (struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
#ifdef MAC
	if (tp != NULL && tp->t_inpcb != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		mac_create_mbuf_from_socket(tp->t_inpcb->inp_socket, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_reflect_mbuf_tcp(m);
	}
#endif
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
#ifdef INET6
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
		    ro6 && ro6->ro_rt ?
		    ro6->ro_rt->rt_ifp :
		    NULL);
	} else
#endif /* INET6 */
	{
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#ifdef INET6
	if (isipv6) {
		(void)ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
		    tp ? tp->t_inpcb : NULL);
		if (ro6 == &sro6 && ro6->ro_rt) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else
#endif /* INET6 */
	{
		(void) ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
		if (ro == &sro && ro->ro_rt) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct tcpcb_mem *tm;
	struct tcpcb *tp;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
	if (tm == NULL)
		return (NULL);
	tp = &tm->tcb;
	/* LIST_INIT(&tp->t_segq); */	/* XXX covered by M_ZERO */
	tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
		isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
		tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, 0);
	callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, 0);
	callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, 0);
	callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, 0);
	callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, 0);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	tp->t_bw_rtttime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

static void
tcp_discardcb(tp)
	struct tcpcb *tp;
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
	struct rtentry *rt;
	int dosavessthresh;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(tp->tt_rexmt);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_delack);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		register u_long i = 0;
#ifdef INET6
		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		}
		else
#endif /* INET6 */
		if ((rt = inp->inp_route.ro_rt) == NULL ||
		    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr
		    == INADDR_ANY)
			goto no_valid_rt;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
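		/*
		 * Worked example of the scale conversions above (added for
		 * clarity; assumes hz = 100): t_srtt is kept in units of
		 * ticks * TCP_RTT_SCALE (32), while rmx_rtt wants units of
		 * RTM_RTTUNIT per second (microseconds).  A 100 ms smoothed
		 * RTT is stored as t_srtt = 10 * 32 = 320, and
		 * 320 * (1000000 / (100 * 32)) = 320 * 312 = 99840, i.e.
		 * roughly 100 ms again on the way out.
		 */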
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		     i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
		    || dosavessthresh) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg +
#ifdef INET6
				      (isipv6 ? sizeof (struct ip6_hdr) +
					       sizeof (struct tcphdr) :
#endif
				       sizeof (struct tcpiphdr)
#ifdef INET6
				      )
#endif
				      );
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
    no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		FREE(q, M_TSEGQ);
	}
	inp->inp_ppcb = NULL;
	tp->t_inpcb = NULL;
	uma_zfree(tcpcb_zone, tp);
	soisdisconnected(so);
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
	struct socket *so = inp->inp_socket;
#endif

	tcp_discardcb(tp);
#ifdef INET6
	if (INP_CHECK_SOCKAF(so, AF_INET6))
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

void
tcp_drain()
{
	if (do_tcpdrain)
	{
		struct inpcb *inpb;
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 *      reassembly queue should be flushed, but in a situation
		 *	where we're really low on mbufs, this is potentially
		 *	useful.
		 */
		INP_INFO_RLOCK(&tcbinfo);
		LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
			if (inpb->inp_vflag & INP_TIMEWAIT)
				continue;
			INP_LOCK(inpb);
			if ((tcpb = intotcpcb(inpb))) {
				while ((te = LIST_FIRST(&tcpb->t_segq))
				    != NULL) {
					LIST_REMOVE(te, tqe_q);
					m_freem(te->tqe_m);
					FREE(te, M_TSEGQ);
				}
			}
			INP_UNLOCK(inpb);
		}
		INP_INFO_RUNLOCK(&tcbinfo);
	}
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return inp;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror) {
		tcp_drop(tp, error);
		return (struct inpcb *)0;
	} else {
		tp->t_softerror = error;
		return inp;
	}
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb);
		return 0;
	}

	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);

	sysctl_wire_old_buffer(req, 2 * (sizeof xig)
		+ n * sizeof(struct xtcpcb));

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return ENOMEM;

	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			/*
			 * XXX: This use of cr_cansee(), introduced with
			 * TCP state changes, is not quite right, but for
			 * now, better than nothing.
			 */
			if (inp->inp_vflag & INP_TIMEWAIT)
				error = cr_cansee(req->td->td_ucred,
				    intotw(inp)->tw_cred);
			else
				error = cr_canseesocket(req->td->td_ucred,
				    inp->inp_socket);
			if (error == 0)
				inp_list[i++] = inp;
		}
		INP_UNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb == NULL)
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			else if (inp->inp_vflag & INP_TIMEWAIT) {
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
				xt.xt_tp.t_state = TCPS_TIME_WAIT;
			} else
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			else {
				bzero(&xt.xt_socket, sizeof xt.xt_socket);
				xt.xt_socket.xso_protocol = IPPROTO_TCP;
			}
			xt.xt_inp.inp_gencnt = inp->inp_gencnt;
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		s = splnet();
		INP_INFO_RLOCK(&tcbinfo);
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&tcbinfo);
		splx(s);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
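/*
 * Consumer sketch (added; not part of the original file).  Userland tools
 * such as netstat(1) read this OID roughly as follows; a hypothetical
 * minimal reader:
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0);
 *
 * The buffer then holds a struct xinpgen, a run of struct xtcpcb records
 * (each xt_len gives the record stride), and a trailing struct xinpgen
 * whose xig_gen is compared against the first to detect changes made
 * while the list was being assembled.
 */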
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error, s;

	error = suser_cred(req->td->td_ucred, PRISON_ROOT);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error, s, mapped = 0;

	error = suser_cred(req->td->td_ucred, PRISON_ROOT);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
			return (EINVAL);
	}
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	if (mapped == 1)
		inp = in_pcblookup_hash(&tcbinfo,
			*(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
			addrs[1].sin6_port,
			*(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
			addrs[0].sin6_port,
			0, NULL);
	else
		inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
			addrs[1].sin6_port,
			&addrs[0].sin6_addr, addrs[0].sin6_port,
			0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
#endif


void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmp_seq;
	int s;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip) {
		s = splnet();
		th = (struct tcphdr *)((caddr_t)ip
		    + (ip->ip_hl << 2));
		INP_INFO_WLOCK(&tcbinfo);
		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL) {
			INP_LOCK(inp);
			if (inp->inp_socket != NULL) {
				icmp_seq = htonl(th->th_seq);
				tp = intotcpcb(inp);
				if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
				    SEQ_LT(icmp_seq, tp->snd_max))
					inp = (*notify)(inp, inetctlerrmap[cmd]);
			}
			if (inp)
				INP_UNLOCK(inp);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		INP_INFO_WUNLOCK(&tcbinfo);
		splx(s);
	} else
		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}

#ifdef INET6
void
tcp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	struct tcphdr th;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&tcb, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else
		in6_pcbnotify(&tcb, sa, 0, (const struct sockaddr *)sa6_src,
		    0, cmd, notify);
}
#endif /* INET6 */


/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 */

#define ISN_BYTES_PER_SECOND 1048576

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(tp)
	struct tcpcb *tp;
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
		< (u_int)ticks))) {
		read_random(&isn_secret, sizeof(isn_secret));
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return new_isn;
}
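/*
 * Arithmetic note (added for clarity): with ISN_BYTES_PER_SECOND = 2^20,
 * the time-derived component above walks the 32-bit sequence space in
 * 2^32 / 2^20 = 4096 seconds, roughly 68 minutes -- hence the "over an
 * hour before rollover" remark in the block comment.  With hz = 100,
 * each tick adds ISN_BYTES_PER_SECOND / hz = 10485 to the ISN.
 */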
/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
struct inpcb *
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
	return (inp);
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
struct inpcb *
tcp_drop_syn_sent(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp && tp->t_state == TCPS_SYN_SENT) {
		tcp_drop(tp, errno);
		return (struct inpcb *)0;
	}
	return inp;
}

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
struct inpcb *
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;
#ifdef INET6
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	if (tp) {
#ifdef INET6
		if (isipv6)
			rt = tcp_rtlookup6(&inp->inp_inc);
		else
#endif /* INET6 */
		rt = tcp_rtlookup(&inp->inp_inc);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			tp->t_maxopd = tp->t_maxseg =
#ifdef INET6
				isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
				tcp_mssdflt;
			return inp;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu -
#ifdef INET6
			(isipv6 ?
			 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
			 sizeof(struct tcpiphdr)
#ifdef INET6
			)
#endif /* INET6 */
			;

		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
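		/*
		 * Worked example (added; assumes a route MTU of 9000,
		 * timestamps negotiated, and MCLBYTES = 2048): mss starts
		 * at 9000 - sizeof(struct tcpiphdr) = 8960; subtracting
		 * TCPOLEN_TSTAMP_APPA (12) below leaves 8948, which the
		 * cluster rounding then trims to 8192.  For a plain
		 * Ethernet MTU of 1500 the result is 1460 (1448 with
		 * timestamps), small enough that the rounding step does
		 * not apply.
		 */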
		if (tp->t_maxopd <= mss)
			return inp;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
	return inp;
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inc)
	struct in_conninfo *inc;
{
	struct route *ro;
	struct rtentry *rt;

	ro = &inc->inc_route;
	rt = ro->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
			rt = ro->ro_rt;
		}
	}
	return rt;
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(inc)
	struct in_conninfo *inc;
{
	struct route_in6 *ro6;
	struct rtentry *rt;

	ro6 = &inc->inc6_route;
	rt = ro6->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
			rt = ro6->ro_rt;
		}
	}
	return rt;
}
#endif /* INET6 */

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return 0;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (!m)
		return 0;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
			sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcpip_fillheaders(inp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcpip_fillheaders(inp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return hdrsiz;
}
#endif /*IPSEC*/

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inc)
	struct in_conninfo *inc;
{
	struct rtentry *rt;

#ifdef INET6
	if (inc->inc_isipv6)
		rt = tcp_rtlookup6(inc);
	else
#endif /* INET6 */
	rt = tcp_rtlookup(inc);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
		return NULL;

	return rmx_taop(rt->rt_rmx);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there is
 * nothing in the cache left over.
 */
static void
tcp_cleartaocache()
{
}

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is unlocked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(tp)
	struct tcpcb *tp;
{
	struct tcptw *tw;
	struct inpcb *inp;
	int tw_time, acknow;
	struct socket *so;

	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_timer_2msl_tw(1);
		if (tw == NULL) {
			tcp_close(tp);
			return;
		}
	}
	inp = tp->t_inpcb;
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP))
		tw->t_recent = tp->ts_recent;
	else
		tw->t_recent = 0;

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->cc_recv = tp->cc_recv;
	tw->cc_send = tp->cc_send;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

/* XXX
 * If this code will
 * be used for fin-wait-2 state also, then we may need
 * a ts_recent from the last segment.
 */
	/* Shorten TIME_WAIT [RFC-1644, p.28] */
	if (tp->cc_recv != 0 && (ticks - tp->t_starttime) < tcp_msl) {
		tw_time = tp->t_rxtcur * TCPTV_TWTRUNC;
		/* For T/TCP client, force ACK now. */
		acknow = 1;
	} else {
		tw_time = 2 * tcp_msl;
		acknow = tp->t_flags & TF_ACKNOW;
	}
	tcp_discardcb(tp);
	so = inp->inp_socket;
	so->so_pcb = NULL;
	tw->tw_cred = crhold(so->so_cred);
	tw->tw_so_options = so->so_options;
	if (acknow)
		tcp_twrespond(tw, so, NULL, TH_ACK);
	sotryfree(so);
	inp->inp_socket = NULL;
	inp->inp_ppcb = (caddr_t)tw;
	inp->inp_vflag |= INP_TIMEWAIT;
	tcp_timer_2msl_reset(tw, tw_time);
	INP_UNLOCK(inp);
}

struct tcptw *
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct inpcb *inp;

	inp = tw->tw_inpcb;
	tw->tw_inpcb = NULL;
	tcp_timer_2msl_stop(tw);
	inp->inp_ppcb = NULL;
#ifdef INET6
	if (inp->inp_vflag & INP_IPV6PROTO)
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	if (reuse)
		return (tw);
	uma_zfree(tcptw_zone, tw);
	return (NULL);
}
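/*
 * Timing note (added; assumes the tcp_timer.h defaults of this era):
 * with tcp_msl = TCPTV_MSL = 30 seconds, the normal hold in tcp_twstart()
 * above is 2 * tcp_msl = 60 seconds, while the RFC 1644 truncation path
 * uses tw_time = t_rxtcur * TCPTV_TWTRUNC, a small multiple of the
 * current RTO, so a T/TCP connection with a 300 ms RTO lingers for a
 * couple of seconds rather than a full minute.
 */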
/*
 * One of so and msrc must be non-NULL for use by the MAC Framework to
 * construct a label for any resulting packet.
 */
int
tcp_twrespond(struct tcptw *tw, struct socket *so, struct mbuf *msrc,
    int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
	struct tcphdr *th;
	struct mbuf *m;
	struct ip *ip = NULL;
	u_int8_t *optp;
	u_int hdrlen, optlen;
	int error;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_isipv6;
#endif

	KASSERT(so != NULL || msrc != NULL,
	    ("tcp_twrespond: so and msrc NULL"));

	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	if (so != NULL)
		mac_create_mbuf_from_socket(so, m);
	else
		mac_create_mbuf_netlayer(msrc, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	} else
#endif
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
	optp = (u_int8_t *)(th + 1);

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYN's and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp = htonl(tw->t_recent);
		optp += TCPOLEN_TSTAMP_APPA;
	}

	/*
	 * Send `CC-family' options if needed, and it's not a RST.
	 */
	if (tw->cc_recv != 0 && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
		*lp = htonl(tw->cc_send);
		optp += TCPOLEN_CC_APPA;
	}
	optlen = optp - (u_int8_t *)(th + 1);

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

#ifdef INET6
	if (isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    sizeof(struct tcphdr) + optlen);
		ip6->ip6_hlim = in6_selecthlim(inp, inp->in6p_route.ro_rt ?
		    inp->in6p_route.ro_rt->rt_ifp : NULL);
		error = ip6_output(m, inp->in6p_outputopts, &inp->in6p_route,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		ip->ip_len = m->m_pkthdr.len;
		error = ip_output(m, inp->inp_options, &inp->inp_route,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, inp);
	}
	if (flags & TH_ACK)
		tcpstat.tcps_sndacks++;
	else
		tcpstat.tcps_sndctrl++;
	tcpstat.tcps_sndtotal++;
	return (error);
}

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be identical to) TCP/Vegas.  The code
 * operates on the transmitter side of a TCP connection and so only affects
 * the transmit side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (tcp_inflight_enable == 0) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 *
	 * Note: if ticks rollover 'bw' may wind up negative.  We must
	 * effectively reset t_bw_rtttime for this case.
	 */
	save_ticks = ticks;
	if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
		return;

	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
	    (save_ticks - tp->t_bw_rtttime);
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	if (tp->t_bw_rtttime == 0 || (int)bw < 0)
		return;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case and stabilization.
	 * Without the slop we could be locking ourselves into a lower
	 * bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    adjusted on very slow connections.  A value no smaller
	 *	    than 5 should be used, but only reduce this default if
	 *	    you have no other choice.
	 */
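	/*
	 * Worked example of the calculation below (added for clarity;
	 * assumes hz = 100 and the default tcp_inflight_stab of 20):
	 * t_srtt and t_rttbest are kept in units of ticks << TCP_RTT_SHIFT,
	 * so USERTT / (hz << TCP_RTT_SHIFT) is the RTT in seconds.  For
	 * bw = 1,000,000 bytes/sec and a 100 ms RTT (USERTT = 320), the
	 * first term is 1000000 * 320 / 3200 = 100000 bytes -- the
	 * classical bandwidth-delay product -- and the slop term adds
	 * 20 * 1460 / 10 = 2920 bytes, i.e. two 1460-byte segments.
	 */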
#define USERTT ((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	    tcp_inflight_stab * tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
			    tp,
			    bw,
			    tp->t_rttbest,
			    tp->t_srtt,
			    bwnd
			);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
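/*
 * Usage note (added): the limiter above is disabled by default and is
 * switched on at runtime, e.g.:
 *
 *	sysctl net.inet.tcp.inflight_enable=1
 *	sysctl net.inet.tcp.inflight_debug=1
 *
 * With inflight_debug set to 1, tcp_xmit_bandwidth_limit() prints at
 * most one state line per second, since the debug printf is gated on
 * hz / tcp_inflight_debug ticks elapsing between lines.
 */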