/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD$
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#define	IPSEC
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>
#include <sys/md5.h>

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt , 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt , 0,
    "Default TCP Maximum Segment Size for IPv6");
#endif

#if 0
static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt , 0, "Default maximum TCP Round Trip Time");
#endif

int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions");

int	tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644 , 0, "Enable rfc1644 (TTCP) extensions");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
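/*
 * Illustrative note (not part of the original source): the knobs above
 * are runtime sysctls, so an administrator would typically tune them
 * from userland with sysctl(8); values below are hypothetical:
 *
 *	sysctl net.inet.tcp.rfc1323=1
 *	sysctl net.inet.tcp.icmp_may_rst=0
 *	sysctl net.inet.tcp.isn_reseed_interval=3600
 */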
/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
static int	tcp_inflight_enable = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int	tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int	tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");

static int	tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");
static int	tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");

static void	tcp_cleartaocache(void);
static struct inpcb *tcp_notify(struct inpcb *, int);
static void	tcp_discardcb(struct tcpcb *);

/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif

/*
 * XXX
 * Callouts should be moved into struct tcp directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing purposes, which do not know about callouts.
 */
struct	tcpcb_mem {
	struct	tcpcb tcb;
	struct	callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
	struct	callout tcpcb_mem_2msl, tcpcb_mem_delack;
};

static uma_zone_t tcpcb_zone;
static uma_zone_t tcptw_zone;

/*
 * Tcp initialization
 */
void
tcp_init()
{
	int hashsize = TCBHASHSIZE;

	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
	    &tcbinfo.porthashmask);
	tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR
	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcpcb_zone, maxsockets);
	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcptw_zone, maxsockets);
	tcp_timer_init();
	syncache_init();
}
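/*
 * Illustrative note (not part of the original source): because the hash
 * size is fetched with TUNABLE_INT_FETCH() above, it can only be set in
 * the kernel environment before boot, e.g. in /boot/loader.conf:
 *
 *	net.inet.tcp.tcbhashsize="1024"
 *
 * The value must be a power of 2, or tcp_init() falls back to 512.
 */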
/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(inp, ip_ptr, tcp_ptr)
	struct inpcb *inp;
	void *ip_ptr;
	void *tcp_ptr;
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	} else
#endif
	{
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_hl = 5;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_x2 = 0;
	th->th_off = 5;
	th->th_flags = 0;
	th->th_win = 0;
	th->th_urp = 0;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(inp)
	struct inpcb *inp;
{
	struct mbuf *m;
	struct tcptemp *n;

	m = m_get(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (0);
	m->m_len = sizeof(struct tcptemp);
	n = mtod(m, struct tcptemp *);

	tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
	return (n);
}
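/*
 * Sketch of the intended caller, for illustration only (the keepalive
 * timer in tcp_timer.c does roughly this; details are from memory and
 * not part of this file):
 *
 *	struct tcptemp *t = tcpip_maketemplate(inp);
 *	if (t != NULL) {
 *		tcp_respond(tp, t->tt_ipgen, &t->tt_t, (struct mbuf *)NULL,
 *		    tp->rcv_nxt, tp->snd_una - 1, 0);
 *		(void) m_free(dtom(t));
 *	}
 *
 * Sending snd_una - 1 forces the peer to ACK, probing liveness without
 * transmitting new data.
 */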
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ipgen, th, m, ack, seq, flags)
	struct tcpcb *tp;
	void *ipgen;
	register struct tcphdr *th;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;
	struct route sro;
	struct ip *ip;
	struct tcphdr *nth;
#ifdef INET6
	struct route_in6 *ro6 = 0;
	struct route_in6 sro6;
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int ipflags = 0;

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

#ifdef INET6
	isipv6 = ((struct ip *)ipgen)->ip_v == 6;
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp) {
		if (!(flags & TH_RST)) {
			win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
#ifdef INET6
		if (isipv6)
			ro6 = &tp->t_inpcb->in6p_route;
		else
#endif /* INET6 */
		ro = &tp->t_inpcb->inp_route;
	} else {
#ifdef INET6
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else
#endif /* INET6 */
		{
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#ifdef INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
#ifdef INET6
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
		    tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else
#endif
	{
		tlen += sizeof (struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
#ifdef MAC
	if (tp != NULL && tp->t_inpcb != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		mac_create_mbuf_from_socket(tp->t_inpcb->inp_socket, m);
	} else {
		/*
		 * XXXMAC: This will need to call a mac function that
		 * modifies the mbuf label in place for TCP datagrams
		 * not associated with a PCB.
		 */
	}
#endif
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
#ifdef INET6
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
		    ro6 && ro6->ro_rt ?
		    ro6->ro_rt->rt_ifp :
		    NULL);
	} else
#endif /* INET6 */
	{
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#ifdef INET6
	if (isipv6) {
		(void)ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
		    tp ? tp->t_inpcb : NULL);
		if (ro6 == &sro6 && ro6->ro_rt) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else
#endif /* INET6 */
	{
		(void) ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
		if (ro == &sro && ro->ro_rt) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct tcpcb_mem *tm;
	struct tcpcb *tp;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
	if (tm == NULL)
		return (NULL);
	tp = &tm->tcb;
	/* LIST_INIT(&tp->t_segq); */	/* XXX covered by M_ZERO */
	tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
		isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
		tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, 0);
	callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, 0);
	callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, 0);
	callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, 0);
	callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, 0);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	tp->t_bw_rtttime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}
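/*
 * Added note (worked from the constants; not in the original source):
 * the t_rttvar seed above is ((TCPTV_RTOBASE - TCPTV_SRTTBASE)
 * << TCP_RTTVAR_SHIFT) / 4, i.e. one quarter of TCPTV_RTOBASE in
 * rttvar fixed-point units.  With srtt = TCPTV_SRTTBASE = 0, the usual
 * srtt + 4 * rttvar retransmit estimate then evaluates to exactly
 * TCPTV_RTOBASE (nominally 3 seconds) once the scaling cancels out,
 * matching the explicit t_rxtcur seed above.
 */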
/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

static void
tcp_discardcb(tp)
	struct tcpcb *tp;
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
	struct rtentry *rt;
	int dosavessthresh;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(tp->tt_rexmt);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_delack);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		register u_long i = 0;
#ifdef INET6
		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		}
		else
#endif /* INET6 */
		if ((rt = inp->inp_route.ro_rt) == NULL ||
		    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr
		    == INADDR_ANY)
			goto no_valid_rt;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		     i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
		    || dosavessthresh) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg +
#ifdef INET6
				      (isipv6 ? sizeof (struct ip6_hdr) +
					       sizeof (struct tcphdr) :
#endif
				       sizeof (struct tcpiphdr)
#ifdef INET6
				      )
#endif
				      );
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		FREE(q, M_TSEGQ);
	}
	inp->inp_ppcb = NULL;
	tp->t_inpcb = NULL;
	uma_zfree(tcpcb_zone, tp);
	soisdisconnected(so);
}
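/*
 * Worked example of the conversions in tcp_discardcb() above
 * (illustrative; hz = 100 assumed): t_srtt is kept in units of
 * 1/TCP_RTT_SCALE (1/32) of a tick, while route metrics use RTM_RTTUNIT
 * units per second (microseconds).  A one-second smoothed RTT is stored
 * as t_srtt = 100 * 32 = 3200, and the conversion multiplies by
 * RTM_RTTUNIT / (hz * TCP_RTT_SCALE) = 1000000 / 3200 = 312, giving
 * roughly 1000000 usec (integer truncation costs a fraction of a
 * percent).  The ssthresh save converts user data bytes to whole
 * segments ((i + maxseg/2) / maxseg, floored at 2) and scales back up
 * by maxseg plus IP/TCP header overhead to approximate wire bytes.
 */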
/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
	struct socket *so = inp->inp_socket;
#endif

	tcp_discardcb(tp);
#ifdef INET6
	if (INP_CHECK_SOCKAF(so, AF_INET6))
		in6_pcbdetach(inp);
	else
#endif
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

void
tcp_drain()
{
	if (do_tcpdrain)
	{
		struct inpcb *inpb;
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 * reassembly queue should be flushed, but in a situation
		 * where we're really low on mbufs, this is potentially
		 * useful.
		 */
		INP_INFO_RLOCK(&tcbinfo);
		LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
			if (inpb->inp_vflag & INP_TIMEWAIT)
				continue;
			INP_LOCK(inpb);
			if ((tcpb = intotcpcb(inpb))) {
				while ((te = LIST_FIRST(&tcpb->t_segq))
				    != NULL) {
					LIST_REMOVE(te, tqe_q);
					m_freem(te->tqe_m);
					FREE(te, M_TSEGQ);
				}
			}
			INP_UNLOCK(inpb);
		}
		INP_INFO_RUNLOCK(&tcbinfo);
	}
}
/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return inp;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror) {
		tcp_drop(tp, error);
		return (struct inpcb *)0;
	} else {
		tp->t_softerror = error;
		return inp;
	}
#if 0
	wakeup( &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb);
		return 0;
	}

	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);

	sysctl_wire_old_buffer(req, 2 * (sizeof xig)
		+ n * sizeof(struct xtcpcb));

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return ENOMEM;

	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    (((inp->inp_vflag & INP_TIMEWAIT) &&
		      cr_cansee(req->td->td_ucred, intotw(inp)->tw_cred) == 0) ||
		     cr_canseesocket(req->td->td_ucred, inp->inp_socket) == 0))
			inp_list[i++] = inp;
		INP_UNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb == NULL)
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			else if (inp->inp_vflag & INP_TIMEWAIT) {
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
				xt.xt_tp.t_state = TCPS_TIME_WAIT;
			} else
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			else {
				bzero(&xt.xt_socket, sizeof xt.xt_socket);
				xt.xt_socket.xso_protocol = IPPROTO_TCP;
			}
			xt.xt_inp.inp_gencnt = inp->inp_gencnt;
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		s = splnet();
		INP_INFO_RLOCK(&tcbinfo);
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&tcbinfo);
		splx(s);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
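/*
 * Illustrative userland consumer of the pcblist sysctl above (a sketch
 * in the style of netstat(1); not part of this file, error handling
 * omitted):
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0);
 *	char *buf = malloc(len);
 *	sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0);
 *
 * The buffer holds a leading struct xinpgen, a run of struct xtcpcb
 * records, and a trailing struct xinpgen; if the two xig_gen values
 * differ, the list changed underneath the reader and should be re-read.
 */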
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error, s;

	error = suser_cred(req->td->td_ucred, PRISON_ROOT);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error, s, mapped = 0;

	error = suser_cred(req->td->td_ucred, PRISON_ROOT);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
			return (EINVAL);
	}
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	if (mapped == 1)
		inp = in_pcblookup_hash(&tcbinfo,
			*(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
			addrs[1].sin6_port,
			*(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
			addrs[0].sin6_port,
			0, NULL);
	else
		inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
			addrs[1].sin6_port,
			&addrs[0].sin6_addr, addrs[0].sin6_port,
			0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
	    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
	    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
#endif
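/*
 * Hypothetical caller of the getcred sysctls above (e.g. an ident-style
 * daemon; sketch only, not part of this file).  Per the lookup in the
 * handler, addrs[0] is the local endpoint and addrs[1] the foreign one:
 *
 *	struct sockaddr_in addrs[2] = { local_sin, foreign_sin };
 *	struct xucred xuc;
 *	size_t len = sizeof(xuc);
 *	sysctlbyname("net.inet.tcp.getcred", &xuc, &len,
 *	    addrs, sizeof(addrs));
 *
 * On success, xuc.cr_uid identifies the owner of the connection.
 */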
void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmp_seq;
	int s;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
		cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	else if ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip) {
		s = splnet();
		th = (struct tcphdr *)((caddr_t)ip
		    + (ip->ip_hl << 2));
		INP_INFO_WLOCK(&tcbinfo);
		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL) {
			INP_LOCK(inp);
			if (inp->inp_socket != NULL) {
				icmp_seq = htonl(th->th_seq);
				tp = intotcpcb(inp);
				if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
				    SEQ_LT(icmp_seq, tp->snd_max))
					inp = (*notify)(inp, inetctlerrmap[cmd]);
			}
			if (inp)
				INP_UNLOCK(inp);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		INP_INFO_WUNLOCK(&tcbinfo);
		splx(s);
	} else
		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}

#ifdef INET6
void
tcp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	struct tcphdr th;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&tcb, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else
		in6_pcbnotify(&tcb, sa, 0, (const struct sockaddr *)sa6_src,
		    0, cmd, notify);
}
#endif /* INET6 */

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 */

#define ISN_BYTES_PER_SECOND 1048576

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(tp)
	struct tcpcb *tp;
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
		< (u_int)ticks))) {
		read_random(&isn_secret, sizeof(isn_secret));
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return new_isn;
}
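/*
 * Worked example of the RFC 1948 form above (illustrative, assuming
 * hz = 100): the ISN is F(laddr, lport, faddr, fport, secret) plus a
 * time term that advances by ISN_BYTES_PER_SECOND / hz =
 * 1048576 / 100 = 10485 per tick, i.e. one megabyte per second.  At
 * that rate the 32-bit sequence space wraps after 2^32 / 2^20 = 4096
 * seconds, which is the "over an hour before rollover" noted above.
 */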
/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
struct inpcb *
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
	return (inp);
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
struct inpcb *
tcp_drop_syn_sent(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp && tp->t_state == TCPS_SYN_SENT) {
		tcp_drop(tp, errno);
		return (struct inpcb *)0;
	}
	return inp;
}

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
struct inpcb *
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;
#ifdef INET6
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	if (tp) {
#ifdef INET6
		if (isipv6)
			rt = tcp_rtlookup6(&inp->inp_inc);
		else
#endif /* INET6 */
		rt = tcp_rtlookup(&inp->inp_inc);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			tp->t_maxopd = tp->t_maxseg =
#ifdef INET6
				isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
				tcp_mssdflt;
			return inp;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu -
#ifdef INET6
			(isipv6 ?
			 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
			 sizeof(struct tcpiphdr)
#ifdef INET6
			)
#endif /* INET6 */
			;

		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return inp;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
	return inp;
}
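/*
 * Worked example of the MSS clamping above (illustrative; assumes
 * MCLBYTES = 2048 and an IPv4 path): a discovered PMTU of 4352 (FDDI)
 * gives mss = 4352 - sizeof(struct tcpiphdr) = 4312; subtracting the
 * 12 bytes of timestamp options leaves 4300, and rounding down to a
 * multiple of MCLBYTES yields 4096, so full-sized segments pack
 * exactly into two mbuf clusters.
 */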
/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inc)
	struct in_conninfo *inc;
{
	struct route *ro;
	struct rtentry *rt;

	ro = &inc->inc_route;
	rt = ro->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
			rt = ro->ro_rt;
		}
	}
	return rt;
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(inc)
	struct in_conninfo *inc;
{
	struct route_in6 *ro6;
	struct rtentry *rt;

	ro6 = &inc->inc6_route;
	rt = ro6->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
			rt = ro6->ro_rt;
		}
	}
	return rt;
}
#endif /* INET6 */

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return 0;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (!m)
		return 0;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
			sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcpip_fillheaders(inp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcpip_fillheaders(inp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return hdrsiz;
}
#endif /*IPSEC*/
/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inc)
	struct in_conninfo *inc;
{
	struct rtentry *rt;

#ifdef INET6
	if (inc->inc_isipv6)
		rt = tcp_rtlookup6(inc);
	else
#endif /* INET6 */
	rt = tcp_rtlookup(inc);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
		return NULL;

	return rmx_taop(rt->rt_rmx);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there is
 * nothing left over in the cache.
 */
static void
tcp_cleartaocache()
{
}
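/*
 * Added commentary (not from the original source): in tcp_twstart()
 * below, a TIME_WAIT connection is demoted from a full tcpcb to the
 * much smaller struct tcptw from tcptw_zone, and the socket itself is
 * freed, so a burst of closing connections holds kernel memory for
 * 2*MSL at a fraction of the cost of live connections.
 */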
/*
 * Move a TCP connection into TIME_WAIT state.
 *	tcbinfo is unlocked.
 *	inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(tp)
	struct tcpcb *tp;
{
	struct tcptw *tw;
	struct inpcb *inp;
	int tw_time, acknow;
	struct socket *so;

	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_timer_2msl_tw(1);
		if (tw == NULL) {
			tcp_close(tp);
			return;
		}
	}
	inp = tp->t_inpcb;
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP))
		tw->t_recent = tp->ts_recent;
	else
		tw->t_recent = 0;

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->cc_recv = tp->cc_recv;
	tw->cc_send = tp->cc_send;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX
	 * If this code will be used for fin-wait-2 state also, then we
	 * may need a ts_recent from the last segment.
	 */
	/* Shorten TIME_WAIT [RFC-1644, p.28] */
	if (tp->cc_recv != 0 && (ticks - tp->t_starttime) < tcp_msl) {
		tw_time = tp->t_rxtcur * TCPTV_TWTRUNC;
		/* For T/TCP client, force ACK now. */
		acknow = 1;
	} else {
		tw_time = 2 * tcp_msl;
		acknow = tp->t_flags & TF_ACKNOW;
	}
	tcp_discardcb(tp);
	so = inp->inp_socket;
	so->so_pcb = NULL;
	tw->tw_cred = crhold(so->so_cred);
	tw->tw_so_options = so->so_options;
	sotryfree(so);
	inp->inp_socket = NULL;
	inp->inp_ppcb = (caddr_t)tw;
	inp->inp_vflag |= INP_TIMEWAIT;
	tcp_timer_2msl_reset(tw, tw_time);
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	INP_UNLOCK(inp);
}

struct tcptw *
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct inpcb *inp;

	inp = tw->tw_inpcb;
	tw->tw_inpcb = NULL;
	tcp_timer_2msl_stop(tw);
	inp->inp_ppcb = NULL;
#ifdef INET6
	if (inp->inp_vflag & INP_IPV6PROTO)
		in6_pcbdetach(inp);
	else
#endif
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	if (reuse)
		return (tw);
	uma_zfree(tcptw_zone, tw);
	return (NULL);
}

int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
	struct tcphdr *th;
	struct mbuf *m;
	struct ip *ip = NULL;
	u_int8_t *optp;
	u_int hdrlen, optlen;
	int error;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_isipv6;
#endif

	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	} else
#endif
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
	optp = (u_int8_t *)(th + 1);

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYN's and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(tw->t_recent);
		optp += TCPOLEN_TSTAMP_APPA;
	}

	/*
	 * Send `CC-family' options if needed, and it's not a RST.
	 */
	if (tw->cc_recv != 0 && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
		*lp   = htonl(tw->cc_send);
		optp += TCPOLEN_CC_APPA;
	}
	optlen = optp - (u_int8_t *)(th + 1);

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

#ifdef INET6
	if (isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    sizeof(struct tcphdr) + optlen);
		ip6->ip6_hlim = in6_selecthlim(inp, inp->in6p_route.ro_rt ?
		    inp->in6p_route.ro_rt->rt_ifp : NULL);
		error = ip6_output(m, inp->in6p_outputopts, &inp->in6p_route,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		ip->ip_len = m->m_pkthdr.len;
		error = ip_output(m, inp->inp_options, &inp->inp_route,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, inp);
	}
	if (flags & TH_ACK)
		tcpstat.tcps_sndacks++;
	else
		tcpstat.tcps_sndctrl++;
	tcpstat.tcps_sndtotal++;
	return (error);
}

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be identical to) TCP/Vegas.  The code
 * operates on the transmitter side of a TCP connection and so only affects
 * the transmit side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
For this reason we have to stabilize the 1829 * elements used to calculate the window. We do this by using the minimum 1830 * observed RTT, the long term average of the observed bandwidth, and 1831 * by adding two segments worth of slop. It isn't perfect but it is able 1832 * to react to changing conditions and gives us a very stable basis on 1833 * which to extend the algorithm. 1834 */ 1835 void 1836 tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq) 1837 { 1838 u_long bw; 1839 u_long bwnd; 1840 int save_ticks; 1841 1842 /* 1843 * If inflight_enable is disabled in the middle of a tcp connection, 1844 * make sure snd_bwnd is effectively disabled. 1845 */ 1846 if (tcp_inflight_enable == 0) { 1847 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; 1848 tp->snd_bandwidth = 0; 1849 return; 1850 } 1851 1852 /* 1853 * Figure out the bandwidth. Due to the tick granularity this 1854 * is a very rough number and it MUST be averaged over a fairly 1855 * long period of time. XXX we need to take into account a link 1856 * that is not using all available bandwidth, but for now our 1857 * slop will ramp us up if this case occurs and the bandwidth later 1858 * increases. 1859 * 1860 * Note: if ticks rollover 'bw' may wind up negative. We must 1861 * effectively reset t_bw_rtttime for this case. 1862 */ 1863 save_ticks = ticks; 1864 if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1) 1865 return; 1866 1867 bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / 1868 (save_ticks - tp->t_bw_rtttime); 1869 tp->t_bw_rtttime = save_ticks; 1870 tp->t_bw_rtseq = ack_seq; 1871 if (tp->t_bw_rtttime == 0 || (int)bw < 0) 1872 return; 1873 bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4; 1874 1875 tp->snd_bandwidth = bw; 1876 1877 /* 1878 * Calculate the semi-static bandwidth delay product, plus two maximal 1879 * segments. The additional slop puts us squarely in the sweet 1880 * spot and also handles the bandwidth run-up case and stabilization. 1881 * Without the slop we could be locking ourselves into a lower 1882 * bandwidth. 1883 * 1884 * Situations Handled: 1885 * (1) Prevents over-queueing of packets on LANs, especially on 1886 * high speed LANs, allowing larger TCP buffers to be 1887 * specified, and also does a good job preventing 1888 * over-queueing of packets over choke points like modems 1889 * (at least for the transmit side). 1890 * 1891 * (2) Is able to handle changing network loads (bandwidth 1892 * drops so bwnd drops, bandwidth increases so bwnd 1893 * increases). 1894 * 1895 * (3) Theoretically should stabilize in the face of multiple 1896 * connections implementing the same algorithm (this may need 1897 * a little work). 1898 * 1899 * (4) Stability value (defaults to 20 = 2 maximal packets) can 1900 * be adjusted with a sysctl but typically only needs to be 1901 * on very slow connections. A value no smaller then 5 1902 * should be used, but only reduce this default if you have 1903 * no other choice. 
#define USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	    tcp_inflight_stab * tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
			    tp,
			    bw,
			    tp->t_rttbest,
			    tp->t_srtt,
			    bwnd
			);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
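/*
 * Worked example of the bwnd computation above (illustrative; all
 * values assumed): take hz = 100, TCP_RTT_SHIFT = 5, a smoothed
 * bandwidth of bw = 1250000 bytes/sec, USERTT = 160 (a 50 ms RTT,
 * since 0.05 sec * 100 ticks/sec * 32 = 160), maxseg = 1460, and the
 * default tcp_inflight_stab = 20.  Then
 *
 *	bwnd = 1250000 * 160 / (100 << 5) + 20 * 1460 / 10
 *	     = 62500 + 2920 bytes,
 *
 * i.e. the 50 ms * 1.25 MB/sec bandwidth-delay product plus two
 * segments of slop.
 */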