/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD$
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /* IPSEC */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#define	IPSEC
#endif /* FAST_IPSEC */

#include <machine/in_cksum.h>
#include <sys/md5.h>

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt, 0,
    "Default TCP Maximum Segment Size for IPv6");
#endif

#if 0
static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

int	tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
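
/*
 * Usage sketch (shell commands, values illustrative only): the knobs
 * above are ordinary sysctl variables, so they can be inspected and
 * tuned from userland at runtime, e.g.:
 *
 *	sysctl net.inet.tcp.rfc1323
 *	sysctl net.inet.tcp.isn_reseed_interval=3600
 */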
/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
static int	tcp_inflight_enable = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int	tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int	tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");

static int	tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");

static int	tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");

static void	tcp_cleartaocache(void);
static struct inpcb *tcp_notify(struct inpcb *, int);
static void	tcp_discardcb(struct tcpcb *);

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize.
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif
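
/*
 * Configuration sketch (assumed boot-time setup, not part of this
 * file): because the tcbhashsize sysctl above is CTLFLAG_RDTUN, the
 * value is read-only at runtime and is taken from the loader
 * environment before tcp_init() runs, e.g. in /boot/loader.conf:
 *
 *	net.inet.tcp.tcbhashsize="1024"
 *
 * tcp_init() below rejects values that are not a power of 2 and falls
 * back to the 512-bucket default.
 */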
/*
 * XXX
 * Callouts should be moved into struct tcp directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing purposes, which do not know about callouts.
 */
struct	tcpcb_mem {
	struct	tcpcb tcb;
	struct	callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
	struct	callout tcpcb_mem_2msl, tcpcb_mem_delack;
};

static uma_zone_t tcpcb_zone;
static uma_zone_t tcptw_zone;

/*
 * Tcp initialization
 */
void
tcp_init()
{
	int hashsize = TCBHASHSIZE;

	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
	    &tcbinfo.porthashmask);
	tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR
	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcpcb_zone, maxsockets);
	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcptw_zone, maxsockets / 5);
	tcp_timer_init();
	syncache_init();
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(inp, ip_ptr, tcp_ptr)
	struct inpcb *inp;
	void *ip_ptr;
	void *tcp_ptr;
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	} else
#endif
	{
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_hl = 5;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_x2 = 0;
	th->th_off = 5;
	th->th_flags = 0;
	th->th_win = 0;
	th->th_urp = 0;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(inp)
	struct inpcb *inp;
{
	struct mbuf *m;
	struct tcptemp *n;

	m = m_get(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (0);
	m->m_len = sizeof(struct tcptemp);
	n = mtod(m, struct tcptemp *);

	tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
	return (n);
}
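
/*
 * Minimal usage sketch (modeled on the keepalive timer in tcp_timer.c;
 * not compiled here): build a template and hand it to tcp_respond() to
 * emit a bare keepalive probe.
 */
#if 0
	struct tcptemp *t_template;

	t_template = tcpip_maketemplate(inp);
	if (t_template != NULL) {
		tcp_respond(tp, t_template->tt_ipgen,
		    &t_template->tt_t, (struct mbuf *)NULL,
		    tp->rcv_nxt, tp->snd_una - 1, 0);
		/* The template lives in an mbuf; the caller frees it. */
	}
#endif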
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ipgen, th, m, ack, seq, flags)
	struct tcpcb *tp;
	void *ipgen;
	register struct tcphdr *th;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;
	struct route sro;
	struct ip *ip;
	struct tcphdr *nth;
#ifdef INET6
	struct route_in6 *ro6 = 0;
	struct route_in6 sro6;
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int ipflags = 0;
	struct inpcb *inp;

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

#ifdef INET6
	isipv6 = ((struct ip *)ipgen)->ip_v == 6;
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp) {
		inp = tp->t_inpcb;
		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
		INP_INFO_WLOCK_ASSERT(&tcbinfo);
		INP_LOCK_ASSERT(inp);
		if (!(flags & TH_RST)) {
			win = sbspace(&inp->inp_socket->so_rcv);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
#ifdef INET6
		if (isipv6)
			ro6 = &inp->in6p_route;
		else
#endif /* INET6 */
		ro = &inp->inp_route;
	} else {
		inp = NULL;
#ifdef INET6
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else
#endif /* INET6 */
		{
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#ifdef INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
#ifdef INET6
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
		    tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else
#endif
	{
		tlen += sizeof (struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
#ifdef MAC
	if (inp != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		mac_create_mbuf_from_socket(inp->inp_socket, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_reflect_mbuf_tcp(m);
	}
#endif
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
#ifdef INET6
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(inp,
		    ro6 && ro6->ro_rt ?
		    ro6->ro_rt->rt_ifp :
		    NULL);
	} else
#endif /* INET6 */
	{
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#ifdef INET6
	if (isipv6) {
		(void) ip6_output(m, NULL, ro6, ipflags, NULL, NULL, inp);
		if (ro6 == &sro6 && ro6->ro_rt) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else
#endif /* INET6 */
	{
		(void) ip_output(m, NULL, ro, ipflags, NULL, inp);
		if (ro == &sro && ro->ro_rt) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}
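
/*
 * A second usage sketch (patterned after the dropwithreset path in
 * tcp_input.c; not compiled here, and `tlen' is assumed to hold the
 * offending segment's data length): answer an unwanted segment in
 * place, reusing its mbuf and headers to send a RST.
 */
#if 0
	if (th->th_flags & TH_ACK)
		tcp_respond(tp, mtod(m, void *), th, m,
		    (tcp_seq)0, th->th_ack, TH_RST);
	else
		tcp_respond(tp, mtod(m, void *), th, m,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST | TH_ACK);
#endif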
/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct tcpcb_mem *tm;
	struct tcpcb *tp;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
	if (tm == NULL)
		return (NULL);
	tp = &tm->tcb;
	/* LIST_INIT(&tp->t_segq); */	/* XXX covered by M_ZERO */
	tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
		isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
		tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, 0);
	callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, 0);
	callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, 0);
	callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, 0);
	callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, 0);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	tp->t_bw_rtttime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

static void
tcp_discardcb(tp)
	struct tcpcb *tp;
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
	struct rtentry *rt;
	int dosavessthresh;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(tp->tt_rexmt);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_delack);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		register u_long i = 0;
#ifdef INET6
		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		}
		else
#endif /* INET6 */
		if ((rt = inp->inp_route.ro_rt) == NULL ||
		    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr
		    == INADDR_ANY)
			goto no_valid_rt;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
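		/*
		 * Worked example of the scale conversion above (numbers
		 * assumed, not from this file): with hz = 100 and
		 * TCP_RTT_SCALE = 32, a smoothed RTT of one tick (10 ms)
		 * is stored as t_srtt = 32, and RTM_RTTUNIT /
		 * (hz * TCP_RTT_SCALE) = 1000000 / 3200 = 312, so
		 * i = 32 * 312, roughly 10000 usec: the same 10 ms in the
		 * units-per-second scale the routing metrics expect.
		 */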
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		     i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
		    || dosavessthresh) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg +
#ifdef INET6
			    (isipv6 ? sizeof (struct ip6_hdr) +
				sizeof (struct tcphdr) :
#endif
				sizeof (struct tcpiphdr)
#ifdef INET6
			    )
#endif
			    );
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		FREE(q, M_TSEGQ);
	}
	inp->inp_ppcb = NULL;
	tp->t_inpcb = NULL;
	uma_zfree(tcpcb_zone, tp);
	soisdisconnected(so);
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
	struct socket *so = inp->inp_socket;
#endif

	tcp_discardcb(tp);
#ifdef INET6
	if (INP_CHECK_SOCKAF(so, AF_INET6))
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

void
tcp_drain()
{
	if (do_tcpdrain)
	{
		struct inpcb *inpb;
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 *	reassembly queue should be flushed, but in a situation
		 *	where we're really low on mbufs, this is potentially
		 *	useful.
		 */
		INP_INFO_RLOCK(&tcbinfo);
		LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
			if (inpb->inp_vflag & INP_TIMEWAIT)
				continue;
			INP_LOCK(inpb);
			if ((tcpb = intotcpcb(inpb))) {
				while ((te = LIST_FIRST(&tcpb->t_segq))
				    != NULL) {
					LIST_REMOVE(te, tqe_q);
					m_freem(te->tqe_m);
					FREE(te, M_TSEGQ);
				}
			}
			INP_UNLOCK(inpb);
		}
		INP_INFO_RUNLOCK(&tcbinfo);
	}
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return inp;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror) {
		tcp_drop(tp, error);
		return (struct inpcb *)0;
	} else {
		tp->t_softerror = error;
		return inp;
	}
#if 0
	wakeup( &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb);
		return 0;
	}

	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);

	sysctl_wire_old_buffer(req, 2 * (sizeof xig)
		+ n * sizeof(struct xtcpcb));

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return ENOMEM;

	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
	    inp = LIST_NEXT(inp, inp_list)) {
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			/*
			 * XXX: This use of cr_cansee(), introduced with
			 * TCP state changes, is not quite right, but for
			 * now, better than nothing.
			 */
			if (inp->inp_vflag & INP_TIMEWAIT)
				error = cr_cansee(req->td->td_ucred,
				    intotw(inp)->tw_cred);
			else
				error = cr_canseesocket(req->td->td_ucred,
				    inp->inp_socket);
			if (error == 0)
				inp_list[i++] = inp;
		}
		INP_UNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb == NULL)
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			else if (inp->inp_vflag & INP_TIMEWAIT) {
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
				xt.xt_tp.t_state = TCPS_TIME_WAIT;
			} else
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			else {
				bzero(&xt.xt_socket, sizeof xt.xt_socket);
				xt.xt_socket.xso_protocol = IPPROTO_TCP;
			}
			xt.xt_inp.inp_gencnt = inp->inp_gencnt;
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		s = splnet();
		INP_INFO_RLOCK(&tcbinfo);
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&tcbinfo);
		splx(s);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error, s;

	error = suser_cred(req->td->td_ucred, PRISON_ROOT);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error, s, mapped = 0;

	error = suser_cred(req->td->td_ucred, PRISON_ROOT);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
			return (EINVAL);
	}
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	if (mapped == 1)
		inp = in_pcblookup_hash(&tcbinfo,
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port,
		    0, NULL);
	else
		inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
		    addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
#endif
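
/*
 * Userland usage sketch for the getcred interface above (assumed code,
 * in the style of ident-type daemons; not compiled here): pass the two
 * endpoint addresses in as "new" sysctl data and read the owning
 * socket's struct xucred back out.
 */
#if 0
	struct sockaddr_in addrs[2];	/* [0] = local, [1] = foreign */
	struct xucred xuc;
	size_t len = sizeof(xuc);

	if (sysctlbyname("net.inet.tcp.getcred", &xuc, &len,
	    addrs, sizeof(addrs)) == 0)
		printf("owner uid %d\n", (int)xuc.cr_uid);
#endif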
void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmp_seq;
	int s;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	    cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (PRC_IS_REDIRECT(cmd)) {
		ip = 0;
		notify = in_rtchange;
	} else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip) {
		s = splnet();
		th = (struct tcphdr *)((caddr_t)ip
		    + (ip->ip_hl << 2));
		INP_INFO_WLOCK(&tcbinfo);
		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL) {
			INP_LOCK(inp);
			if (inp->inp_socket != NULL) {
				icmp_seq = htonl(th->th_seq);
				tp = intotcpcb(inp);
				if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
				    SEQ_LT(icmp_seq, tp->snd_max))
					inp = (*notify)(inp, inetctlerrmap[cmd]);
			}
			if (inp)
				INP_UNLOCK(inp);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		INP_INFO_WUNLOCK(&tcbinfo);
		splx(s);
	} else
		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}

#ifdef INET6
void
tcp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	struct tcphdr th;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non-NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&tcb, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else
		in6_pcbnotify(&tcb, sa, 0, (const struct sockaddr *)sa6_src,
		    0, cmd, notify);
}
#endif /* INET6 */


/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 *
 */

#define ISN_BYTES_PER_SECOND 1048576

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(tp)
	struct tcpcb *tp;
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
		< (u_int)ticks))) {
		read_random(&isn_secret, sizeof(isn_secret));
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return new_isn;
}
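
/*
 * Illustrative arithmetic for the constants above (assumed hz = 100):
 * the MD5 output is offset by ticks * (ISN_BYTES_PER_SECOND / hz), or
 * about 10485 per tick, so the ISN space advances by roughly 1 MB per
 * second and the 32-bit sequence space takes 2^32 / 2^20 = 4096
 * seconds, a bit over an hour, to wrap, matching the comment above.
 */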
/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
struct inpcb *
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
	return (inp);
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
struct inpcb *
tcp_drop_syn_sent(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp && tp->t_state == TCPS_SYN_SENT) {
		tcp_drop(tp, errno);
		return (struct inpcb *)0;
	}
	return inp;
}

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
struct inpcb *
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;
#ifdef INET6
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	if (tp) {
#ifdef INET6
		if (isipv6)
			rt = tcp_rtlookup6(&inp->inp_inc);
		else
#endif /* INET6 */
		rt = tcp_rtlookup(&inp->inp_inc);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			tp->t_maxopd = tp->t_maxseg =
#ifdef INET6
				isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
				tcp_mssdflt;
			return inp;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu -
#ifdef INET6
			(isipv6 ?
			 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
			 sizeof(struct tcpiphdr)
#ifdef INET6
			 )
#endif /* INET6 */
			;

		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return inp;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
	return inp;
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inc)
	struct in_conninfo *inc;
{
	struct route *ro;
	struct rtentry *rt;

	ro = &inc->inc_route;
	rt = ro->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
				inc->inc_faddr;
			rtalloc(ro);
			rt = ro->ro_rt;
		}
	}
	return rt;
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(inc)
	struct in_conninfo *inc;
{
	struct route_in6 *ro6;
	struct rtentry *rt;

	ro6 = &inc->inc6_route;
	rt = ro6->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
			rt = ro6->ro_rt;
		}
	}
	return rt;
}
#endif /* INET6 */

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return 0;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (!m)
		return 0;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
			sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcpip_fillheaders(inp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcpip_fillheaders(inp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return hdrsiz;
}
#endif /* IPSEC */

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inc)
	struct in_conninfo *inc;
{
	struct rtentry *rt;

#ifdef INET6
	if (inc->inc_isipv6)
		rt = tcp_rtlookup6(inc);
	else
#endif /* INET6 */
	rt = tcp_rtlookup(inc);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
		return NULL;

	return rmx_taop(rt->rt_rmx);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there is
 * nothing in the cache left over.
 */
static void
tcp_cleartaocache()
{
}

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is unlocked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(tp)
	struct tcpcb *tp;
{
	struct tcptw *tw;
	struct inpcb *inp;
	int tw_time, acknow;
	struct socket *so;

	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_timer_2msl_tw(1);
		if (tw == NULL) {
			tcp_close(tp);
			return;
		}
	}
	inp = tp->t_inpcb;
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP))
		tw->t_recent = tp->ts_recent;
	else
		tw->t_recent = 0;

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss = tp->iss;
	tw->irs = tp->irs;
	tw->cc_recv = tp->cc_recv;
	tw->cc_send = tp->cc_send;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

/* XXX
 * If this code will
 * be used for fin-wait-2 state also, then we may need
 * a ts_recent from the last segment.
 */
	/* Shorten TIME_WAIT [RFC-1644, p.28] */
	if (tp->cc_recv != 0 && (ticks - tp->t_starttime) < tcp_msl) {
		tw_time = tp->t_rxtcur * TCPTV_TWTRUNC;
		/* For T/TCP client, force ACK now. */
		acknow = 1;
	} else {
		tw_time = 2 * tcp_msl;
		acknow = tp->t_flags & TF_ACKNOW;
	}
	tcp_discardcb(tp);
	so = inp->inp_socket;
	so->so_pcb = NULL;
	tw->tw_cred = crhold(so->so_cred);
	tw->tw_so_options = so->so_options;
	if (acknow)
		tcp_twrespond(tw, so, NULL, TH_ACK);
	sotryfree(so);
	inp->inp_socket = NULL;
	inp->inp_ppcb = (caddr_t)tw;
	inp->inp_vflag |= INP_TIMEWAIT;
	tcp_timer_2msl_reset(tw, tw_time);
	INP_UNLOCK(inp);
}

/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND		250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
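
/*
 * Illustrative arithmetic (assumed hz = 100 and a 60 second stay in
 * TIME_WAIT): new_iss advances by 1048576 / hz bytes per tick, so it
 * ends up roughly 63 MB past tw->iss, normally well beyond snd_nxt;
 * MS_ISN_BYTES_PER_SECOND above plays the same role for estimating
 * the peer's ISN clock.
 */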
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return 1;
	else
		return 0;
}

struct tcptw *
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct inpcb *inp;

	inp = tw->tw_inpcb;
	tw->tw_inpcb = NULL;
	tcp_timer_2msl_stop(tw);
	inp->inp_ppcb = NULL;
#ifdef INET6
	if (inp->inp_vflag & INP_IPV6PROTO)
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	if (reuse)
		return (tw);
	uma_zfree(tcptw_zone, tw);
	return (NULL);
}

/*
 * One of so and msrc must be non-NULL for use by the MAC Framework to
 * construct a label for any resulting packet.
 */
int
tcp_twrespond(struct tcptw *tw, struct socket *so, struct mbuf *msrc,
    int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
	struct tcphdr *th;
	struct mbuf *m;
	struct ip *ip = NULL;
	u_int8_t *optp;
	u_int hdrlen, optlen;
	int error;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_isipv6;
#endif

	KASSERT(so != NULL || msrc != NULL,
	    ("tcp_twrespond: so and msrc NULL"));

	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	if (so != NULL)
		mac_create_mbuf_from_socket(so, m);
	else
		mac_create_mbuf_netlayer(msrc, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	} else
#endif
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
	optp = (u_int8_t *)(th + 1);

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYN's and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(tw->t_recent);
		optp += TCPOLEN_TSTAMP_APPA;
	}

	/*
	 * Send `CC-family' options if needed, and it's not a RST.
	 */
	if (tw->cc_recv != 0 && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
		*lp   = htonl(tw->cc_send);
		optp += TCPOLEN_CC_APPA;
	}
	optlen = optp - (u_int8_t *)(th + 1);

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

#ifdef INET6
	if (isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    sizeof(struct tcphdr) + optlen);
		ip6->ip6_hlim = in6_selecthlim(inp, inp->in6p_route.ro_rt ?
		    inp->in6p_route.ro_rt->rt_ifp : NULL);
		error = ip6_output(m, inp->in6p_outputopts, &inp->in6p_route,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		ip->ip_len = m->m_pkthdr.len;
		error = ip_output(m, inp->inp_options, &inp->inp_route,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, inp);
	}
	if (flags & TH_ACK)
		tcpstat.tcps_sndacks++;
	else
		tcpstat.tcps_sndctrl++;
	tcpstat.tcps_sndtotal++;
	return (error);
}

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (tcp_inflight_enable == 0) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 *
	 * Note: if ticks rollover 'bw' may wind up negative.  We must
	 * effectively reset t_bw_rtttime for this case.
	 */
	save_ticks = ticks;
	if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
		return;

	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
	    (save_ticks - tp->t_bw_rtttime);
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	if (tp->t_bw_rtttime == 0 || (int)bw < 0)
		return;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case and stabilization.
	 * Without the slop we could be locking ourselves into a lower
	 * bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    on very slow connections.  A value no smaller than 5
	 *	    should be used, but only reduce this default if you have
	 *	    no other choice.
	 */
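	/*
	 * Worked example of the formula below (numbers assumed, hz = 100):
	 * a smoothed RTT of 100 ms makes USERTT about 10 ticks << 5 = 320,
	 * so bw * USERTT / (hz << TCP_RTT_SHIFT) = bw * 320 / 3200, i.e.
	 * bandwidth times 0.1 seconds of RTT.  At bw = 1 MB/sec and
	 * t_maxseg = 1448, bwnd is about 100000 + 20 * 1448 / 10, or
	 * roughly 103 kB: the bandwidth-delay product plus two segments
	 * of slop.
	 */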
#define USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	    tcp_inflight_stab * tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
			    tp,
			    bw,
			    tp->t_rttbest,
			    tp->t_srtt,
			    bwnd
			);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}