/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)tcp_subr.c      8.2 (Berkeley) 5/24/95
 * $FreeBSD$
 */

#include "opt_compat.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /* IPSEC */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#define IPSEC
#endif /* FAST_IPSEC */

#include <machine/in_cksum.h>
#include <sys/md5.h>
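/*
 * The knobs declared below are exported as sysctl OIDs under the
 * net.inet.tcp node; for instance, the default MSS declared next can
 * be inspected or tuned from userland roughly as follows:
 *
 *     sysctl net.inet.tcp.mssdflt        # read the current value
 *     sysctl net.inet.tcp.mssdflt=512    # write it (CTLFLAG_RW only)
 *
 * (a usage sketch; the exact OID names are given by the SYSCTL_INT()
 * declarations themselves.)
 */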
int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt, 0,
    "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending too small packets.
 */
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");

/*
 * Number of TCP segments per second we accept from remote host
 * before we start to calculate average segment size.  If average
 * segment size drops below the minimum TCP MSS we assume a DoS
 * attack and reset+drop the connection.  Care has to be taken not to
 * set this value too small to not kill interactive type connections
 * (telnet, SSH) which send many small packets.
 */
int tcp_minmssoverload = TCP_MINMSSOVERLOAD;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
    &tcp_minmssoverload, 0, "Number of TCP Segments per Second allowed to"
    " be under the MINMSS Size");
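/*
 * To see why the MINMSS checks above matter: with the usual Ethernet
 * MSS of 1460 a one-megabyte transfer takes roughly 720 segments,
 * but with an MSS forced down to 20 the same transfer takes over
 * 52,000 segments, each dragging 40 bytes of IP+TCP header along,
 * which is pure overhead for the victim's CPU and interface.
 */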
#if 0
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

int tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
static int tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");

static int tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");

static struct inpcb *tcp_notify(struct inpcb *, int);
static void tcp_discardcb(struct tcpcb *);

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE 512
#endif

/*
 * XXX
 * Callouts should be moved into struct tcpcb directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing purposes, which do not know about callouts.
 */
struct tcpcb_mem {
    struct tcpcb    tcb;
    struct callout  tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
    struct callout  tcpcb_mem_2msl, tcpcb_mem_delack;
};

static uma_zone_t tcpcb_zone;
static uma_zone_t tcptw_zone;

/*
 * TCP initialization.
 */
void
tcp_init()
{
    int hashsize = TCBHASHSIZE;

    tcp_ccgen = 1;

    tcp_delacktime = TCPTV_DELACK;
    tcp_keepinit = TCPTV_KEEP_INIT;
    tcp_keepidle = TCPTV_KEEP_IDLE;
    tcp_keepintvl = TCPTV_KEEPINTVL;
    tcp_maxpersistidle = TCPTV_KEEP_IDLE;
    tcp_msl = TCPTV_MSL;
    tcp_rexmit_min = TCPTV_MIN;
    tcp_rexmit_slop = TCPTV_CPU_VAR;

    INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
    LIST_INIT(&tcb);
    tcbinfo.listhead = &tcb;
    TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
    if (!powerof2(hashsize)) {
        printf("WARNING: TCB hash size not a power of 2\n");
        hashsize = 512;     /* safe default */
    }
    tcp_tcbhashsize = hashsize;
    tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
    tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
        &tcbinfo.porthashmask);
    tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
    uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
    if (max_protohdr < TCP_MINPROTOHDR)
        max_protohdr = TCP_MINPROTOHDR;
    if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
        panic("tcp_init");
#undef TCP_MINPROTOHDR
    /*
     * These have to be type stable for the benefit of the timers.
     */
    tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
    uma_zone_set_max(tcpcb_zone, maxsockets);
    tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
    uma_zone_set_max(tcptw_zone, maxsockets / 5);
    tcp_timer_init();
    syncache_init();
    tcp_hc_init();
    tcp_reass_init();
}
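/*
 * As the CTLFLAG_RDTUN flag on tcbhashsize above implies, the hash
 * size can only be set at boot; e.g. a loader.conf entry along the
 * lines of
 *
 *     net.inet.tcp.tcbhashsize="2048"
 *
 * would be picked up by the TUNABLE_INT_FETCH() call in tcp_init().
 */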
/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(inp, ip_ptr, tcp_ptr)
    struct inpcb *inp;
    void *ip_ptr;
    void *tcp_ptr;
{
    struct tcphdr *th = (struct tcphdr *)tcp_ptr;

#ifdef INET6
    if ((inp->inp_vflag & INP_IPV6) != 0) {
        struct ip6_hdr *ip6;

        ip6 = (struct ip6_hdr *)ip_ptr;
        ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
            (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
        ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
            (IPV6_VERSION & IPV6_VERSION_MASK);
        ip6->ip6_nxt = IPPROTO_TCP;
        ip6->ip6_plen = sizeof(struct tcphdr);
        ip6->ip6_src = inp->in6p_laddr;
        ip6->ip6_dst = inp->in6p_faddr;
    } else
#endif
    {
        struct ip *ip;

        ip = (struct ip *)ip_ptr;
        ip->ip_v = IPVERSION;
        ip->ip_hl = 5;
        ip->ip_tos = inp->inp_ip_tos;
        ip->ip_len = 0;
        ip->ip_id = 0;
        ip->ip_off = 0;
        ip->ip_ttl = inp->inp_ip_ttl;
        ip->ip_sum = 0;
        ip->ip_p = IPPROTO_TCP;
        ip->ip_src = inp->inp_laddr;
        ip->ip_dst = inp->inp_faddr;
    }
    th->th_sport = inp->inp_lport;
    th->th_dport = inp->inp_fport;
    th->th_seq = 0;
    th->th_ack = 0;
    th->th_x2 = 0;
    th->th_off = 5;
    th->th_flags = 0;
    th->th_win = 0;
    th->th_urp = 0;
    th->th_sum = 0;     /* in_pseudo() is called later for IPv4 */
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(inp)
    struct inpcb *inp;
{
    struct mbuf *m;
    struct tcptemp *n;

    m = m_get(M_DONTWAIT, MT_HEADER);
    if (m == NULL)
        return (0);
    m->m_len = sizeof(struct tcptemp);
    n = mtod(m, struct tcptemp *);

    tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
    return (n);
}
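/*
 * A typical keepalive sequence, then, is: build a template with
 * tcpip_maketemplate() and hand it to tcp_respond() below, roughly
 *
 *     t = tcpip_maketemplate(inp);
 *     tcp_respond(tp, t->tt_ipgen, &t->tt_t, NULL,
 *         tp->rcv_nxt, tp->snd_una - 1, 0);
 *
 * (a sketch of what the keepalive timer does; see tcp_timer.c for
 * the real call site).
 */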
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ipgen, th, m, ack, seq, flags)
    struct tcpcb *tp;
    void *ipgen;
    register struct tcphdr *th;
    register struct mbuf *m;
    tcp_seq ack, seq;
    int flags;
{
    register int tlen;
    int win = 0;
    struct ip *ip;
    struct tcphdr *nth;
#ifdef INET6
    struct ip6_hdr *ip6;
    int isipv6;
#endif /* INET6 */
    int ipflags = 0;
    struct inpcb *inp = NULL;

    KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

#ifdef INET6
    isipv6 = ((struct ip *)ipgen)->ip_v == 6;
    ip6 = ipgen;
#endif /* INET6 */
    ip = ipgen;

    if (tp != NULL) {
        inp = tp->t_inpcb;
        KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
        INP_INFO_WLOCK_ASSERT(&tcbinfo);
        INP_LOCK_ASSERT(inp);
        if (!(flags & TH_RST)) {
            win = sbspace(&inp->inp_socket->so_rcv);
            if (win > (long)TCP_MAXWIN << tp->rcv_scale)
                win = (long)TCP_MAXWIN << tp->rcv_scale;
        }
    }
    if (m == NULL) {
        m = m_gethdr(M_DONTWAIT, MT_HEADER);
        if (m == NULL)
            return;
        tlen = 0;
        m->m_data += max_linkhdr;
#ifdef INET6
        if (isipv6) {
            bcopy((caddr_t)ip6, mtod(m, caddr_t),
                sizeof(struct ip6_hdr));
            ip6 = mtod(m, struct ip6_hdr *);
            nth = (struct tcphdr *)(ip6 + 1);
        } else
#endif /* INET6 */
        {
            bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
            ip = mtod(m, struct ip *);
            nth = (struct tcphdr *)(ip + 1);
        }
        bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
        flags = TH_ACK;
    } else {
        m_freem(m->m_next);
        m->m_next = NULL;
        m->m_data = (caddr_t)ipgen;
        /* m_len is set later */
        tlen = 0;
#define xchg(a, b, type) { type t; t = a; a = b; b = t; }
#ifdef INET6
        if (isipv6) {
            xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
            nth = (struct tcphdr *)(ip6 + 1);
        } else
#endif /* INET6 */
        {
            xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
            nth = (struct tcphdr *)(ip + 1);
        }
        if (th != nth) {
            /*
             * this is usually a case when an extension header
             * exists between the IPv6 header and the
             * TCP header.
             */
            nth->th_sport = th->th_sport;
            nth->th_dport = th->th_dport;
        }
        xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
    }
#ifdef INET6
    if (isipv6) {
        ip6->ip6_flow = 0;
        ip6->ip6_vfc = IPV6_VERSION;
        ip6->ip6_nxt = IPPROTO_TCP;
        ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) +
            tlen));
        tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
    } else
#endif
    {
        tlen += sizeof(struct tcpiphdr);
        ip->ip_len = tlen;
        ip->ip_ttl = ip_defttl;
        if (path_mtu_discovery)
            ip->ip_off |= IP_DF;
    }
    m->m_len = tlen;
    m->m_pkthdr.len = tlen;
    m->m_pkthdr.rcvif = NULL;
#ifdef MAC
    if (inp != NULL) {
        /*
         * Packet is associated with a socket, so allow the
         * label of the response to reflect the socket label.
         */
        mac_create_mbuf_from_socket(inp->inp_socket, m);
    } else {
        /*
         * Packet is not associated with a socket, so possibly
         * update the label in place.
         */
        mac_reflect_mbuf_tcp(m);
    }
#endif
    nth->th_seq = htonl(seq);
    nth->th_ack = htonl(ack);
    nth->th_x2 = 0;
    nth->th_off = sizeof(struct tcphdr) >> 2;
    nth->th_flags = flags;
    if (tp != NULL)
        nth->th_win = htons((u_short)(win >> tp->rcv_scale));
    else
        nth->th_win = htons((u_short)win);
    nth->th_urp = 0;
#ifdef INET6
    if (isipv6) {
        nth->th_sum = 0;
        nth->th_sum = in6_cksum(m, IPPROTO_TCP,
            sizeof(struct ip6_hdr),
            tlen - sizeof(struct ip6_hdr));
        ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
            NULL, NULL);
    } else
#endif /* INET6 */
    {
        nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
            htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
        m->m_pkthdr.csum_flags = CSUM_TCP;
        m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
    }
#ifdef TCPDEBUG
    if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
        tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#ifdef INET6
    if (isipv6)
        (void)ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
    else
#endif /* INET6 */
        (void)ip_output(m, NULL, NULL, ipflags, NULL, inp);
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
    struct inpcb *inp;
{
    struct tcpcb_mem *tm;
    struct tcpcb *tp;
#ifdef INET6
    int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

    tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
    if (tm == NULL)
        return (NULL);
    tp = &tm->tcb;
    /* LIST_INIT(&tp->t_segq); */  /* XXX covered by M_ZERO */
    tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
        isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
        tcp_mssdflt;

    /* Set up our timeouts. */
    callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, 0);
    callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, 0);
    callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, 0);
    callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, 0);
    callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, 0);

    if (tcp_do_rfc1323)
        tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
    if (tcp_do_rfc1644)
        tp->t_flags |= TF_REQ_CC;
    tp->t_inpcb = inp;  /* XXX */
    /*
     * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
     * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
     * reasonable initial retransmit time.
     */
    tp->t_srtt = TCPTV_SRTTBASE;
    tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
    tp->t_rttmin = tcp_rexmit_min;
    tp->t_rxtcur = TCPTV_RTOBASE;
    tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
    tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
    tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
    tp->t_rcvtime = ticks;
    tp->t_bw_rtttime = ticks;
    /*
     * IPv4 TTL initialization is necessary for an IPv6 socket as well,
     * because the socket may be bound to an IPv6 wildcard address,
     * which may match an IPv4-mapped IPv6 address.
     */
    inp->inp_ip_ttl = ip_defttl;
    inp->inp_ppcb = (caddr_t)tp;
    return (tp);        /* XXX */
}
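/*
 * Worked through with the stock definitions (TCPTV_SRTTBASE == 0,
 * TCPTV_RTOBASE == 3*hz), the rttvar chosen above makes the initial
 * srtt + 4 * rttvar come out to exactly TCPTV_RTOBASE, i.e. a three
 * second first retransmit timeout, which is also what t_rxtcur is
 * seeded with.
 */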
/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
    register struct tcpcb *tp;
    int errno;
{
    struct socket *so = tp->t_inpcb->inp_socket;

    if (TCPS_HAVERCVDSYN(tp->t_state)) {
        tp->t_state = TCPS_CLOSED;
        (void) tcp_output(tp);
        tcpstat.tcps_drops++;
    } else
        tcpstat.tcps_conndrops++;
    if (errno == ETIMEDOUT && tp->t_softerror)
        errno = tp->t_softerror;
    so->so_error = errno;
    return (tcp_close(tp));
}

static void
tcp_discardcb(tp)
    struct tcpcb *tp;
{
    struct tseg_qent *q;
    struct inpcb *inp = tp->t_inpcb;
    struct socket *so = inp->inp_socket;
#ifdef INET6
    int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

    /*
     * Make sure that all of our timers are stopped before we
     * delete the PCB.
     */
    callout_stop(tp->tt_rexmt);
    callout_stop(tp->tt_persist);
    callout_stop(tp->tt_keep);
    callout_stop(tp->tt_2msl);
    callout_stop(tp->tt_delack);

    /*
     * If we got enough samples through the srtt filter,
     * save the rtt and rttvar in the routing entry.
     * 'Enough' is arbitrarily defined as 4 rtt samples.
     * 4 samples is enough for the srtt filter to converge
     * to within enough % of the correct value; fewer samples
     * and we could save a bogus rtt.  The danger is not high
     * as tcp quickly recovers from everything.
     * XXX: Works very well but needs some more statistics!
     */
    if (tp->t_rttupdated >= 4) {
        struct hc_metrics_lite metrics;
        u_long ssthresh;

        bzero(&metrics, sizeof(metrics));
        /*
         * Update the ssthresh always when the conditions below
         * are satisfied.  This gives us better new start value
         * for the congestion avoidance for new connections.
         * ssthresh is only set if packet loss occurred on a session.
         */
        ssthresh = tp->snd_ssthresh;
        if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
            /*
             * convert the limit from user data bytes to
             * packets then to packet data bytes.
             */
            ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
            if (ssthresh < 2)
                ssthresh = 2;
            ssthresh *= (u_long)(tp->t_maxseg +
#ifdef INET6
                (isipv6 ?
                    sizeof(struct ip6_hdr) +
                    sizeof(struct tcphdr) :
#endif
                    sizeof(struct tcpiphdr)
#ifdef INET6
                )
#endif
                );
        } else
            ssthresh = 0;
        metrics.rmx_ssthresh = ssthresh;

        metrics.rmx_rtt = tp->t_srtt;
        metrics.rmx_rttvar = tp->t_rttvar;
        /* XXX: This wraps if the pipe is more than 4 Gbit per second */
        metrics.rmx_bandwidth = tp->snd_bandwidth;
        metrics.rmx_cwnd = tp->snd_cwnd;
        metrics.rmx_sendpipe = 0;
        metrics.rmx_recvpipe = 0;

        tcp_hc_update(&inp->inp_inc, &metrics);
    }

    /* free the reassembly queue, if any */
    while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
        LIST_REMOVE(q, tqe_q);
        m_freem(q->tqe_m);
        uma_zfree(tcp_reass_zone, q);
        tp->t_segqlen--;
        tcp_reass_qsize--;
    }
    inp->inp_ppcb = NULL;
    tp->t_inpcb = NULL;
    uma_zfree(tcpcb_zone, tp);
    soisdisconnected(so);
}

/*
 * Close a TCP control block:
 *    discard all space held by the tcp
 *    discard internet protocol block
 *    wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
    struct tcpcb *tp;
{
    struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
    struct socket *so = inp->inp_socket;
#endif

    tcp_discardcb(tp);
#ifdef INET6
    if (INP_CHECK_SOCKAF(so, AF_INET6))
        in6_pcbdetach(inp);
    else
#endif
        in_pcbdetach(inp);
    tcpstat.tcps_closed++;
    return (NULL);
}

void
tcp_drain()
{
    if (do_tcpdrain) {
        struct inpcb *inpb;
        struct tcpcb *tcpb;
        struct tseg_qent *te;

        /*
         * Walk the tcpbs, if existing, and flush the reassembly queue,
         * if there is one...
         * XXX: The "Net/3" implementation doesn't imply that the TCP
         * reassembly queue should be flushed, but in a situation
         * where we're really low on mbufs, this is potentially
         * useful.
         */
        INP_INFO_RLOCK(&tcbinfo);
        LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
            if (inpb->inp_vflag & INP_TIMEWAIT)
                continue;
            INP_LOCK(inpb);
            if ((tcpb = intotcpcb(inpb)) != NULL) {
                while ((te = LIST_FIRST(&tcpb->t_segq)) != NULL) {
                    LIST_REMOVE(te, tqe_q);
                    m_freem(te->tqe_m);
                    uma_zfree(tcp_reass_zone, te);
                    tcpb->t_segqlen--;
                    tcp_reass_qsize--;
                }
            }
            INP_UNLOCK(inpb);
        }
        INP_INFO_RUNLOCK(&tcbinfo);
    }
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(inp, error)
    struct inpcb *inp;
    int error;
{
    struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;

    /*
     * Ignore some errors if we are hooked up.
     * If connection hasn't completed, has retransmitted several times,
     * and receives a second error, give up now.  This is better
     * than waiting a long time to establish a connection that
     * can never complete.
     */
    if (tp->t_state == TCPS_ESTABLISHED &&
        (error == EHOSTUNREACH || error == ENETUNREACH ||
         error == EHOSTDOWN)) {
        return (inp);
    } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
        tp->t_softerror) {
        tcp_drop(tp, error);
        return ((struct inpcb *)0);
    } else {
        tp->t_softerror = error;
        return (inp);
    }
#if 0
    wakeup(&so->so_timeo);
    sorwakeup(so);
    sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
    int error, i, n, s;
    struct inpcb *inp, **inp_list;
    inp_gen_t gencnt;
    struct xinpgen xig;

    /*
     * The process of preparing the TCB list is too time-consuming and
     * resource-intensive to repeat twice on every request.
     */
    if (req->oldptr == NULL) {
        n = tcbinfo.ipi_count;
        req->oldidx = 2 * (sizeof xig)
            + (n + n/8) * sizeof(struct xtcpcb);
        return (0);
    }

    if (req->newptr != NULL)
        return (EPERM);

    /*
     * OK, now we're committed to doing something.
     */
    s = splnet();
    INP_INFO_RLOCK(&tcbinfo);
    gencnt = tcbinfo.ipi_gencnt;
    n = tcbinfo.ipi_count;
    INP_INFO_RUNLOCK(&tcbinfo);
    splx(s);

    error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
        + n * sizeof(struct xtcpcb));
    if (error != 0)
        return (error);

    xig.xig_len = sizeof xig;
    xig.xig_count = n;
    xig.xig_gen = gencnt;
    xig.xig_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xig, sizeof xig);
    if (error)
        return (error);

    inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
    if (inp_list == NULL)
        return (ENOMEM);

    s = splnet();
    INP_INFO_RLOCK(&tcbinfo);
    for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp != NULL && i < n;
         inp = LIST_NEXT(inp, inp_list)) {
        INP_LOCK(inp);
        if (inp->inp_gencnt <= gencnt) {
            /*
             * XXX: This use of cr_cansee(), introduced with
             * TCP state changes, is not quite right, but for
             * now, better than nothing.
             */
            if (inp->inp_vflag & INP_TIMEWAIT)
                error = cr_cansee(req->td->td_ucred,
                    intotw(inp)->tw_cred);
            else
                error = cr_canseesocket(req->td->td_ucred,
                    inp->inp_socket);
            if (error == 0)
                inp_list[i++] = inp;
        }
        INP_UNLOCK(inp);
    }
    INP_INFO_RUNLOCK(&tcbinfo);
    splx(s);
    n = i;

    error = 0;
    for (i = 0; i < n; i++) {
        inp = inp_list[i];
        if (inp->inp_gencnt <= gencnt) {
            struct xtcpcb xt;
            caddr_t inp_ppcb;

            xt.xt_len = sizeof xt;
            /* XXX should avoid extra copy */
            bcopy(inp, &xt.xt_inp, sizeof *inp);
            inp_ppcb = inp->inp_ppcb;
            if (inp_ppcb == NULL)
                bzero((char *)&xt.xt_tp, sizeof xt.xt_tp);
            else if (inp->inp_vflag & INP_TIMEWAIT) {
                bzero((char *)&xt.xt_tp, sizeof xt.xt_tp);
                xt.xt_tp.t_state = TCPS_TIME_WAIT;
            } else
                bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
            if (inp->inp_socket != NULL)
                sotoxsocket(inp->inp_socket, &xt.xt_socket);
            else {
                bzero(&xt.xt_socket, sizeof xt.xt_socket);
                xt.xt_socket.xso_protocol = IPPROTO_TCP;
            }
            xt.xt_inp.inp_gencnt = inp->inp_gencnt;
            error = SYSCTL_OUT(req, &xt, sizeof xt);
        }
    }
    if (!error) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        s = splnet();
        INP_INFO_RLOCK(&tcbinfo);
        xig.xig_gen = tcbinfo.ipi_gencnt;
        xig.xig_sogen = so_gencnt;
        xig.xig_count = tcbinfo.ipi_count;
        INP_INFO_RUNLOCK(&tcbinfo);
        splx(s);
        error = SYSCTL_OUT(req, &xig, sizeof xig);
    }
    free(inp_list, M_TEMP);
    return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
    struct xucred xuc;
    struct sockaddr_in addrs[2];
    struct inpcb *inp;
    int error, s;

    error = suser_cred(req->td->td_ucred, PRISON_ROOT);
    if (error)
        return (error);
    error = SYSCTL_IN(req, addrs, sizeof(addrs));
    if (error)
        return (error);
    s = splnet();
    INP_INFO_RLOCK(&tcbinfo);
    inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
        addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
    if (inp == NULL) {
        error = ENOENT;
        goto outunlocked;
    }
    INP_LOCK(inp);
    if (inp->inp_socket == NULL) {
        error = ENOENT;
        goto out;
    }
    error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
    if (error)
        goto out;
    cru2x(inp->inp_socket->so_cred, &xuc);
out:
    INP_UNLOCK(inp);
outunlocked:
    INP_INFO_RUNLOCK(&tcbinfo);
    splx(s);
    if (error == 0)
        error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
    return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
    struct xucred xuc;
    struct sockaddr_in6 addrs[2];
    struct inpcb *inp;
    int error, s, mapped = 0;

    error = suser_cred(req->td->td_ucred, PRISON_ROOT);
    if (error)
        return (error);
    error = SYSCTL_IN(req, addrs, sizeof(addrs));
    if (error)
        return (error);
    if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
        if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
            mapped = 1;
        else
            return (EINVAL);
    }
    s = splnet();
    INP_INFO_RLOCK(&tcbinfo);
    if (mapped == 1)
        inp = in_pcblookup_hash(&tcbinfo,
            *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
            addrs[1].sin6_port,
            *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
            addrs[0].sin6_port,
            0, NULL);
    else
        inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
            addrs[1].sin6_port,
            &addrs[0].sin6_addr, addrs[0].sin6_port,
            0, NULL);
    if (inp == NULL) {
        error = ENOENT;
        goto outunlocked;
    }
    INP_LOCK(inp);
    if (inp->inp_socket == NULL) {
        error = ENOENT;
        goto out;
    }
    error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
    if (error)
        goto out;
    cru2x(inp->inp_socket->so_cred, &xuc);
out:
    INP_UNLOCK(inp);
outunlocked:
    INP_INFO_RUNLOCK(&tcbinfo);
    splx(s);
    if (error == 0)
        error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
    return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
#endif

void
tcp_ctlinput(cmd, sa, vip)
    int cmd;
    struct sockaddr *sa;
    void *vip;
{
    struct ip *ip = vip;
    struct tcphdr *th;
    struct in_addr faddr;
    struct inpcb *inp;
    struct tcpcb *tp;
    struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
    tcp_seq icmp_seq;
    int s;

    faddr = ((struct sockaddr_in *)sa)->sin_addr;
    if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
        return;

    if (cmd == PRC_QUENCH)
        notify = tcp_quench;
    else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
        cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
        notify = tcp_drop_syn_sent;
    else if (cmd == PRC_MSGSIZE)
        notify = tcp_mtudisc;
    /*
     * Redirects don't need to be handled up here.
     */
    else if (PRC_IS_REDIRECT(cmd))
        return;
    /*
     * Hostdead is ugly because it goes linearly through all PCBs.
     * XXX: We never get this from ICMP, otherwise it makes an
     * excellent DoS attack on machines with many connections.
     */
    else if (cmd == PRC_HOSTDEAD)
        ip = NULL;
    else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
        return;
    if (ip != NULL) {
        s = splnet();
        th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
        INP_INFO_WLOCK(&tcbinfo);
        inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
            ip->ip_src, th->th_sport, 0, NULL);
        if (inp != NULL) {
            INP_LOCK(inp);
            if (inp->inp_socket != NULL) {
                icmp_seq = htonl(th->th_seq);
                tp = intotcpcb(inp);
                if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
                    SEQ_LT(icmp_seq, tp->snd_max))
                    inp = (*notify)(inp, inetctlerrmap[cmd]);
            }
            if (inp != NULL)
                INP_UNLOCK(inp);
        } else {
            struct in_conninfo inc;

            inc.inc_fport = th->th_dport;
            inc.inc_lport = th->th_sport;
            inc.inc_faddr = faddr;
            inc.inc_laddr = ip->ip_src;
#ifdef INET6
            inc.inc_isipv6 = 0;
#endif
            syncache_unreach(&inc, th);
        }
        INP_INFO_WUNLOCK(&tcbinfo);
        splx(s);
    } else
        in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}

#ifdef INET6
void
tcp6_ctlinput(cmd, sa, d)
    int cmd;
    struct sockaddr *sa;
    void *d;
{
    struct tcphdr th;
    struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
    struct ip6_hdr *ip6;
    struct mbuf *m;
    struct ip6ctlparam *ip6cp = NULL;
    const struct sockaddr_in6 *sa6_src = NULL;
    int off;
    struct tcp_portonly {
        u_int16_t th_sport;
        u_int16_t th_dport;
    } *thp;

    if (sa->sa_family != AF_INET6 ||
        sa->sa_len != sizeof(struct sockaddr_in6))
        return;

    if (cmd == PRC_QUENCH)
        notify = tcp_quench;
    else if (cmd == PRC_MSGSIZE)
        notify = tcp_mtudisc;
    else if (!PRC_IS_REDIRECT(cmd) &&
        ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
        return;

    /* if the parameter is from icmp6, decode it. */
    if (d != NULL) {
        ip6cp = (struct ip6ctlparam *)d;
        m = ip6cp->ip6c_m;
        ip6 = ip6cp->ip6c_ip6;
        off = ip6cp->ip6c_off;
        sa6_src = ip6cp->ip6c_src;
    } else {
        m = NULL;
        ip6 = NULL;
        off = 0;        /* fool gcc */
        sa6_src = &sa6_any;
    }

    if (ip6 != NULL) {
        struct in_conninfo inc;
        /*
         * XXX: We assume that when IP6 is non-NULL,
         * M and OFF are valid.
         */
        /* check if we can safely examine src and dst ports */
        if (m->m_pkthdr.len < off + sizeof(*thp))
            return;

        bzero(&th, sizeof(th));
        m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

        in6_pcbnotify(&tcb, sa, th.th_dport,
            (struct sockaddr *)ip6cp->ip6c_src,
            th.th_sport, cmd, NULL, notify);

        inc.inc_fport = th.th_dport;
        inc.inc_lport = th.th_sport;
        inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
        inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
        inc.inc_isipv6 = 1;
        syncache_unreach(&inc, &th);
    } else
        in6_pcbnotify(&tcb, sa, 0, (const struct sockaddr *)sa6_src,
            0, cmd, NULL, notify);
}
#endif /* INET6 */

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based on the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between reseeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */

#define ISN_BYTES_PER_SECOND 1048576

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(tp)
    struct tcpcb *tp;
{
    u_int32_t md5_buffer[4];
    tcp_seq new_isn;

    /* Seed if this is the first use, reseed if requested. */
    if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
        (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
         < (u_int)ticks))) {
        read_random(&isn_secret, sizeof(isn_secret));
        isn_last_reseed = ticks;
    }

    /* Compute the md5 hash and return the ISN. */
    MD5Init(&isn_ctx);
    MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
    MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
    if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
        MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->in6p_faddr,
            sizeof(struct in6_addr));
        MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->in6p_laddr,
            sizeof(struct in6_addr));
    } else
#endif
    {
        MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_faddr,
            sizeof(struct in_addr));
        MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_laddr,
            sizeof(struct in_addr));
    }
    MD5Update(&isn_ctx, (u_char *)&isn_secret, sizeof(isn_secret));
    MD5Final((u_char *)&md5_buffer, &isn_ctx);
    new_isn = (tcp_seq)md5_buffer[0];
    new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
    return (new_isn);
}
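/*
 * In other words, the ISN computed above is
 *
 *     ISN = MD5(fport, lport, faddr, laddr, secret)[31:0]
 *           + ticks * (ISN_BYTES_PER_SECOND / hz)
 *
 * which is the RFC 1948 construction.  The additive term advances the
 * 32-bit sequence space by 2^20 per second, so it wraps after
 * 2^32 / 2^20 = 4096 seconds, a little over an hour, matching the
 * "over an hour before rollover" note above.
 */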
/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
struct inpcb *
tcp_quench(inp, errno)
    struct inpcb *inp;
    int errno;
{
    struct tcpcb *tp = intotcpcb(inp);

    if (tp != NULL)
        tp->snd_cwnd = tp->t_maxseg;
    return (inp);
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
struct inpcb *
tcp_drop_syn_sent(inp, errno)
    struct inpcb *inp;
    int errno;
{
    struct tcpcb *tp = intotcpcb(inp);

    if (tp != NULL && tp->t_state == TCPS_SYN_SENT) {
        tcp_drop(tp, errno);
        return ((struct inpcb *)0);
    }
    return (inp);
}

/*
 * When a `need fragmentation' ICMP is received, update our idea of the
 * MSS based on the new value in the route.  Also nudge TCP to send
 * something, since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
struct inpcb *
tcp_mtudisc(inp, errno)
    struct inpcb *inp;
    int errno;
{
    struct tcpcb *tp = intotcpcb(inp);
    struct rmxp_tao tao;
    struct socket *so = inp->inp_socket;
    u_int maxmtu;
    u_int romtu;
    int mss;
#ifdef INET6
    int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

    bzero(&tao, sizeof(tao));

    if (tp != NULL) {
        maxmtu = tcp_hc_getmtu(&inp->inp_inc); /* IPv4 and IPv6 */
        romtu =
#ifdef INET6
            isipv6 ? tcp_maxmtu6(&inp->inp_inc) :
#endif /* INET6 */
            tcp_maxmtu(&inp->inp_inc);
        if (!maxmtu)
            maxmtu = romtu;
        else
            maxmtu = min(maxmtu, romtu);
        if (!maxmtu) {
            tp->t_maxopd = tp->t_maxseg =
#ifdef INET6
                isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
                tcp_mssdflt;
            return (inp);
        }
        mss = maxmtu -
#ifdef INET6
            (isipv6 ?
                sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
                sizeof(struct tcpiphdr)
#ifdef INET6
            )
#endif /* INET6 */
            ;

        if (tcp_do_rfc1644) {
            tcp_hc_gettao(&inp->inp_inc, &tao);
            if (tao.tao_mssopt)
                mss = min(mss, tao.tao_mssopt);
        }
        /*
         * XXX - The above conditional probably violates the TCP
         * spec.  The problem is that, since we don't know the
         * other end's MSS, we are supposed to use a conservative
         * default.  But, if we do that, then MTU discovery will
         * never actually take place, because the conservative
         * default is much less than the MTUs typically seen
         * on the Internet today.  For the moment, we'll sweep
         * this under the carpet.
         *
         * The conservative default might not actually be a problem
         * if the only case this occurs is when sending an initial
         * SYN with options and data to a host we've never talked
         * to before.  Then, they will reply with an MSS value which
         * will get recorded and the new parameters should get
         * recomputed.  For Further Study.
         */
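        /*
         * For example, a discovered path MTU of 1500 gives
         * mss = 1500 - sizeof(struct tcpiphdr) = 1460 for IPv4 here;
         * if timestamps are in use, the TCPOLEN_TSTAMP_APPA (12 byte)
         * deduction below brings the usable payload per segment down
         * to 1448.
         */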
        if (tp->t_maxopd <= mss)
            return (inp);
        tp->t_maxopd = mss;

        if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
            (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
            mss -= TCPOLEN_TSTAMP_APPA;
        if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
            (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
            mss -= TCPOLEN_CC_APPA;
#if (MCLBYTES & (MCLBYTES - 1)) == 0
        if (mss > MCLBYTES)
            mss &= ~(MCLBYTES - 1);
#else
        if (mss > MCLBYTES)
            mss = mss / MCLBYTES * MCLBYTES;
#endif
        if (so->so_snd.sb_hiwat < mss)
            mss = so->so_snd.sb_hiwat;

        tp->t_maxseg = mss;

        tcpstat.tcps_mturesent++;
        tp->t_rtttime = 0;
        tp->snd_nxt = tp->snd_una;
        tcp_output(tp);
    }
    return (inp);
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
u_long
tcp_maxmtu(inc)
    struct in_conninfo *inc;
{
    struct route sro;
    struct sockaddr_in *dst;
    struct ifnet *ifp;
    u_long maxmtu = 0;

    KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));

    bzero(&sro, sizeof(sro));
    if (inc->inc_faddr.s_addr != INADDR_ANY) {
        dst = (struct sockaddr_in *)&sro.ro_dst;
        dst->sin_family = AF_INET;
        dst->sin_len = sizeof(*dst);
        dst->sin_addr = inc->inc_faddr;
        rtalloc_ign(&sro, RTF_CLONING);
    }
    if (sro.ro_rt != NULL) {
        ifp = sro.ro_rt->rt_ifp;
        if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
            maxmtu = ifp->if_mtu;
        else
            maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
        RTFREE(sro.ro_rt);
    }
    return (maxmtu);
}

#ifdef INET6
u_long
tcp_maxmtu6(inc)
    struct in_conninfo *inc;
{
    struct route_in6 sro6;
    struct ifnet *ifp;
    u_long maxmtu = 0;

    KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));

    bzero(&sro6, sizeof(sro6));
    if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
        sro6.ro_dst.sin6_family = AF_INET6;
        sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
        sro6.ro_dst.sin6_addr = inc->inc6_faddr;
        rtalloc_ign((struct route *)&sro6, RTF_CLONING);
    }
    if (sro6.ro_rt != NULL) {
        ifp = sro6.ro_rt->rt_ifp;
        if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
            maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
        else
            maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
                IN6_LINKMTU(sro6.ro_rt->rt_ifp));
        RTFREE(sro6.ro_rt);
    }

    return (maxmtu);
}
#endif /* INET6 */

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
    struct tcpcb *tp;
{
    struct inpcb *inp;
    struct mbuf *m;
    size_t hdrsiz;
    struct ip *ip;
#ifdef INET6
    struct ip6_hdr *ip6;
#endif
    struct tcphdr *th;

    if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
        return (0);
    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (!m)
        return (0);

#ifdef INET6
    if ((inp->inp_vflag & INP_IPV6) != 0) {
        ip6 = mtod(m, struct ip6_hdr *);
        th = (struct tcphdr *)(ip6 + 1);
        m->m_pkthdr.len = m->m_len =
            sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
        tcpip_fillheaders(inp, ip6, th);
        hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
    } else
#endif /* INET6 */
    {
        ip = mtod(m, struct ip *);
        th = (struct tcphdr *)(ip + 1);
        m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
        tcpip_fillheaders(inp, ip, th);
        hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
    }

    m_free(m);
    return (hdrsiz);
}
#endif /* IPSEC */

/*
 * Move a TCP connection into TIME_WAIT state.
 * tcbinfo is unlocked.
 * inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(tp)
    struct tcpcb *tp;
{
    struct tcptw *tw;
    struct inpcb *inp;
    int tw_time, acknow;
    struct socket *so;

    tw = uma_zalloc(tcptw_zone, M_NOWAIT);
    if (tw == NULL) {
        tw = tcp_timer_2msl_tw(1);
        if (tw == NULL) {
            tcp_close(tp);
            return;
        }
    }
    inp = tp->t_inpcb;
    tw->tw_inpcb = inp;

    /*
     * Recover last window size sent.
     */
    tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

    /*
     * Set t_recent if timestamps are used on the connection.
     */
    if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
        (TF_REQ_TSTMP|TF_RCVD_TSTMP))
        tw->t_recent = tp->ts_recent;
    else
        tw->t_recent = 0;

    tw->snd_nxt = tp->snd_nxt;
    tw->rcv_nxt = tp->rcv_nxt;
    tw->iss = tp->iss;
    tw->irs = tp->irs;
    tw->cc_recv = tp->cc_recv;
    tw->cc_send = tp->cc_send;
    tw->t_starttime = tp->t_starttime;
    tw->tw_time = 0;

    /*
     * XXX
     * If this code will be used for fin-wait-2 state also, then we
     * may need a ts_recent from the last segment.
     */
    /* Shorten TIME_WAIT [RFC-1644, p.28] */
    if (tp->cc_recv != 0 && (ticks - tp->t_starttime) < tcp_msl) {
        tw_time = tp->t_rxtcur * TCPTV_TWTRUNC;
        /* For T/TCP client, force ACK now. */
        acknow = 1;
    } else {
        tw_time = 2 * tcp_msl;
        acknow = tp->t_flags & TF_ACKNOW;
    }
    tcp_discardcb(tp);
    so = inp->inp_socket;
    so->so_pcb = NULL;
    tw->tw_cred = crhold(so->so_cred);
    tw->tw_so_options = so->so_options;
    if (acknow)
        tcp_twrespond(tw, TH_ACK);
    sotryfree(so);
    inp->inp_socket = NULL;
    inp->inp_ppcb = (caddr_t)tw;
    inp->inp_vflag |= INP_TIMEWAIT;
    tcp_timer_2msl_reset(tw, tw_time);
    INP_UNLOCK(inp);
}

/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND 250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
    tcp_seq new_iss = tw->iss;
    tcp_seq new_irs = tw->irs;

    new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
    new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

    if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
        return (1);
    else
        return (0);
}

struct tcptw *
tcp_twclose(struct tcptw *tw, int reuse)
{
    struct inpcb *inp;

    inp = tw->tw_inpcb;
    tw->tw_inpcb = NULL;
    tcp_timer_2msl_stop(tw);
    inp->inp_ppcb = NULL;
#ifdef INET6
    if (inp->inp_vflag & INP_IPV6PROTO)
        in6_pcbdetach(inp);
    else
#endif
        in_pcbdetach(inp);
    tcpstat.tcps_closed++;
    crfree(tw->tw_cred);
    tw->tw_cred = NULL;
    if (reuse)
        return (tw);
    uma_zfree(tcptw_zone, tw);
    return (NULL);
}

int
tcp_twrespond(struct tcptw *tw, int flags)
{
    struct inpcb *inp = tw->tw_inpcb;
    struct tcphdr *th;
    struct mbuf *m;
    struct ip *ip = NULL;
    u_int8_t *optp;
    u_int hdrlen, optlen;
    int error;
#ifdef INET6
    struct ip6_hdr *ip6 = NULL;
    int isipv6 = inp->inp_inc.inc_isipv6;
#endif

    m = m_gethdr(M_DONTWAIT, MT_HEADER);
    if (m == NULL)
        return (ENOBUFS);
    m->m_data += max_linkhdr;

#ifdef MAC
    mac_create_mbuf_from_inpcb(inp, m);
#endif

#ifdef INET6
    if (isipv6) {
        hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
        ip6 = mtod(m, struct ip6_hdr *);
        th = (struct tcphdr *)(ip6 + 1);
        tcpip_fillheaders(inp, ip6, th);
    } else
#endif
    {
        hdrlen = sizeof(struct tcpiphdr);
        ip = mtod(m, struct ip *);
        th = (struct tcphdr *)(ip + 1);
        tcpip_fillheaders(inp, ip, th);
    }
    optp = (u_int8_t *)(th + 1);

    /*
     * Send a timestamp and echo-reply if both our side and our peer
     * have sent timestamps in our SYN's and this is not a RST.
     */
    if (tw->t_recent && flags == TH_ACK) {
        u_int32_t *lp = (u_int32_t *)optp;

        /* Form timestamp option as shown in appendix A of RFC 1323. */
        *lp++ = htonl(TCPOPT_TSTAMP_HDR);
        *lp++ = htonl(ticks);
        *lp = htonl(tw->t_recent);
        optp += TCPOLEN_TSTAMP_APPA;
    }
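    /*
     * The 12 bytes appended above are the "appendix A" layout: two
     * NOPs, then kind 8 (timestamp) and length 10, followed by the
     * two 32-bit timestamp values; TCPOPT_TSTAMP_HDR packs the first
     * four bytes and TCPOLEN_TSTAMP_APPA is that length padded to 12.
     */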
    /*
     * Send `CC-family' options if needed, and it's not a RST.
     */
    if (tw->cc_recv != 0 && flags == TH_ACK) {
        u_int32_t *lp = (u_int32_t *)optp;

        *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
        *lp = htonl(tw->cc_send);
        optp += TCPOLEN_CC_APPA;
    }
    optlen = optp - (u_int8_t *)(th + 1);

    m->m_len = hdrlen + optlen;
    m->m_pkthdr.len = m->m_len;

    KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

    th->th_seq = htonl(tw->snd_nxt);
    th->th_ack = htonl(tw->rcv_nxt);
    th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
    th->th_flags = flags;
    th->th_win = htons(tw->last_win);

#ifdef INET6
    if (isipv6) {
        th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
            sizeof(struct tcphdr) + optlen);
        ip6->ip6_hlim = in6_selecthlim(inp, NULL);
        error = ip6_output(m, inp->in6p_outputopts, NULL,
            (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
    } else
#endif
    {
        th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
            htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
        m->m_pkthdr.csum_flags = CSUM_TCP;
        m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
        ip->ip_len = m->m_pkthdr.len;
        if (path_mtu_discovery)
            ip->ip_off |= IP_DF;
        error = ip_output(m, inp->inp_options, NULL,
            (tw->tw_so_options & SO_DONTROUTE), NULL, inp);
    }
    if (flags & TH_ACK)
        tcpstat.tcps_sndacks++;
    else
        tcpstat.tcps_sndctrl++;
    tcpstat.tcps_sndtotal++;
    return (error);
}

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this: First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
    u_long bw;
    u_long bwnd;
    int save_ticks;

    /*
     * If inflight_enable is disabled in the middle of a tcp connection,
     * make sure snd_bwnd is effectively disabled.
     */
    if (tcp_inflight_enable == 0) {
        tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
        tp->snd_bandwidth = 0;
        return;
    }

    /*
     * Figure out the bandwidth.  Due to the tick granularity this
     * is a very rough number and it MUST be averaged over a fairly
     * long period of time.  XXX we need to take into account a link
     * that is not using all available bandwidth, but for now our
     * slop will ramp us up if this case occurs and the bandwidth later
     * increases.
     *
     * Note: if ticks rollover 'bw' may wind up negative.  We must
     * effectively reset t_bw_rtttime for this case.
     */
    save_ticks = ticks;
    if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
        return;

    bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
        (save_ticks - tp->t_bw_rtttime);
    tp->t_bw_rtttime = save_ticks;
    tp->t_bw_rtseq = ack_seq;
    if (tp->t_bw_rtttime == 0 || (int)bw < 0)
        return;
    bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

    tp->snd_bandwidth = bw;

    /*
     * Calculate the semi-static bandwidth delay product, plus two maximal
     * segments.  The additional slop puts us squarely in the sweet
     * spot and also handles the bandwidth run-up case and stabilization.
     * Without the slop we could be locking ourselves into a lower
     * bandwidth.
     *
     * Situations Handled:
     *    (1) Prevents over-queueing of packets on LANs, especially on
     *        high speed LANs, allowing larger TCP buffers to be
     *        specified, and also does a good job preventing
     *        over-queueing of packets over choke points like modems
     *        (at least for the transmit side).
     *
     *    (2) Is able to handle changing network loads (bandwidth
     *        drops so bwnd drops, bandwidth increases so bwnd
     *        increases).
     *
     *    (3) Theoretically should stabilize in the face of multiple
     *        connections implementing the same algorithm (this may need
     *        a little work).
     *
     *    (4) Stability value (defaults to 20 = 2 maximal packets) can
     *        be adjusted with a sysctl but typically only needs to be
     *        adjusted on very slow connections.  A value no smaller
     *        than 5 should be used, but only reduce this default if
     *        you have no other choice.
     */
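    /*
     * To put numbers on the calculation below: on a 10 Mbit/s path
     * (bw ~ 1,250,000 bytes/sec) with a smoothed rtt around 50 ms,
     * the product term is about 62,500 bytes, and the default
     * tcp_inflight_stab of 20 adds 2 * t_maxseg (~2,920 bytes for
     * Ethernet) of slop, so snd_bwnd settles near 65 Kbytes.  USERTT
     * is in units of ticks << TCP_RTT_SHIFT, hence the
     * hz << TCP_RTT_SHIFT divisor.
     */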
#define USERTT ((tp->t_srtt + tp->t_rttbest) / 2)
    bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
        tcp_inflight_stab * tp->t_maxseg / 10;
#undef USERTT

    if (tcp_inflight_debug > 0) {
        static int ltime;
        if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
            ltime = ticks;
            printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
                tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
        }
    }
    if ((long)bwnd < tcp_inflight_min)
        bwnd = tcp_inflight_min;
    if (bwnd > tcp_inflight_max)
        bwnd = tcp_inflight_max;
    if ((long)bwnd < tp->t_maxseg * 2)
        bwnd = tp->t_maxseg * 2;
    tp->snd_bwnd = bwnd;
}

#ifdef TCP_SIGNATURE
/*
 * Callback function invoked by m_apply() to digest TCP segment data
 * contained within an mbuf chain.
 */
static int
tcp_signature_apply(void *fstate, void *data, u_int len)
{

    MD5Update(fstate, (u_char *)data, len);
    return (0);
}

/*
 * Compute TCP-MD5 hash of a TCPv4 segment. (RFC2385)
 *
 * Parameters:
 * m         pointer to head of mbuf chain
 * off0      offset to TCP header within the mbuf chain
 * len       length of TCP segment data, excluding options
 * optlen    length of TCP segment options
 * buf       pointer to storage for computed MD5 digest
 * direction direction of flow (IPSEC_DIR_INBOUND or OUTBOUND)
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * This function is for IPv4 use only.  Calling this function with an
 * IPv6 packet in the mbuf chain will yield undefined results.
 *
 * Return 0 if successful, otherwise an error (EINVAL if the SADB
 * lookup fails).
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application.  This is hardcoded elsewhere to 1179
 * right now.  Another branch of this code exists which uses the SPD to
 * specify per-application flows but it is unstable.
 */
int
tcp_signature_compute(struct mbuf *m, int off0, int len, int optlen,
    u_char *buf, u_int direction)
{
    union sockaddr_union dst;
    struct ippseudo ippseudo;
    MD5_CTX ctx;
    int doff;
    struct ip *ip;
    struct ipovly *ipovly;
    struct secasvar *sav;
    struct tcphdr *th;
    u_short savecsum;

    KASSERT(m != NULL, ("NULL mbuf chain"));
    KASSERT(buf != NULL, ("NULL signature pointer"));

    /* Extract the destination from the IP header in the mbuf. */
    ip = mtod(m, struct ip *);
    bzero(&dst, sizeof(union sockaddr_union));
    dst.sa.sa_len = sizeof(struct sockaddr_in);
    dst.sa.sa_family = AF_INET;
    dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
        ip->ip_src : ip->ip_dst;

    /* Look up an SADB entry which matches the address of the peer. */
    sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
    if (sav == NULL) {
        printf("%s: SADB lookup failed for %s\n", __func__,
            inet_ntoa(dst.sin.sin_addr));
        return (EINVAL);
    }

    MD5Init(&ctx);
    ipovly = (struct ipovly *)ip;
    th = (struct tcphdr *)((u_char *)ip + off0);
    doff = off0 + sizeof(struct tcphdr) + optlen;

    /*
     * Step 1: Update MD5 hash with IP pseudo-header.
     *
     * XXX The ippseudo header MUST be digested in network byte order,
     * or else we'll fail the regression test.  Assume all fields we've
     * been doing arithmetic on have been in host byte order.
     * XXX One cannot depend on ipovly->ih_len here.  When called from
     * tcp_output(), the underlying ip_len member has not yet been set.
     */
    ippseudo.ippseudo_src = ipovly->ih_src;
    ippseudo.ippseudo_dst = ipovly->ih_dst;
    ippseudo.ippseudo_pad = 0;
    ippseudo.ippseudo_p = IPPROTO_TCP;
    ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
    MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));

    /*
     * Step 2: Update MD5 hash with TCP header, excluding options.
     * The TCP checksum must be set to zero.
     */
    savecsum = th->th_sum;
    th->th_sum = 0;
    MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
    th->th_sum = savecsum;

    /*
     * Step 3: Update MD5 hash with TCP segment data.
     * Use m_apply() to avoid an early m_pullup().
     */
    if (len > 0)
        m_apply(m, doff, len, tcp_signature_apply, &ctx);

    /*
     * Step 4: Update MD5 hash with shared secret.
     */
    MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
    MD5Final(buf, &ctx);

    key_sa_recordxfer(sav, m);
    KEY_FREESAV(&sav);
    return (0);
}
#endif /* TCP_SIGNATURE */