1 /* 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95 30 * $FreeBSD$ 31 */ 32 33 #include "opt_compat.h" 34 #include "opt_inet.h" 35 #include "opt_inet6.h" 36 #include "opt_ipsec.h" 37 #include "opt_mac.h" 38 #include "opt_tcpdebug.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/callout.h> 43 #include <sys/kernel.h> 44 #include <sys/sysctl.h> 45 #include <sys/mac.h> 46 #include <sys/malloc.h> 47 #include <sys/mbuf.h> 48 #ifdef INET6 49 #include <sys/domain.h> 50 #endif 51 #include <sys/proc.h> 52 #include <sys/socket.h> 53 #include <sys/socketvar.h> 54 #include <sys/protosw.h> 55 #include <sys/random.h> 56 57 #include <vm/uma.h> 58 59 #include <net/route.h> 60 #include <net/if.h> 61 62 #include <netinet/in.h> 63 #include <netinet/in_systm.h> 64 #include <netinet/ip.h> 65 #ifdef INET6 66 #include <netinet/ip6.h> 67 #endif 68 #include <netinet/in_pcb.h> 69 #ifdef INET6 70 #include <netinet6/in6_pcb.h> 71 #endif 72 #include <netinet/in_var.h> 73 #include <netinet/ip_var.h> 74 #ifdef INET6 75 #include <netinet6/ip6_var.h> 76 #include <netinet6/nd6.h> 77 #endif 78 #include <netinet/tcp.h> 79 #include <netinet/tcp_fsm.h> 80 #include <netinet/tcp_seq.h> 81 #include <netinet/tcp_timer.h> 82 #include <netinet/tcp_var.h> 83 #ifdef INET6 84 #include <netinet6/tcp6_var.h> 85 #endif 86 #include <netinet/tcpip.h> 87 #ifdef TCPDEBUG 88 #include <netinet/tcp_debug.h> 89 #endif 90 #include <netinet6/ip6protosw.h> 91 92 #ifdef IPSEC 93 #include <netinet6/ipsec.h> 94 #ifdef INET6 95 #include <netinet6/ipsec6.h> 96 #endif 97 #endif /*IPSEC*/ 98 99 #ifdef FAST_IPSEC 100 #include <netipsec/ipsec.h> 101 #include <netipsec/xform.h> 102 #ifdef INET6 103 #include <netipsec/ipsec6.h> 104 #endif 105 #include <netipsec/key.h> 106 #define IPSEC 107 #endif /*FAST_IPSEC*/ 108 109 #include <machine/in_cksum.h> 110 #include <sys/md5.h> 111 112 int tcp_mssdflt = TCP_MSS; 113 SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW, 114 &tcp_mssdflt , 0, "Default TCP Maximum Segment Size"); 115 116 #ifdef INET6 117 int tcp_v6mssdflt = TCP6_MSS; 118 SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, 119 CTLFLAG_RW, &tcp_v6mssdflt , 0, 120 "Default TCP Maximum Segment Size for IPv6"); 121 #endif 122 123 /* 124 * Minimum MSS we accept and use. This prevents DoS attacks where 125 * we are forced to a ridiculous low MSS like 20 and send hundreds 126 * of packets instead of one. The effect scales with the available 127 * bandwidth and quickly saturates the CPU and network interface 128 * with packet generation and sending. Set to zero to disable MINMSS 129 * checking. This setting prevents us from sending too small packets. 130 */ 131 int tcp_minmss = TCP_MINMSS; 132 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW, 133 &tcp_minmss , 0, "Minmum TCP Maximum Segment Size"); 134 /* 135 * Number of TCP segments per second we accept from remote host 136 * before we start to calculate average segment size. If average 137 * segment size drops below the minimum TCP MSS we assume a DoS 138 * attack and reset+drop the connection. Care has to be taken not to 139 * set this value too small to not kill interactive type connections 140 * (telnet, SSH) which send many small packets. 141 */ 142 int tcp_minmssoverload = TCP_MINMSSOVERLOAD; 143 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW, 144 &tcp_minmssoverload , 0, "Number of TCP Segments per Second allowed to" 145 "be under the MINMSS Size"); 146 147 #if 0 148 static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ; 149 SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW, 150 &tcp_rttdflt , 0, "Default maximum TCP Round Trip Time"); 151 #endif 152 153 int tcp_do_rfc1323 = 1; 154 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW, 155 &tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions"); 156 157 int tcp_do_rfc1644 = 0; 158 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW, 159 &tcp_do_rfc1644 , 0, "Enable rfc1644 (TTCP) extensions"); 160 161 static int tcp_tcbhashsize = 0; 162 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN, 163 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable"); 164 165 static int do_tcpdrain = 1; 166 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0, 167 "Enable tcp_drain routine for extra help when low on mbufs"); 168 169 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD, 170 &tcbinfo.ipi_count, 0, "Number of active PCBs"); 171 172 static int icmp_may_rst = 1; 173 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0, 174 "Certain ICMP unreachable messages may abort connections in SYN_SENT"); 175 176 static int tcp_isn_reseed_interval = 0; 177 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW, 178 &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret"); 179 180 /* 181 * TCP bandwidth limiting sysctls. Note that the default lower bound of 182 * 1024 exists only for debugging. A good production default would be 183 * something like 6100. 184 */ 185 static int tcp_inflight_enable = 1; 186 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW, 187 &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting"); 188 189 static int tcp_inflight_debug = 0; 190 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW, 191 &tcp_inflight_debug, 0, "Debug TCP inflight calculations"); 192 193 static int tcp_inflight_min = 6144; 194 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW, 195 &tcp_inflight_min, 0, "Lower-bound for TCP inflight window"); 196 197 static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT; 198 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW, 199 &tcp_inflight_max, 0, "Upper-bound for TCP inflight window"); 200 static int tcp_inflight_stab = 20; 201 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW, 202 &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets"); 203 204 static struct inpcb *tcp_notify(struct inpcb *, int); 205 static void tcp_discardcb(struct tcpcb *); 206 static void tcp_isn_tick(void *); 207 208 /* 209 * Target size of TCP PCB hash tables. Must be a power of two. 210 * 211 * Note that this can be overridden by the kernel environment 212 * variable net.inet.tcp.tcbhashsize 213 */ 214 #ifndef TCBHASHSIZE 215 #define TCBHASHSIZE 512 216 #endif 217 218 /* 219 * XXX 220 * Callouts should be moved into struct tcp directly. They are currently 221 * separate because the tcpcb structure is exported to userland for sysctl 222 * parsing purposes, which do not know about callouts. 223 */ 224 struct tcpcb_mem { 225 struct tcpcb tcb; 226 struct callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep; 227 struct callout tcpcb_mem_2msl, tcpcb_mem_delack; 228 }; 229 230 static uma_zone_t tcpcb_zone; 231 static uma_zone_t tcptw_zone; 232 struct callout isn_callout; 233 234 /* 235 * Tcp initialization 236 */ 237 void 238 tcp_init() 239 { 240 int hashsize = TCBHASHSIZE; 241 242 tcp_ccgen = 1; 243 244 tcp_delacktime = TCPTV_DELACK; 245 tcp_keepinit = TCPTV_KEEP_INIT; 246 tcp_keepidle = TCPTV_KEEP_IDLE; 247 tcp_keepintvl = TCPTV_KEEPINTVL; 248 tcp_maxpersistidle = TCPTV_KEEP_IDLE; 249 tcp_msl = TCPTV_MSL; 250 tcp_rexmit_min = TCPTV_MIN; 251 tcp_rexmit_slop = TCPTV_CPU_VAR; 252 253 INP_INFO_LOCK_INIT(&tcbinfo, "tcp"); 254 LIST_INIT(&tcb); 255 tcbinfo.listhead = &tcb; 256 TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize); 257 if (!powerof2(hashsize)) { 258 printf("WARNING: TCB hash size not a power of 2\n"); 259 hashsize = 512; /* safe default */ 260 } 261 tcp_tcbhashsize = hashsize; 262 tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask); 263 tcbinfo.porthashbase = hashinit(hashsize, M_PCB, 264 &tcbinfo.porthashmask); 265 tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb), 266 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 267 uma_zone_set_max(tcbinfo.ipi_zone, maxsockets); 268 #ifdef INET6 269 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)) 270 #else /* INET6 */ 271 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr)) 272 #endif /* INET6 */ 273 if (max_protohdr < TCP_MINPROTOHDR) 274 max_protohdr = TCP_MINPROTOHDR; 275 if (max_linkhdr + TCP_MINPROTOHDR > MHLEN) 276 panic("tcp_init"); 277 #undef TCP_MINPROTOHDR 278 /* 279 * These have to be type stable for the benefit of the timers. 280 */ 281 tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem), 282 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 283 uma_zone_set_max(tcpcb_zone, maxsockets); 284 tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw), 285 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 286 uma_zone_set_max(tcptw_zone, maxsockets / 5); 287 tcp_timer_init(); 288 syncache_init(); 289 tcp_hc_init(); 290 tcp_reass_init(); 291 callout_init(&isn_callout, CALLOUT_MPSAFE); 292 tcp_isn_tick(NULL); 293 EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL, 294 SHUTDOWN_PRI_DEFAULT); 295 } 296 297 void 298 tcp_fini(xtp) 299 void *xtp; 300 { 301 callout_stop(&isn_callout); 302 303 } 304 305 /* 306 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb. 307 * tcp_template used to store this data in mbufs, but we now recopy it out 308 * of the tcpcb each time to conserve mbufs. 309 */ 310 void 311 tcpip_fillheaders(inp, ip_ptr, tcp_ptr) 312 struct inpcb *inp; 313 void *ip_ptr; 314 void *tcp_ptr; 315 { 316 struct tcphdr *th = (struct tcphdr *)tcp_ptr; 317 318 #ifdef INET6 319 if ((inp->inp_vflag & INP_IPV6) != 0) { 320 struct ip6_hdr *ip6; 321 322 ip6 = (struct ip6_hdr *)ip_ptr; 323 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | 324 (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK); 325 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) | 326 (IPV6_VERSION & IPV6_VERSION_MASK); 327 ip6->ip6_nxt = IPPROTO_TCP; 328 ip6->ip6_plen = sizeof(struct tcphdr); 329 ip6->ip6_src = inp->in6p_laddr; 330 ip6->ip6_dst = inp->in6p_faddr; 331 } else 332 #endif 333 { 334 struct ip *ip; 335 336 ip = (struct ip *)ip_ptr; 337 ip->ip_v = IPVERSION; 338 ip->ip_hl = 5; 339 ip->ip_tos = inp->inp_ip_tos; 340 ip->ip_len = 0; 341 ip->ip_id = 0; 342 ip->ip_off = 0; 343 ip->ip_ttl = inp->inp_ip_ttl; 344 ip->ip_sum = 0; 345 ip->ip_p = IPPROTO_TCP; 346 ip->ip_src = inp->inp_laddr; 347 ip->ip_dst = inp->inp_faddr; 348 } 349 th->th_sport = inp->inp_lport; 350 th->th_dport = inp->inp_fport; 351 th->th_seq = 0; 352 th->th_ack = 0; 353 th->th_x2 = 0; 354 th->th_off = 5; 355 th->th_flags = 0; 356 th->th_win = 0; 357 th->th_urp = 0; 358 th->th_sum = 0; /* in_pseudo() is called later for ipv4 */ 359 } 360 361 /* 362 * Create template to be used to send tcp packets on a connection. 363 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only 364 * use for this function is in keepalives, which use tcp_respond. 365 */ 366 struct tcptemp * 367 tcpip_maketemplate(inp) 368 struct inpcb *inp; 369 { 370 struct mbuf *m; 371 struct tcptemp *n; 372 373 m = m_get(M_DONTWAIT, MT_HEADER); 374 if (m == NULL) 375 return (0); 376 m->m_len = sizeof(struct tcptemp); 377 n = mtod(m, struct tcptemp *); 378 379 tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t); 380 return (n); 381 } 382 383 /* 384 * Send a single message to the TCP at address specified by 385 * the given TCP/IP header. If m == NULL, then we make a copy 386 * of the tcpiphdr at ti and send directly to the addressed host. 387 * This is used to force keep alive messages out using the TCP 388 * template for a connection. If flags are given then we send 389 * a message back to the TCP which originated the * segment ti, 390 * and discard the mbuf containing it and any other attached mbufs. 391 * 392 * In any case the ack and sequence number of the transmitted 393 * segment are as specified by the parameters. 394 * 395 * NOTE: If m != NULL, then ti must point to *inside* the mbuf. 396 */ 397 void 398 tcp_respond(tp, ipgen, th, m, ack, seq, flags) 399 struct tcpcb *tp; 400 void *ipgen; 401 register struct tcphdr *th; 402 register struct mbuf *m; 403 tcp_seq ack, seq; 404 int flags; 405 { 406 register int tlen; 407 int win = 0; 408 struct ip *ip; 409 struct tcphdr *nth; 410 #ifdef INET6 411 struct ip6_hdr *ip6; 412 int isipv6; 413 #endif /* INET6 */ 414 int ipflags = 0; 415 struct inpcb *inp; 416 417 KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL")); 418 419 #ifdef INET6 420 isipv6 = ((struct ip *)ipgen)->ip_v == 6; 421 ip6 = ipgen; 422 #endif /* INET6 */ 423 ip = ipgen; 424 425 if (tp != NULL) { 426 inp = tp->t_inpcb; 427 KASSERT(inp != NULL, ("tcp control block w/o inpcb")); 428 INP_INFO_WLOCK_ASSERT(&tcbinfo); 429 INP_LOCK_ASSERT(inp); 430 } else 431 inp = NULL; 432 433 if (tp != NULL) { 434 if (!(flags & TH_RST)) { 435 win = sbspace(&inp->inp_socket->so_rcv); 436 if (win > (long)TCP_MAXWIN << tp->rcv_scale) 437 win = (long)TCP_MAXWIN << tp->rcv_scale; 438 } 439 } 440 if (m == NULL) { 441 m = m_gethdr(M_DONTWAIT, MT_HEADER); 442 if (m == NULL) 443 return; 444 tlen = 0; 445 m->m_data += max_linkhdr; 446 #ifdef INET6 447 if (isipv6) { 448 bcopy((caddr_t)ip6, mtod(m, caddr_t), 449 sizeof(struct ip6_hdr)); 450 ip6 = mtod(m, struct ip6_hdr *); 451 nth = (struct tcphdr *)(ip6 + 1); 452 } else 453 #endif /* INET6 */ 454 { 455 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip)); 456 ip = mtod(m, struct ip *); 457 nth = (struct tcphdr *)(ip + 1); 458 } 459 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr)); 460 flags = TH_ACK; 461 } else { 462 m_freem(m->m_next); 463 m->m_next = NULL; 464 m->m_data = (caddr_t)ipgen; 465 /* m_len is set later */ 466 tlen = 0; 467 #define xchg(a,b,type) { type t; t=a; a=b; b=t; } 468 #ifdef INET6 469 if (isipv6) { 470 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr); 471 nth = (struct tcphdr *)(ip6 + 1); 472 } else 473 #endif /* INET6 */ 474 { 475 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long); 476 nth = (struct tcphdr *)(ip + 1); 477 } 478 if (th != nth) { 479 /* 480 * this is usually a case when an extension header 481 * exists between the IPv6 header and the 482 * TCP header. 483 */ 484 nth->th_sport = th->th_sport; 485 nth->th_dport = th->th_dport; 486 } 487 xchg(nth->th_dport, nth->th_sport, n_short); 488 #undef xchg 489 } 490 #ifdef INET6 491 if (isipv6) { 492 ip6->ip6_flow = 0; 493 ip6->ip6_vfc = IPV6_VERSION; 494 ip6->ip6_nxt = IPPROTO_TCP; 495 ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) + 496 tlen)); 497 tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr); 498 } else 499 #endif 500 { 501 tlen += sizeof (struct tcpiphdr); 502 ip->ip_len = tlen; 503 ip->ip_ttl = ip_defttl; 504 if (path_mtu_discovery) 505 ip->ip_off |= IP_DF; 506 } 507 m->m_len = tlen; 508 m->m_pkthdr.len = tlen; 509 m->m_pkthdr.rcvif = NULL; 510 #ifdef MAC 511 if (inp != NULL) { 512 /* 513 * Packet is associated with a socket, so allow the 514 * label of the response to reflect the socket label. 515 */ 516 INP_LOCK_ASSERT(inp); 517 mac_create_mbuf_from_inpcb(inp, m); 518 } else { 519 /* 520 * Packet is not associated with a socket, so possibly 521 * update the label in place. 522 */ 523 mac_reflect_mbuf_tcp(m); 524 } 525 #endif 526 nth->th_seq = htonl(seq); 527 nth->th_ack = htonl(ack); 528 nth->th_x2 = 0; 529 nth->th_off = sizeof (struct tcphdr) >> 2; 530 nth->th_flags = flags; 531 if (tp != NULL) 532 nth->th_win = htons((u_short) (win >> tp->rcv_scale)); 533 else 534 nth->th_win = htons((u_short)win); 535 nth->th_urp = 0; 536 #ifdef INET6 537 if (isipv6) { 538 nth->th_sum = 0; 539 nth->th_sum = in6_cksum(m, IPPROTO_TCP, 540 sizeof(struct ip6_hdr), 541 tlen - sizeof(struct ip6_hdr)); 542 ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb : 543 NULL, NULL); 544 } else 545 #endif /* INET6 */ 546 { 547 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 548 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p))); 549 m->m_pkthdr.csum_flags = CSUM_TCP; 550 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 551 } 552 #ifdef TCPDEBUG 553 if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG)) 554 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0); 555 #endif 556 #ifdef INET6 557 if (isipv6) 558 (void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp); 559 else 560 #endif /* INET6 */ 561 (void) ip_output(m, NULL, NULL, ipflags, NULL, inp); 562 } 563 564 /* 565 * Create a new TCP control block, making an 566 * empty reassembly queue and hooking it to the argument 567 * protocol control block. The `inp' parameter must have 568 * come from the zone allocator set up in tcp_init(). 569 */ 570 struct tcpcb * 571 tcp_newtcpcb(inp) 572 struct inpcb *inp; 573 { 574 struct tcpcb_mem *tm; 575 struct tcpcb *tp; 576 #ifdef INET6 577 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 578 #endif /* INET6 */ 579 580 tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO); 581 if (tm == NULL) 582 return (NULL); 583 tp = &tm->tcb; 584 /* LIST_INIT(&tp->t_segq); */ /* XXX covered by M_ZERO */ 585 tp->t_maxseg = tp->t_maxopd = 586 #ifdef INET6 587 isipv6 ? tcp_v6mssdflt : 588 #endif /* INET6 */ 589 tcp_mssdflt; 590 591 /* Set up our timeouts. */ 592 callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, 0); 593 callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, 0); 594 callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, 0); 595 callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, 0); 596 callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, 0); 597 598 if (tcp_do_rfc1323) 599 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP); 600 if (tcp_do_rfc1644) 601 tp->t_flags |= TF_REQ_CC; 602 tp->t_inpcb = inp; /* XXX */ 603 /* 604 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no 605 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives 606 * reasonable initial retransmit time. 607 */ 608 tp->t_srtt = TCPTV_SRTTBASE; 609 tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4; 610 tp->t_rttmin = tcp_rexmit_min; 611 tp->t_rxtcur = TCPTV_RTOBASE; 612 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; 613 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; 614 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT; 615 tp->t_rcvtime = ticks; 616 tp->t_bw_rtttime = ticks; 617 /* 618 * IPv4 TTL initialization is necessary for an IPv6 socket as well, 619 * because the socket may be bound to an IPv6 wildcard address, 620 * which may match an IPv4-mapped IPv6 address. 621 */ 622 inp->inp_ip_ttl = ip_defttl; 623 inp->inp_ppcb = (caddr_t)tp; 624 return (tp); /* XXX */ 625 } 626 627 /* 628 * Drop a TCP connection, reporting 629 * the specified error. If connection is synchronized, 630 * then send a RST to peer. 631 */ 632 struct tcpcb * 633 tcp_drop(tp, errno) 634 register struct tcpcb *tp; 635 int errno; 636 { 637 struct socket *so = tp->t_inpcb->inp_socket; 638 639 if (TCPS_HAVERCVDSYN(tp->t_state)) { 640 tp->t_state = TCPS_CLOSED; 641 (void) tcp_output(tp); 642 tcpstat.tcps_drops++; 643 } else 644 tcpstat.tcps_conndrops++; 645 if (errno == ETIMEDOUT && tp->t_softerror) 646 errno = tp->t_softerror; 647 so->so_error = errno; 648 return (tcp_close(tp)); 649 } 650 651 static void 652 tcp_discardcb(tp) 653 struct tcpcb *tp; 654 { 655 struct tseg_qent *q; 656 struct inpcb *inp = tp->t_inpcb; 657 struct socket *so = inp->inp_socket; 658 #ifdef INET6 659 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 660 #endif /* INET6 */ 661 662 /* 663 * Make sure that all of our timers are stopped before we 664 * delete the PCB. 665 */ 666 callout_stop(tp->tt_rexmt); 667 callout_stop(tp->tt_persist); 668 callout_stop(tp->tt_keep); 669 callout_stop(tp->tt_2msl); 670 callout_stop(tp->tt_delack); 671 672 /* 673 * If we got enough samples through the srtt filter, 674 * save the rtt and rttvar in the routing entry. 675 * 'Enough' is arbitrarily defined as 4 rtt samples. 676 * 4 samples is enough for the srtt filter to converge 677 * to within enough % of the correct value; fewer samples 678 * and we could save a bogus rtt. The danger is not high 679 * as tcp quickly recovers from everything. 680 * XXX: Works very well but needs some more statistics! 681 */ 682 if (tp->t_rttupdated >= 4) { 683 struct hc_metrics_lite metrics; 684 u_long ssthresh; 685 686 bzero(&metrics, sizeof(metrics)); 687 /* 688 * Update the ssthresh always when the conditions below 689 * are satisfied. This gives us better new start value 690 * for the congestion avoidance for new connections. 691 * ssthresh is only set if packet loss occured on a session. 692 */ 693 ssthresh = tp->snd_ssthresh; 694 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) { 695 /* 696 * convert the limit from user data bytes to 697 * packets then to packet data bytes. 698 */ 699 ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg; 700 if (ssthresh < 2) 701 ssthresh = 2; 702 ssthresh *= (u_long)(tp->t_maxseg + 703 #ifdef INET6 704 (isipv6 ? sizeof (struct ip6_hdr) + 705 sizeof (struct tcphdr) : 706 #endif 707 sizeof (struct tcpiphdr) 708 #ifdef INET6 709 ) 710 #endif 711 ); 712 } else 713 ssthresh = 0; 714 metrics.rmx_ssthresh = ssthresh; 715 716 metrics.rmx_rtt = tp->t_srtt; 717 metrics.rmx_rttvar = tp->t_rttvar; 718 /* XXX: This wraps if the pipe is more than 4 Gbit per second */ 719 metrics.rmx_bandwidth = tp->snd_bandwidth; 720 metrics.rmx_cwnd = tp->snd_cwnd; 721 metrics.rmx_sendpipe = 0; 722 metrics.rmx_recvpipe = 0; 723 724 tcp_hc_update(&inp->inp_inc, &metrics); 725 } 726 727 /* free the reassembly queue, if any */ 728 while ((q = LIST_FIRST(&tp->t_segq)) != NULL) { 729 LIST_REMOVE(q, tqe_q); 730 m_freem(q->tqe_m); 731 uma_zfree(tcp_reass_zone, q); 732 tp->t_segqlen--; 733 tcp_reass_qsize--; 734 } 735 inp->inp_ppcb = NULL; 736 tp->t_inpcb = NULL; 737 uma_zfree(tcpcb_zone, tp); 738 soisdisconnected(so); 739 } 740 741 /* 742 * Close a TCP control block: 743 * discard all space held by the tcp 744 * discard internet protocol block 745 * wake up any sleepers 746 */ 747 struct tcpcb * 748 tcp_close(tp) 749 struct tcpcb *tp; 750 { 751 struct inpcb *inp = tp->t_inpcb; 752 #ifdef INET6 753 struct socket *so = inp->inp_socket; 754 #endif 755 756 tcp_discardcb(tp); 757 #ifdef INET6 758 if (INP_CHECK_SOCKAF(so, AF_INET6)) 759 in6_pcbdetach(inp); 760 else 761 #endif 762 in_pcbdetach(inp); 763 tcpstat.tcps_closed++; 764 return (NULL); 765 } 766 767 void 768 tcp_drain() 769 { 770 if (do_tcpdrain) 771 { 772 struct inpcb *inpb; 773 struct tcpcb *tcpb; 774 struct tseg_qent *te; 775 776 /* 777 * Walk the tcpbs, if existing, and flush the reassembly queue, 778 * if there is one... 779 * XXX: The "Net/3" implementation doesn't imply that the TCP 780 * reassembly queue should be flushed, but in a situation 781 * where we're really low on mbufs, this is potentially 782 * usefull. 783 */ 784 INP_INFO_RLOCK(&tcbinfo); 785 LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) { 786 if (inpb->inp_vflag & INP_TIMEWAIT) 787 continue; 788 INP_LOCK(inpb); 789 if ((tcpb = intotcpcb(inpb)) != NULL) { 790 while ((te = LIST_FIRST(&tcpb->t_segq)) 791 != NULL) { 792 LIST_REMOVE(te, tqe_q); 793 m_freem(te->tqe_m); 794 uma_zfree(tcp_reass_zone, te); 795 tcpb->t_segqlen--; 796 tcp_reass_qsize--; 797 } 798 } 799 INP_UNLOCK(inpb); 800 } 801 INP_INFO_RUNLOCK(&tcbinfo); 802 } 803 } 804 805 /* 806 * Notify a tcp user of an asynchronous error; 807 * store error as soft error, but wake up user 808 * (for now, won't do anything until can select for soft error). 809 * 810 * Do not wake up user since there currently is no mechanism for 811 * reporting soft errors (yet - a kqueue filter may be added). 812 */ 813 static struct inpcb * 814 tcp_notify(inp, error) 815 struct inpcb *inp; 816 int error; 817 { 818 struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb; 819 820 /* 821 * Ignore some errors if we are hooked up. 822 * If connection hasn't completed, has retransmitted several times, 823 * and receives a second error, give up now. This is better 824 * than waiting a long time to establish a connection that 825 * can never complete. 826 */ 827 if (tp->t_state == TCPS_ESTABLISHED && 828 (error == EHOSTUNREACH || error == ENETUNREACH || 829 error == EHOSTDOWN)) { 830 return inp; 831 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 && 832 tp->t_softerror) { 833 tcp_drop(tp, error); 834 return (struct inpcb *)0; 835 } else { 836 tp->t_softerror = error; 837 return inp; 838 } 839 #if 0 840 wakeup( &so->so_timeo); 841 sorwakeup(so); 842 sowwakeup(so); 843 #endif 844 } 845 846 static int 847 tcp_pcblist(SYSCTL_HANDLER_ARGS) 848 { 849 int error, i, n, s; 850 struct inpcb *inp, **inp_list; 851 inp_gen_t gencnt; 852 struct xinpgen xig; 853 854 /* 855 * The process of preparing the TCB list is too time-consuming and 856 * resource-intensive to repeat twice on every request. 857 */ 858 if (req->oldptr == NULL) { 859 n = tcbinfo.ipi_count; 860 req->oldidx = 2 * (sizeof xig) 861 + (n + n/8) * sizeof(struct xtcpcb); 862 return 0; 863 } 864 865 if (req->newptr != NULL) 866 return EPERM; 867 868 /* 869 * OK, now we're committed to doing something. 870 */ 871 s = splnet(); 872 INP_INFO_RLOCK(&tcbinfo); 873 gencnt = tcbinfo.ipi_gencnt; 874 n = tcbinfo.ipi_count; 875 INP_INFO_RUNLOCK(&tcbinfo); 876 splx(s); 877 878 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig) 879 + n * sizeof(struct xtcpcb)); 880 if (error != 0) 881 return (error); 882 883 xig.xig_len = sizeof xig; 884 xig.xig_count = n; 885 xig.xig_gen = gencnt; 886 xig.xig_sogen = so_gencnt; 887 error = SYSCTL_OUT(req, &xig, sizeof xig); 888 if (error) 889 return error; 890 891 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK); 892 if (inp_list == NULL) 893 return ENOMEM; 894 895 s = splnet(); 896 INP_INFO_RLOCK(&tcbinfo); 897 for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp != NULL && i < n; 898 inp = LIST_NEXT(inp, inp_list)) { 899 INP_LOCK(inp); 900 if (inp->inp_gencnt <= gencnt) { 901 /* 902 * XXX: This use of cr_cansee(), introduced with 903 * TCP state changes, is not quite right, but for 904 * now, better than nothing. 905 */ 906 if (inp->inp_vflag & INP_TIMEWAIT) 907 error = cr_cansee(req->td->td_ucred, 908 intotw(inp)->tw_cred); 909 else 910 error = cr_canseesocket(req->td->td_ucred, 911 inp->inp_socket); 912 if (error == 0) 913 inp_list[i++] = inp; 914 } 915 INP_UNLOCK(inp); 916 } 917 INP_INFO_RUNLOCK(&tcbinfo); 918 splx(s); 919 n = i; 920 921 error = 0; 922 for (i = 0; i < n; i++) { 923 inp = inp_list[i]; 924 if (inp->inp_gencnt <= gencnt) { 925 struct xtcpcb xt; 926 caddr_t inp_ppcb; 927 xt.xt_len = sizeof xt; 928 /* XXX should avoid extra copy */ 929 bcopy(inp, &xt.xt_inp, sizeof *inp); 930 inp_ppcb = inp->inp_ppcb; 931 if (inp_ppcb == NULL) 932 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp); 933 else if (inp->inp_vflag & INP_TIMEWAIT) { 934 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp); 935 xt.xt_tp.t_state = TCPS_TIME_WAIT; 936 } else 937 bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp); 938 if (inp->inp_socket != NULL) 939 sotoxsocket(inp->inp_socket, &xt.xt_socket); 940 else { 941 bzero(&xt.xt_socket, sizeof xt.xt_socket); 942 xt.xt_socket.xso_protocol = IPPROTO_TCP; 943 } 944 xt.xt_inp.inp_gencnt = inp->inp_gencnt; 945 error = SYSCTL_OUT(req, &xt, sizeof xt); 946 } 947 } 948 if (!error) { 949 /* 950 * Give the user an updated idea of our state. 951 * If the generation differs from what we told 952 * her before, she knows that something happened 953 * while we were processing this request, and it 954 * might be necessary to retry. 955 */ 956 s = splnet(); 957 INP_INFO_RLOCK(&tcbinfo); 958 xig.xig_gen = tcbinfo.ipi_gencnt; 959 xig.xig_sogen = so_gencnt; 960 xig.xig_count = tcbinfo.ipi_count; 961 INP_INFO_RUNLOCK(&tcbinfo); 962 splx(s); 963 error = SYSCTL_OUT(req, &xig, sizeof xig); 964 } 965 free(inp_list, M_TEMP); 966 return error; 967 } 968 969 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0, 970 tcp_pcblist, "S,xtcpcb", "List of active TCP connections"); 971 972 static int 973 tcp_getcred(SYSCTL_HANDLER_ARGS) 974 { 975 struct xucred xuc; 976 struct sockaddr_in addrs[2]; 977 struct inpcb *inp; 978 int error, s; 979 980 error = suser_cred(req->td->td_ucred, PRISON_ROOT); 981 if (error) 982 return (error); 983 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 984 if (error) 985 return (error); 986 s = splnet(); 987 INP_INFO_RLOCK(&tcbinfo); 988 inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port, 989 addrs[0].sin_addr, addrs[0].sin_port, 0, NULL); 990 if (inp == NULL) { 991 error = ENOENT; 992 goto outunlocked; 993 } 994 INP_LOCK(inp); 995 if (inp->inp_socket == NULL) { 996 error = ENOENT; 997 goto out; 998 } 999 error = cr_canseesocket(req->td->td_ucred, inp->inp_socket); 1000 if (error) 1001 goto out; 1002 cru2x(inp->inp_socket->so_cred, &xuc); 1003 out: 1004 INP_UNLOCK(inp); 1005 outunlocked: 1006 INP_INFO_RUNLOCK(&tcbinfo); 1007 splx(s); 1008 if (error == 0) 1009 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 1010 return (error); 1011 } 1012 1013 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, 1014 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0, 1015 tcp_getcred, "S,xucred", "Get the xucred of a TCP connection"); 1016 1017 #ifdef INET6 1018 static int 1019 tcp6_getcred(SYSCTL_HANDLER_ARGS) 1020 { 1021 struct xucred xuc; 1022 struct sockaddr_in6 addrs[2]; 1023 struct inpcb *inp; 1024 int error, s, mapped = 0; 1025 1026 error = suser_cred(req->td->td_ucred, PRISON_ROOT); 1027 if (error) 1028 return (error); 1029 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 1030 if (error) 1031 return (error); 1032 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) { 1033 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr)) 1034 mapped = 1; 1035 else 1036 return (EINVAL); 1037 } 1038 s = splnet(); 1039 INP_INFO_RLOCK(&tcbinfo); 1040 if (mapped == 1) 1041 inp = in_pcblookup_hash(&tcbinfo, 1042 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12], 1043 addrs[1].sin6_port, 1044 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12], 1045 addrs[0].sin6_port, 1046 0, NULL); 1047 else 1048 inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr, 1049 addrs[1].sin6_port, 1050 &addrs[0].sin6_addr, addrs[0].sin6_port, 1051 0, NULL); 1052 if (inp == NULL) { 1053 error = ENOENT; 1054 goto outunlocked; 1055 } 1056 INP_LOCK(inp); 1057 if (inp->inp_socket == NULL) { 1058 error = ENOENT; 1059 goto out; 1060 } 1061 error = cr_canseesocket(req->td->td_ucred, inp->inp_socket); 1062 if (error) 1063 goto out; 1064 cru2x(inp->inp_socket->so_cred, &xuc); 1065 out: 1066 INP_UNLOCK(inp); 1067 outunlocked: 1068 INP_INFO_RUNLOCK(&tcbinfo); 1069 splx(s); 1070 if (error == 0) 1071 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 1072 return (error); 1073 } 1074 1075 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, 1076 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0, 1077 tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection"); 1078 #endif 1079 1080 1081 void 1082 tcp_ctlinput(cmd, sa, vip) 1083 int cmd; 1084 struct sockaddr *sa; 1085 void *vip; 1086 { 1087 struct ip *ip = vip; 1088 struct tcphdr *th; 1089 struct in_addr faddr; 1090 struct inpcb *inp; 1091 struct tcpcb *tp; 1092 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify; 1093 tcp_seq icmp_seq; 1094 int s; 1095 1096 faddr = ((struct sockaddr_in *)sa)->sin_addr; 1097 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) 1098 return; 1099 1100 if (cmd == PRC_QUENCH) 1101 notify = tcp_quench; 1102 else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB || 1103 cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip) 1104 notify = tcp_drop_syn_sent; 1105 else if (cmd == PRC_MSGSIZE) 1106 notify = tcp_mtudisc; 1107 /* 1108 * Redirects don't need to be handled up here. 1109 */ 1110 else if (PRC_IS_REDIRECT(cmd)) 1111 return; 1112 /* 1113 * Hostdead is ugly because it goes linearly through all PCBs. 1114 * XXX: We never get this from ICMP, otherwise it makes an 1115 * excellent DoS attack on machines with many connections. 1116 */ 1117 else if (cmd == PRC_HOSTDEAD) 1118 ip = NULL; 1119 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) 1120 return; 1121 if (ip != NULL) { 1122 s = splnet(); 1123 th = (struct tcphdr *)((caddr_t)ip 1124 + (ip->ip_hl << 2)); 1125 INP_INFO_WLOCK(&tcbinfo); 1126 inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport, 1127 ip->ip_src, th->th_sport, 0, NULL); 1128 if (inp != NULL) { 1129 INP_LOCK(inp); 1130 if (inp->inp_socket != NULL) { 1131 icmp_seq = htonl(th->th_seq); 1132 tp = intotcpcb(inp); 1133 if (SEQ_GEQ(icmp_seq, tp->snd_una) && 1134 SEQ_LT(icmp_seq, tp->snd_max)) 1135 inp = (*notify)(inp, inetctlerrmap[cmd]); 1136 } 1137 if (inp != NULL) 1138 INP_UNLOCK(inp); 1139 } else { 1140 struct in_conninfo inc; 1141 1142 inc.inc_fport = th->th_dport; 1143 inc.inc_lport = th->th_sport; 1144 inc.inc_faddr = faddr; 1145 inc.inc_laddr = ip->ip_src; 1146 #ifdef INET6 1147 inc.inc_isipv6 = 0; 1148 #endif 1149 syncache_unreach(&inc, th); 1150 } 1151 INP_INFO_WUNLOCK(&tcbinfo); 1152 splx(s); 1153 } else 1154 in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify); 1155 } 1156 1157 #ifdef INET6 1158 void 1159 tcp6_ctlinput(cmd, sa, d) 1160 int cmd; 1161 struct sockaddr *sa; 1162 void *d; 1163 { 1164 struct tcphdr th; 1165 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify; 1166 struct ip6_hdr *ip6; 1167 struct mbuf *m; 1168 struct ip6ctlparam *ip6cp = NULL; 1169 const struct sockaddr_in6 *sa6_src = NULL; 1170 int off; 1171 struct tcp_portonly { 1172 u_int16_t th_sport; 1173 u_int16_t th_dport; 1174 } *thp; 1175 1176 if (sa->sa_family != AF_INET6 || 1177 sa->sa_len != sizeof(struct sockaddr_in6)) 1178 return; 1179 1180 if (cmd == PRC_QUENCH) 1181 notify = tcp_quench; 1182 else if (cmd == PRC_MSGSIZE) 1183 notify = tcp_mtudisc; 1184 else if (!PRC_IS_REDIRECT(cmd) && 1185 ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) 1186 return; 1187 1188 /* if the parameter is from icmp6, decode it. */ 1189 if (d != NULL) { 1190 ip6cp = (struct ip6ctlparam *)d; 1191 m = ip6cp->ip6c_m; 1192 ip6 = ip6cp->ip6c_ip6; 1193 off = ip6cp->ip6c_off; 1194 sa6_src = ip6cp->ip6c_src; 1195 } else { 1196 m = NULL; 1197 ip6 = NULL; 1198 off = 0; /* fool gcc */ 1199 sa6_src = &sa6_any; 1200 } 1201 1202 if (ip6 != NULL) { 1203 struct in_conninfo inc; 1204 /* 1205 * XXX: We assume that when IPV6 is non NULL, 1206 * M and OFF are valid. 1207 */ 1208 1209 /* check if we can safely examine src and dst ports */ 1210 if (m->m_pkthdr.len < off + sizeof(*thp)) 1211 return; 1212 1213 bzero(&th, sizeof(th)); 1214 m_copydata(m, off, sizeof(*thp), (caddr_t)&th); 1215 1216 in6_pcbnotify(&tcb, sa, th.th_dport, 1217 (struct sockaddr *)ip6cp->ip6c_src, 1218 th.th_sport, cmd, NULL, notify); 1219 1220 inc.inc_fport = th.th_dport; 1221 inc.inc_lport = th.th_sport; 1222 inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr; 1223 inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr; 1224 inc.inc_isipv6 = 1; 1225 syncache_unreach(&inc, &th); 1226 } else 1227 in6_pcbnotify(&tcb, sa, 0, (const struct sockaddr *)sa6_src, 1228 0, cmd, NULL, notify); 1229 } 1230 #endif /* INET6 */ 1231 1232 1233 /* 1234 * Following is where TCP initial sequence number generation occurs. 1235 * 1236 * There are two places where we must use initial sequence numbers: 1237 * 1. In SYN-ACK packets. 1238 * 2. In SYN packets. 1239 * 1240 * All ISNs for SYN-ACK packets are generated by the syncache. See 1241 * tcp_syncache.c for details. 1242 * 1243 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling 1244 * depends on this property. In addition, these ISNs should be 1245 * unguessable so as to prevent connection hijacking. To satisfy 1246 * the requirements of this situation, the algorithm outlined in 1247 * RFC 1948 is used, with only small modifications. 1248 * 1249 * Implementation details: 1250 * 1251 * Time is based off the system timer, and is corrected so that it 1252 * increases by one megabyte per second. This allows for proper 1253 * recycling on high speed LANs while still leaving over an hour 1254 * before rollover. 1255 * 1256 * As reading the *exact* system time is too expensive to be done 1257 * whenever setting up a TCP connection, we increment the time 1258 * offset in two ways. First, a small random positive increment 1259 * is added to isn_offset for each connection that is set up. 1260 * Second, the function tcp_isn_tick fires once per clock tick 1261 * and increments isn_offset as necessary so that sequence numbers 1262 * are incremented at approximately ISN_BYTES_PER_SECOND. The 1263 * random positive increments serve only to ensure that the same 1264 * exact sequence number is never sent out twice (as could otherwise 1265 * happen when a port is recycled in less than the system tick 1266 * interval.) 1267 * 1268 * net.inet.tcp.isn_reseed_interval controls the number of seconds 1269 * between seeding of isn_secret. This is normally set to zero, 1270 * as reseeding should not be necessary. 1271 * 1272 */ 1273 1274 #define ISN_BYTES_PER_SECOND 1048576 1275 #define ISN_STATIC_INCREMENT 4096 1276 #define ISN_RANDOM_INCREMENT (4096 - 1) 1277 1278 u_char isn_secret[32]; 1279 int isn_last_reseed; 1280 u_int32_t isn_offset, isn_offset_old; 1281 MD5_CTX isn_ctx; 1282 1283 tcp_seq 1284 tcp_new_isn(tp) 1285 struct tcpcb *tp; 1286 { 1287 u_int32_t md5_buffer[4]; 1288 tcp_seq new_isn; 1289 1290 /* Seed if this is the first use, reseed if requested. */ 1291 if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) && 1292 (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz) 1293 < (u_int)ticks))) { 1294 read_random(&isn_secret, sizeof(isn_secret)); 1295 isn_last_reseed = ticks; 1296 } 1297 1298 /* Compute the md5 hash and return the ISN. */ 1299 MD5Init(&isn_ctx); 1300 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short)); 1301 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short)); 1302 #ifdef INET6 1303 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) { 1304 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr, 1305 sizeof(struct in6_addr)); 1306 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr, 1307 sizeof(struct in6_addr)); 1308 } else 1309 #endif 1310 { 1311 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr, 1312 sizeof(struct in_addr)); 1313 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr, 1314 sizeof(struct in_addr)); 1315 } 1316 MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret)); 1317 MD5Final((u_char *) &md5_buffer, &isn_ctx); 1318 new_isn = (tcp_seq) md5_buffer[0]; 1319 isn_offset += ISN_STATIC_INCREMENT + 1320 (arc4random() & ISN_RANDOM_INCREMENT); 1321 new_isn += isn_offset; 1322 return new_isn; 1323 } 1324 1325 /* 1326 * Increment the offset to the next ISN_BYTES_PER_SECOND / hz boundary 1327 * to keep time flowing at a relatively constant rate. If the random 1328 * increments have already pushed us past the projected offset, do nothing. 1329 */ 1330 static void 1331 tcp_isn_tick(xtp) 1332 void *xtp; 1333 { 1334 u_int32_t projected_offset; 1335 1336 projected_offset = isn_offset_old + ISN_BYTES_PER_SECOND / hz; 1337 1338 if (projected_offset > isn_offset) 1339 isn_offset = projected_offset; 1340 1341 isn_offset_old = isn_offset; 1342 callout_reset(&isn_callout, 1, tcp_isn_tick, NULL); 1343 } 1344 1345 /* 1346 * When a source quench is received, close congestion window 1347 * to one segment. We will gradually open it again as we proceed. 1348 */ 1349 struct inpcb * 1350 tcp_quench(inp, errno) 1351 struct inpcb *inp; 1352 int errno; 1353 { 1354 struct tcpcb *tp = intotcpcb(inp); 1355 1356 if (tp != NULL) 1357 tp->snd_cwnd = tp->t_maxseg; 1358 return (inp); 1359 } 1360 1361 /* 1362 * When a specific ICMP unreachable message is received and the 1363 * connection state is SYN-SENT, drop the connection. This behavior 1364 * is controlled by the icmp_may_rst sysctl. 1365 */ 1366 struct inpcb * 1367 tcp_drop_syn_sent(inp, errno) 1368 struct inpcb *inp; 1369 int errno; 1370 { 1371 struct tcpcb *tp = intotcpcb(inp); 1372 1373 if (tp != NULL && tp->t_state == TCPS_SYN_SENT) { 1374 tcp_drop(tp, errno); 1375 return (struct inpcb *)0; 1376 } 1377 return inp; 1378 } 1379 1380 /* 1381 * When `need fragmentation' ICMP is received, update our idea of the MSS 1382 * based on the new value in the route. Also nudge TCP to send something, 1383 * since we know the packet we just sent was dropped. 1384 * This duplicates some code in the tcp_mss() function in tcp_input.c. 1385 */ 1386 struct inpcb * 1387 tcp_mtudisc(inp, errno) 1388 struct inpcb *inp; 1389 int errno; 1390 { 1391 struct tcpcb *tp = intotcpcb(inp); 1392 struct rmxp_tao tao; 1393 struct socket *so = inp->inp_socket; 1394 u_int maxmtu; 1395 u_int romtu; 1396 int mss; 1397 #ifdef INET6 1398 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 1399 #endif /* INET6 */ 1400 bzero(&tao, sizeof(tao)); 1401 1402 if (tp != NULL) { 1403 maxmtu = tcp_hc_getmtu(&inp->inp_inc); /* IPv4 and IPv6 */ 1404 romtu = 1405 #ifdef INET6 1406 isipv6 ? tcp_maxmtu6(&inp->inp_inc) : 1407 #endif /* INET6 */ 1408 tcp_maxmtu(&inp->inp_inc); 1409 if (!maxmtu) 1410 maxmtu = romtu; 1411 else 1412 maxmtu = min(maxmtu, romtu); 1413 if (!maxmtu) { 1414 tp->t_maxopd = tp->t_maxseg = 1415 #ifdef INET6 1416 isipv6 ? tcp_v6mssdflt : 1417 #endif /* INET6 */ 1418 tcp_mssdflt; 1419 return inp; 1420 } 1421 mss = maxmtu - 1422 #ifdef INET6 1423 (isipv6 ? 1424 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : 1425 #endif /* INET6 */ 1426 sizeof(struct tcpiphdr) 1427 #ifdef INET6 1428 ) 1429 #endif /* INET6 */ 1430 ; 1431 1432 if (tcp_do_rfc1644) { 1433 tcp_hc_gettao(&inp->inp_inc, &tao); 1434 if (tao.tao_mssopt) 1435 mss = min(mss, tao.tao_mssopt); 1436 } 1437 /* 1438 * XXX - The above conditional probably violates the TCP 1439 * spec. The problem is that, since we don't know the 1440 * other end's MSS, we are supposed to use a conservative 1441 * default. But, if we do that, then MTU discovery will 1442 * never actually take place, because the conservative 1443 * default is much less than the MTUs typically seen 1444 * on the Internet today. For the moment, we'll sweep 1445 * this under the carpet. 1446 * 1447 * The conservative default might not actually be a problem 1448 * if the only case this occurs is when sending an initial 1449 * SYN with options and data to a host we've never talked 1450 * to before. Then, they will reply with an MSS value which 1451 * will get recorded and the new parameters should get 1452 * recomputed. For Further Study. 1453 */ 1454 if (tp->t_maxopd <= mss) 1455 return inp; 1456 tp->t_maxopd = mss; 1457 1458 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 1459 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) 1460 mss -= TCPOLEN_TSTAMP_APPA; 1461 if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC && 1462 (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC) 1463 mss -= TCPOLEN_CC_APPA; 1464 #if (MCLBYTES & (MCLBYTES - 1)) == 0 1465 if (mss > MCLBYTES) 1466 mss &= ~(MCLBYTES-1); 1467 #else 1468 if (mss > MCLBYTES) 1469 mss = mss / MCLBYTES * MCLBYTES; 1470 #endif 1471 if (so->so_snd.sb_hiwat < mss) 1472 mss = so->so_snd.sb_hiwat; 1473 1474 tp->t_maxseg = mss; 1475 1476 tcpstat.tcps_mturesent++; 1477 tp->t_rtttime = 0; 1478 tp->snd_nxt = tp->snd_una; 1479 tcp_output(tp); 1480 } 1481 return inp; 1482 } 1483 1484 /* 1485 * Look-up the routing entry to the peer of this inpcb. If no route 1486 * is found and it cannot be allocated, then return NULL. This routine 1487 * is called by TCP routines that access the rmx structure and by tcp_mss 1488 * to get the interface MTU. 1489 */ 1490 u_long 1491 tcp_maxmtu(inc) 1492 struct in_conninfo *inc; 1493 { 1494 struct route sro; 1495 struct sockaddr_in *dst; 1496 struct ifnet *ifp; 1497 u_long maxmtu = 0; 1498 1499 KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer")); 1500 1501 bzero(&sro, sizeof(sro)); 1502 if (inc->inc_faddr.s_addr != INADDR_ANY) { 1503 dst = (struct sockaddr_in *)&sro.ro_dst; 1504 dst->sin_family = AF_INET; 1505 dst->sin_len = sizeof(*dst); 1506 dst->sin_addr = inc->inc_faddr; 1507 rtalloc_ign(&sro, RTF_CLONING); 1508 } 1509 if (sro.ro_rt != NULL) { 1510 ifp = sro.ro_rt->rt_ifp; 1511 if (sro.ro_rt->rt_rmx.rmx_mtu == 0) 1512 maxmtu = ifp->if_mtu; 1513 else 1514 maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu); 1515 RTFREE(sro.ro_rt); 1516 } 1517 return (maxmtu); 1518 } 1519 1520 #ifdef INET6 1521 u_long 1522 tcp_maxmtu6(inc) 1523 struct in_conninfo *inc; 1524 { 1525 struct route_in6 sro6; 1526 struct ifnet *ifp; 1527 u_long maxmtu = 0; 1528 1529 KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer")); 1530 1531 bzero(&sro6, sizeof(sro6)); 1532 if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) { 1533 sro6.ro_dst.sin6_family = AF_INET6; 1534 sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6); 1535 sro6.ro_dst.sin6_addr = inc->inc6_faddr; 1536 rtalloc_ign((struct route *)&sro6, RTF_CLONING); 1537 } 1538 if (sro6.ro_rt != NULL) { 1539 ifp = sro6.ro_rt->rt_ifp; 1540 if (sro6.ro_rt->rt_rmx.rmx_mtu == 0) 1541 maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp); 1542 else 1543 maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu, 1544 IN6_LINKMTU(sro6.ro_rt->rt_ifp)); 1545 RTFREE(sro6.ro_rt); 1546 } 1547 1548 return (maxmtu); 1549 } 1550 #endif /* INET6 */ 1551 1552 #ifdef IPSEC 1553 /* compute ESP/AH header size for TCP, including outer IP header. */ 1554 size_t 1555 ipsec_hdrsiz_tcp(tp) 1556 struct tcpcb *tp; 1557 { 1558 struct inpcb *inp; 1559 struct mbuf *m; 1560 size_t hdrsiz; 1561 struct ip *ip; 1562 #ifdef INET6 1563 struct ip6_hdr *ip6; 1564 #endif 1565 struct tcphdr *th; 1566 1567 if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) 1568 return 0; 1569 MGETHDR(m, M_DONTWAIT, MT_DATA); 1570 if (!m) 1571 return 0; 1572 1573 #ifdef INET6 1574 if ((inp->inp_vflag & INP_IPV6) != 0) { 1575 ip6 = mtod(m, struct ip6_hdr *); 1576 th = (struct tcphdr *)(ip6 + 1); 1577 m->m_pkthdr.len = m->m_len = 1578 sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 1579 tcpip_fillheaders(inp, ip6, th); 1580 hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp); 1581 } else 1582 #endif /* INET6 */ 1583 { 1584 ip = mtod(m, struct ip *); 1585 th = (struct tcphdr *)(ip + 1); 1586 m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr); 1587 tcpip_fillheaders(inp, ip, th); 1588 hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp); 1589 } 1590 1591 m_free(m); 1592 return hdrsiz; 1593 } 1594 #endif /*IPSEC*/ 1595 1596 /* 1597 * Move a TCP connection into TIME_WAIT state. 1598 * tcbinfo is unlocked. 1599 * inp is locked, and is unlocked before returning. 1600 */ 1601 void 1602 tcp_twstart(tp) 1603 struct tcpcb *tp; 1604 { 1605 struct tcptw *tw; 1606 struct inpcb *inp; 1607 int tw_time, acknow; 1608 struct socket *so; 1609 1610 tw = uma_zalloc(tcptw_zone, M_NOWAIT); 1611 if (tw == NULL) { 1612 tw = tcp_timer_2msl_tw(1); 1613 if (tw == NULL) { 1614 tcp_close(tp); 1615 return; 1616 } 1617 } 1618 inp = tp->t_inpcb; 1619 tw->tw_inpcb = inp; 1620 1621 /* 1622 * Recover last window size sent. 1623 */ 1624 tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale; 1625 1626 /* 1627 * Set t_recent if timestamps are used on the connection. 1628 */ 1629 if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) == 1630 (TF_REQ_TSTMP|TF_RCVD_TSTMP)) 1631 tw->t_recent = tp->ts_recent; 1632 else 1633 tw->t_recent = 0; 1634 1635 tw->snd_nxt = tp->snd_nxt; 1636 tw->rcv_nxt = tp->rcv_nxt; 1637 tw->iss = tp->iss; 1638 tw->irs = tp->irs; 1639 tw->cc_recv = tp->cc_recv; 1640 tw->cc_send = tp->cc_send; 1641 tw->t_starttime = tp->t_starttime; 1642 tw->tw_time = 0; 1643 1644 /* XXX 1645 * If this code will 1646 * be used for fin-wait-2 state also, then we may need 1647 * a ts_recent from the last segment. 1648 */ 1649 /* Shorten TIME_WAIT [RFC-1644, p.28] */ 1650 if (tp->cc_recv != 0 && (ticks - tp->t_starttime) < tcp_msl) { 1651 tw_time = tp->t_rxtcur * TCPTV_TWTRUNC; 1652 /* For T/TCP client, force ACK now. */ 1653 acknow = 1; 1654 } else { 1655 tw_time = 2 * tcp_msl; 1656 acknow = tp->t_flags & TF_ACKNOW; 1657 } 1658 tcp_discardcb(tp); 1659 so = inp->inp_socket; 1660 so->so_pcb = NULL; 1661 tw->tw_cred = crhold(so->so_cred); 1662 tw->tw_so_options = so->so_options; 1663 if (acknow) 1664 tcp_twrespond(tw, TH_ACK); 1665 sotryfree(so); 1666 inp->inp_socket = NULL; 1667 inp->inp_ppcb = (caddr_t)tw; 1668 inp->inp_vflag |= INP_TIMEWAIT; 1669 tcp_timer_2msl_reset(tw, tw_time); 1670 INP_UNLOCK(inp); 1671 } 1672 1673 /* 1674 * The appromixate rate of ISN increase of Microsoft TCP stacks; 1675 * the actual rate is slightly higher due to the addition of 1676 * random positive increments. 1677 * 1678 * Most other new OSes use semi-randomized ISN values, so we 1679 * do not need to worry about them. 1680 */ 1681 #define MS_ISN_BYTES_PER_SECOND 250000 1682 1683 /* 1684 * Determine if the ISN we will generate has advanced beyond the last 1685 * sequence number used by the previous connection. If so, indicate 1686 * that it is safe to recycle this tw socket by returning 1. 1687 */ 1688 int 1689 tcp_twrecycleable(struct tcptw *tw) 1690 { 1691 tcp_seq new_iss = tw->iss; 1692 tcp_seq new_irs = tw->irs; 1693 1694 new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz); 1695 new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz); 1696 1697 if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt)) 1698 return 1; 1699 else 1700 return 0; 1701 } 1702 1703 struct tcptw * 1704 tcp_twclose(struct tcptw *tw, int reuse) 1705 { 1706 struct inpcb *inp; 1707 1708 inp = tw->tw_inpcb; 1709 tw->tw_inpcb = NULL; 1710 tcp_timer_2msl_stop(tw); 1711 inp->inp_ppcb = NULL; 1712 #ifdef INET6 1713 if (inp->inp_vflag & INP_IPV6PROTO) 1714 in6_pcbdetach(inp); 1715 else 1716 #endif 1717 in_pcbdetach(inp); 1718 tcpstat.tcps_closed++; 1719 crfree(tw->tw_cred); 1720 tw->tw_cred = NULL; 1721 if (reuse) 1722 return (tw); 1723 uma_zfree(tcptw_zone, tw); 1724 return (NULL); 1725 } 1726 1727 int 1728 tcp_twrespond(struct tcptw *tw, int flags) 1729 { 1730 struct inpcb *inp = tw->tw_inpcb; 1731 struct tcphdr *th; 1732 struct mbuf *m; 1733 struct ip *ip = NULL; 1734 u_int8_t *optp; 1735 u_int hdrlen, optlen; 1736 int error; 1737 #ifdef INET6 1738 struct ip6_hdr *ip6 = NULL; 1739 int isipv6 = inp->inp_inc.inc_isipv6; 1740 #endif 1741 1742 m = m_gethdr(M_DONTWAIT, MT_HEADER); 1743 if (m == NULL) 1744 return (ENOBUFS); 1745 m->m_data += max_linkhdr; 1746 1747 #ifdef MAC 1748 mac_create_mbuf_from_inpcb(inp, m); 1749 #endif 1750 1751 #ifdef INET6 1752 if (isipv6) { 1753 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 1754 ip6 = mtod(m, struct ip6_hdr *); 1755 th = (struct tcphdr *)(ip6 + 1); 1756 tcpip_fillheaders(inp, ip6, th); 1757 } else 1758 #endif 1759 { 1760 hdrlen = sizeof(struct tcpiphdr); 1761 ip = mtod(m, struct ip *); 1762 th = (struct tcphdr *)(ip + 1); 1763 tcpip_fillheaders(inp, ip, th); 1764 } 1765 optp = (u_int8_t *)(th + 1); 1766 1767 /* 1768 * Send a timestamp and echo-reply if both our side and our peer 1769 * have sent timestamps in our SYN's and this is not a RST. 1770 */ 1771 if (tw->t_recent && flags == TH_ACK) { 1772 u_int32_t *lp = (u_int32_t *)optp; 1773 1774 /* Form timestamp option as shown in appendix A of RFC 1323. */ 1775 *lp++ = htonl(TCPOPT_TSTAMP_HDR); 1776 *lp++ = htonl(ticks); 1777 *lp = htonl(tw->t_recent); 1778 optp += TCPOLEN_TSTAMP_APPA; 1779 } 1780 1781 /* 1782 * Send `CC-family' options if needed, and it's not a RST. 1783 */ 1784 if (tw->cc_recv != 0 && flags == TH_ACK) { 1785 u_int32_t *lp = (u_int32_t *)optp; 1786 1787 *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC)); 1788 *lp = htonl(tw->cc_send); 1789 optp += TCPOLEN_CC_APPA; 1790 } 1791 optlen = optp - (u_int8_t *)(th + 1); 1792 1793 m->m_len = hdrlen + optlen; 1794 m->m_pkthdr.len = m->m_len; 1795 1796 KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small")); 1797 1798 th->th_seq = htonl(tw->snd_nxt); 1799 th->th_ack = htonl(tw->rcv_nxt); 1800 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 1801 th->th_flags = flags; 1802 th->th_win = htons(tw->last_win); 1803 1804 #ifdef INET6 1805 if (isipv6) { 1806 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr), 1807 sizeof(struct tcphdr) + optlen); 1808 ip6->ip6_hlim = in6_selecthlim(inp, NULL); 1809 error = ip6_output(m, inp->in6p_outputopts, NULL, 1810 (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp); 1811 } else 1812 #endif 1813 { 1814 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 1815 htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP)); 1816 m->m_pkthdr.csum_flags = CSUM_TCP; 1817 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 1818 ip->ip_len = m->m_pkthdr.len; 1819 if (path_mtu_discovery) 1820 ip->ip_off |= IP_DF; 1821 error = ip_output(m, inp->inp_options, NULL, 1822 (tw->tw_so_options & SO_DONTROUTE), NULL, inp); 1823 } 1824 if (flags & TH_ACK) 1825 tcpstat.tcps_sndacks++; 1826 else 1827 tcpstat.tcps_sndctrl++; 1828 tcpstat.tcps_sndtotal++; 1829 return (error); 1830 } 1831 1832 /* 1833 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING 1834 * 1835 * This code attempts to calculate the bandwidth-delay product as a 1836 * means of determining the optimal window size to maximize bandwidth, 1837 * minimize RTT, and avoid the over-allocation of buffers on interfaces and 1838 * routers. This code also does a fairly good job keeping RTTs in check 1839 * across slow links like modems. We implement an algorithm which is very 1840 * similar (but not meant to be) TCP/Vegas. The code operates on the 1841 * transmitter side of a TCP connection and so only effects the transmit 1842 * side of the connection. 1843 * 1844 * BACKGROUND: TCP makes no provision for the management of buffer space 1845 * at the end points or at the intermediate routers and switches. A TCP 1846 * stream, whether using NewReno or not, will eventually buffer as 1847 * many packets as it is able and the only reason this typically works is 1848 * due to the fairly small default buffers made available for a connection 1849 * (typicaly 16K or 32K). As machines use larger windows and/or window 1850 * scaling it is now fairly easy for even a single TCP connection to blow-out 1851 * all available buffer space not only on the local interface, but on 1852 * intermediate routers and switches as well. NewReno makes a misguided 1853 * attempt to 'solve' this problem by waiting for an actual failure to occur, 1854 * then backing off, then steadily increasing the window again until another 1855 * failure occurs, ad-infinitum. This results in terrible oscillation that 1856 * is only made worse as network loads increase and the idea of intentionally 1857 * blowing out network buffers is, frankly, a terrible way to manage network 1858 * resources. 1859 * 1860 * It is far better to limit the transmit window prior to the failure 1861 * condition being achieved. There are two general ways to do this: First 1862 * you can 'scan' through different transmit window sizes and locate the 1863 * point where the RTT stops increasing, indicating that you have filled the 1864 * pipe, then scan backwards until you note that RTT stops decreasing, then 1865 * repeat ad-infinitum. This method works in principle but has severe 1866 * implementation issues due to RTT variances, timer granularity, and 1867 * instability in the algorithm which can lead to many false positives and 1868 * create oscillations as well as interact badly with other TCP streams 1869 * implementing the same algorithm. 1870 * 1871 * The second method is to limit the window to the bandwidth delay product 1872 * of the link. This is the method we implement. RTT variances and our 1873 * own manipulation of the congestion window, bwnd, can potentially 1874 * destabilize the algorithm. For this reason we have to stabilize the 1875 * elements used to calculate the window. We do this by using the minimum 1876 * observed RTT, the long term average of the observed bandwidth, and 1877 * by adding two segments worth of slop. It isn't perfect but it is able 1878 * to react to changing conditions and gives us a very stable basis on 1879 * which to extend the algorithm. 1880 */ 1881 void 1882 tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq) 1883 { 1884 u_long bw; 1885 u_long bwnd; 1886 int save_ticks; 1887 1888 /* 1889 * If inflight_enable is disabled in the middle of a tcp connection, 1890 * make sure snd_bwnd is effectively disabled. 1891 */ 1892 if (tcp_inflight_enable == 0) { 1893 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; 1894 tp->snd_bandwidth = 0; 1895 return; 1896 } 1897 1898 /* 1899 * Figure out the bandwidth. Due to the tick granularity this 1900 * is a very rough number and it MUST be averaged over a fairly 1901 * long period of time. XXX we need to take into account a link 1902 * that is not using all available bandwidth, but for now our 1903 * slop will ramp us up if this case occurs and the bandwidth later 1904 * increases. 1905 * 1906 * Note: if ticks rollover 'bw' may wind up negative. We must 1907 * effectively reset t_bw_rtttime for this case. 1908 */ 1909 save_ticks = ticks; 1910 if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1) 1911 return; 1912 1913 bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / 1914 (save_ticks - tp->t_bw_rtttime); 1915 tp->t_bw_rtttime = save_ticks; 1916 tp->t_bw_rtseq = ack_seq; 1917 if (tp->t_bw_rtttime == 0 || (int)bw < 0) 1918 return; 1919 bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4; 1920 1921 tp->snd_bandwidth = bw; 1922 1923 /* 1924 * Calculate the semi-static bandwidth delay product, plus two maximal 1925 * segments. The additional slop puts us squarely in the sweet 1926 * spot and also handles the bandwidth run-up case and stabilization. 1927 * Without the slop we could be locking ourselves into a lower 1928 * bandwidth. 1929 * 1930 * Situations Handled: 1931 * (1) Prevents over-queueing of packets on LANs, especially on 1932 * high speed LANs, allowing larger TCP buffers to be 1933 * specified, and also does a good job preventing 1934 * over-queueing of packets over choke points like modems 1935 * (at least for the transmit side). 1936 * 1937 * (2) Is able to handle changing network loads (bandwidth 1938 * drops so bwnd drops, bandwidth increases so bwnd 1939 * increases). 1940 * 1941 * (3) Theoretically should stabilize in the face of multiple 1942 * connections implementing the same algorithm (this may need 1943 * a little work). 1944 * 1945 * (4) Stability value (defaults to 20 = 2 maximal packets) can 1946 * be adjusted with a sysctl but typically only needs to be 1947 * on very slow connections. A value no smaller then 5 1948 * should be used, but only reduce this default if you have 1949 * no other choice. 1950 */ 1951 #define USERTT ((tp->t_srtt + tp->t_rttbest) / 2) 1952 bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + tcp_inflight_stab * tp->t_maxseg / 10; 1953 #undef USERTT 1954 1955 if (tcp_inflight_debug > 0) { 1956 static int ltime; 1957 if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) { 1958 ltime = ticks; 1959 printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n", 1960 tp, 1961 bw, 1962 tp->t_rttbest, 1963 tp->t_srtt, 1964 bwnd 1965 ); 1966 } 1967 } 1968 if ((long)bwnd < tcp_inflight_min) 1969 bwnd = tcp_inflight_min; 1970 if (bwnd > tcp_inflight_max) 1971 bwnd = tcp_inflight_max; 1972 if ((long)bwnd < tp->t_maxseg * 2) 1973 bwnd = tp->t_maxseg * 2; 1974 tp->snd_bwnd = bwnd; 1975 } 1976 1977 #ifdef TCP_SIGNATURE 1978 /* 1979 * Callback function invoked by m_apply() to digest TCP segment data 1980 * contained within an mbuf chain. 1981 */ 1982 static int 1983 tcp_signature_apply(void *fstate, void *data, u_int len) 1984 { 1985 1986 MD5Update(fstate, (u_char *)data, len); 1987 return (0); 1988 } 1989 1990 /* 1991 * Compute TCP-MD5 hash of a TCPv4 segment. (RFC2385) 1992 * 1993 * Parameters: 1994 * m pointer to head of mbuf chain 1995 * off0 offset to TCP header within the mbuf chain 1996 * len length of TCP segment data, excluding options 1997 * optlen length of TCP segment options 1998 * buf pointer to storage for computed MD5 digest 1999 * direction direction of flow (IPSEC_DIR_INBOUND or OUTBOUND) 2000 * 2001 * We do this over ip, tcphdr, segment data, and the key in the SADB. 2002 * When called from tcp_input(), we can be sure that th_sum has been 2003 * zeroed out and verified already. 2004 * 2005 * This function is for IPv4 use only. Calling this function with an 2006 * IPv6 packet in the mbuf chain will yield undefined results. 2007 * 2008 * Return 0 if successful, otherwise return -1. 2009 * 2010 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a 2011 * search with the destination IP address, and a 'magic SPI' to be 2012 * determined by the application. This is hardcoded elsewhere to 1179 2013 * right now. Another branch of this code exists which uses the SPD to 2014 * specify per-application flows but it is unstable. 2015 */ 2016 int 2017 tcp_signature_compute(struct mbuf *m, int off0, int len, int optlen, 2018 u_char *buf, u_int direction) 2019 { 2020 union sockaddr_union dst; 2021 struct ippseudo ippseudo; 2022 MD5_CTX ctx; 2023 int doff; 2024 struct ip *ip; 2025 struct ipovly *ipovly; 2026 struct secasvar *sav; 2027 struct tcphdr *th; 2028 u_short savecsum; 2029 2030 KASSERT(m != NULL, ("NULL mbuf chain")); 2031 KASSERT(buf != NULL, ("NULL signature pointer")); 2032 2033 /* Extract the destination from the IP header in the mbuf. */ 2034 ip = mtod(m, struct ip *); 2035 bzero(&dst, sizeof(union sockaddr_union)); 2036 dst.sa.sa_len = sizeof(struct sockaddr_in); 2037 dst.sa.sa_family = AF_INET; 2038 dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ? 2039 ip->ip_src : ip->ip_dst; 2040 2041 /* Look up an SADB entry which matches the address of the peer. */ 2042 sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI)); 2043 if (sav == NULL) { 2044 printf("%s: SADB lookup failed for %s\n", __func__, 2045 inet_ntoa(dst.sin.sin_addr)); 2046 return (EINVAL); 2047 } 2048 2049 MD5Init(&ctx); 2050 ipovly = (struct ipovly *)ip; 2051 th = (struct tcphdr *)((u_char *)ip + off0); 2052 doff = off0 + sizeof(struct tcphdr) + optlen; 2053 2054 /* 2055 * Step 1: Update MD5 hash with IP pseudo-header. 2056 * 2057 * XXX The ippseudo header MUST be digested in network byte order, 2058 * or else we'll fail the regression test. Assume all fields we've 2059 * been doing arithmetic on have been in host byte order. 2060 * XXX One cannot depend on ipovly->ih_len here. When called from 2061 * tcp_output(), the underlying ip_len member has not yet been set. 2062 */ 2063 ippseudo.ippseudo_src = ipovly->ih_src; 2064 ippseudo.ippseudo_dst = ipovly->ih_dst; 2065 ippseudo.ippseudo_pad = 0; 2066 ippseudo.ippseudo_p = IPPROTO_TCP; 2067 ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen); 2068 MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo)); 2069 2070 /* 2071 * Step 2: Update MD5 hash with TCP header, excluding options. 2072 * The TCP checksum must be set to zero. 2073 */ 2074 savecsum = th->th_sum; 2075 th->th_sum = 0; 2076 MD5Update(&ctx, (char *)th, sizeof(struct tcphdr)); 2077 th->th_sum = savecsum; 2078 2079 /* 2080 * Step 3: Update MD5 hash with TCP segment data. 2081 * Use m_apply() to avoid an early m_pullup(). 2082 */ 2083 if (len > 0) 2084 m_apply(m, doff, len, tcp_signature_apply, &ctx); 2085 2086 /* 2087 * Step 4: Update MD5 hash with shared secret. 2088 */ 2089 MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth)); 2090 MD5Final(buf, &ctx); 2091 2092 key_sa_recordxfer(sav, m); 2093 KEY_FREESAV(&sav); 2094 return (0); 2095 } 2096 #endif /* TCP_SIGNATURE */ 2097