1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 #include "opt_inet.h" 34 #include "opt_inet6.h" 35 #include "opt_ipsec.h" 36 #include "opt_kern_tls.h" 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/arb.h> 41 #include <sys/domain.h> 42 #ifdef TCP_HHOOK 43 #include <sys/hhook.h> 44 #endif 45 #include <sys/kernel.h> 46 #ifdef KERN_TLS 47 #include <sys/ktls.h> 48 #endif 49 #include <sys/lock.h> 50 #include <sys/mbuf.h> 51 #include <sys/mutex.h> 52 #include <sys/protosw.h> 53 #include <sys/qmath.h> 54 #include <sys/sdt.h> 55 #include <sys/socket.h> 56 #include <sys/socketvar.h> 57 #include <sys/sysctl.h> 58 #include <sys/stats.h> 59 60 #include <net/if.h> 61 #include <net/route.h> 62 #include <net/route/nhop.h> 63 #include <net/vnet.h> 64 65 #include <netinet/in.h> 66 #include <netinet/in_kdtrace.h> 67 #include <netinet/in_systm.h> 68 #include <netinet/ip.h> 69 #include <netinet/in_pcb.h> 70 #include <netinet/ip_var.h> 71 #include <netinet/ip_options.h> 72 #ifdef INET6 73 #include <netinet6/in6_pcb.h> 74 #include <netinet/ip6.h> 75 #include <netinet6/ip6_var.h> 76 #endif 77 #include <netinet/tcp.h> 78 #define TCPOUTFLAGS 79 #include <netinet/tcp_fsm.h> 80 #include <netinet/tcp_seq.h> 81 #include <netinet/tcp_var.h> 82 #include <netinet/tcp_log_buf.h> 83 #include <netinet/tcp_syncache.h> 84 #include <netinet/tcp_timer.h> 85 #include <netinet/tcpip.h> 86 #include <netinet/cc/cc.h> 87 #include <netinet/tcp_fastopen.h> 88 #ifdef TCPPCAP 89 #include <netinet/tcp_pcap.h> 90 #endif 91 #ifdef TCP_OFFLOAD 92 #include <netinet/tcp_offload.h> 93 #endif 94 #include <netinet/tcp_ecn.h> 95 96 #include <netipsec/ipsec_support.h> 97 98 #include <netinet/udp.h> 99 #include <netinet/udp_var.h> 100 #include <machine/in_cksum.h> 101 102 #include <security/mac/mac_framework.h> 103 104 VNET_DEFINE(int, path_mtu_discovery) = 1; 105 SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_VNET | CTLFLAG_RW, 106 &VNET_NAME(path_mtu_discovery), 1, 107 "Enable Path MTU Discovery"); 108 109 VNET_DEFINE(int, tcp_do_tso) = 1; 110 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_VNET | CTLFLAG_RW, 111 &VNET_NAME(tcp_do_tso), 0, 112 "Enable TCP Segmentation Offload"); 113 114 VNET_DEFINE(int, tcp_sendspace) = 1024*32; 115 #define V_tcp_sendspace VNET(tcp_sendspace) 116 SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_VNET | CTLFLAG_RW, 117 &VNET_NAME(tcp_sendspace), 0, "Initial send socket buffer size"); 118 119 VNET_DEFINE(int, tcp_do_autosndbuf) = 1; 120 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_VNET | CTLFLAG_RW, 121 &VNET_NAME(tcp_do_autosndbuf), 0, 122 "Enable automatic send buffer sizing"); 123 124 VNET_DEFINE(int, tcp_autosndbuf_inc) = 8*1024; 125 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_VNET | CTLFLAG_RW, 126 &VNET_NAME(tcp_autosndbuf_inc), 0, 127 "Incrementor step size of automatic send buffer"); 128 129 VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024; 130 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW, 131 &VNET_NAME(tcp_autosndbuf_max), 0, 132 "Max size of automatic send buffer"); 133 134 VNET_DEFINE(int, tcp_sendbuf_auto_lowat) = 0; 135 #define V_tcp_sendbuf_auto_lowat VNET(tcp_sendbuf_auto_lowat) 136 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto_lowat, CTLFLAG_VNET | CTLFLAG_RW, 137 &VNET_NAME(tcp_sendbuf_auto_lowat), 0, 138 "Modify threshold for auto send buffer growth to account for SO_SNDLOWAT"); 139 140 /* 141 * Make sure that either retransmit or persist timer is set for SYN, FIN and 142 * non-ACK. 143 */ 144 #define TCP_XMIT_TIMER_ASSERT(tp, len, th_flags) \ 145 KASSERT(((len) == 0 && ((th_flags) & (TH_SYN | TH_FIN)) == 0) ||\ 146 tcp_timer_active((tp), TT_REXMT) || \ 147 tcp_timer_active((tp), TT_PERSIST), \ 148 ("neither rexmt nor persist timer is set")) 149 150 #ifdef TCP_HHOOK 151 /* 152 * Wrapper for the TCP established output helper hook. 153 */ 154 void 155 hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th, 156 struct tcpopt *to, uint32_t len, int tso) 157 { 158 struct tcp_hhook_data hhook_data; 159 160 if (V_tcp_hhh[HHOOK_TCP_EST_OUT]->hhh_nhooks > 0) { 161 hhook_data.tp = tp; 162 hhook_data.th = th; 163 hhook_data.to = to; 164 hhook_data.len = len; 165 hhook_data.tso = tso; 166 167 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_OUT], &hhook_data, 168 &tp->t_osd); 169 } 170 } 171 #endif 172 173 /* 174 * CC wrapper hook functions 175 */ 176 void 177 cc_after_idle(struct tcpcb *tp) 178 { 179 INP_WLOCK_ASSERT(tptoinpcb(tp)); 180 181 if (CC_ALGO(tp)->after_idle != NULL) 182 CC_ALGO(tp)->after_idle(&tp->t_ccv); 183 } 184 185 /* 186 * Tcp output routine: figure out what should be sent and send it. 187 */ 188 int 189 tcp_default_output(struct tcpcb *tp) 190 { 191 struct socket *so = tptosocket(tp); 192 struct inpcb *inp = tptoinpcb(tp); 193 int32_t len; 194 uint32_t recwin, sendwin; 195 uint16_t flags; 196 int off, error = 0; /* Keep compiler happy */ 197 u_int if_hw_tsomaxsegcount = 0; 198 u_int if_hw_tsomaxsegsize = 0; 199 struct mbuf *m; 200 struct ip *ip = NULL; 201 struct tcphdr *th; 202 u_char opt[TCP_MAXOLEN]; 203 unsigned ipoptlen, optlen, hdrlen, ulen; 204 unsigned ipsec_optlen = 0; 205 int idle, sendalot, curticks; 206 int sack_rxmit, sack_bytes_rxmt; 207 struct sackhole *p; 208 int tso, mtu; 209 struct tcpopt to; 210 struct udphdr *udp = NULL; 211 struct tcp_log_buffer *lgb; 212 unsigned int wanted_cookie = 0; 213 unsigned int dont_sendalot = 0; 214 #ifdef INET6 215 struct ip6_hdr *ip6 = NULL; 216 const bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 217 #endif 218 #ifdef KERN_TLS 219 const bool hw_tls = tp->t_nic_ktls_xmit != 0; 220 #else 221 const bool hw_tls = false; 222 #endif 223 224 NET_EPOCH_ASSERT(); 225 INP_WLOCK_ASSERT(inp); 226 227 #ifdef TCP_OFFLOAD 228 if (tp->t_flags & TF_TOE) 229 return (tcp_offload_output(tp)); 230 #endif 231 232 /* 233 * For TFO connections in SYN_SENT or SYN_RECEIVED, 234 * only allow the initial SYN or SYN|ACK and those sent 235 * by the retransmit timer. 236 */ 237 if ((tp->t_flags & TF_FASTOPEN) && 238 ((tp->t_state == TCPS_SYN_SENT) || 239 (tp->t_state == TCPS_SYN_RECEIVED)) && 240 SEQ_GT(tp->snd_max, tp->snd_una) && /* SYN or SYN|ACK sent */ 241 (tp->snd_nxt != tp->snd_una)) /* not a retransmit */ 242 return (0); 243 244 /* 245 * Determine length of data that should be transmitted, 246 * and flags that will be used. 247 * If there is some data or critical controls (SYN, RST) 248 * to send, then transmit; otherwise, investigate further. 249 */ 250 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 251 if (idle && (((ticks - tp->t_rcvtime) >= tp->t_rxtcur) || 252 (tp->t_sndtime && ((ticks - tp->t_sndtime) >= tp->t_rxtcur)))) 253 cc_after_idle(tp); 254 tp->t_flags &= ~TF_LASTIDLE; 255 if (idle) { 256 if (tp->t_flags & TF_MORETOCOME) { 257 tp->t_flags |= TF_LASTIDLE; 258 idle = 0; 259 } 260 } 261 again: 262 /* 263 * If we've recently taken a timeout, snd_max will be greater than 264 * snd_nxt. There may be SACK information that allows us to avoid 265 * resending already delivered data. Adjust snd_nxt accordingly. 266 */ 267 if ((tp->t_flags & TF_SACK_PERMIT) && 268 SEQ_LT(tp->snd_nxt, tp->snd_max)) 269 tcp_sack_adjust(tp); 270 sendalot = 0; 271 tso = 0; 272 mtu = 0; 273 off = tp->snd_nxt - tp->snd_una; 274 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 275 276 flags = tcp_outflags[tp->t_state]; 277 /* 278 * Send any SACK-generated retransmissions. If we're explicitly trying 279 * to send out new data (when sendalot is 1), bypass this function. 280 * If we retransmit in fast recovery mode, decrement snd_cwnd, since 281 * we're replacing a (future) new transmission with a retransmission 282 * now, and we previously incremented snd_cwnd in tcp_input(). 283 */ 284 /* 285 * Still in sack recovery , reset rxmit flag to zero. 286 */ 287 sack_rxmit = 0; 288 sack_bytes_rxmt = 0; 289 len = 0; 290 p = NULL; 291 if ((tp->t_flags & TF_SACK_PERMIT) && IN_FASTRECOVERY(tp->t_flags) && 292 (p = tcp_sack_output(tp, &sack_bytes_rxmt))) { 293 uint32_t cwin; 294 295 cwin = 296 imax(min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt, 0); 297 /* Do not retransmit SACK segments beyond snd_recover */ 298 if (SEQ_GT(p->end, tp->snd_recover)) { 299 /* 300 * (At least) part of sack hole extends beyond 301 * snd_recover. Check to see if we can rexmit data 302 * for this hole. 303 */ 304 if (SEQ_GEQ(p->rxmit, tp->snd_recover)) { 305 /* 306 * Can't rexmit any more data for this hole. 307 * That data will be rexmitted in the next 308 * sack recovery episode, when snd_recover 309 * moves past p->rxmit. 310 */ 311 p = NULL; 312 goto after_sack_rexmit; 313 } else { 314 /* Can rexmit part of the current hole */ 315 len = ((int32_t)ulmin(cwin, 316 SEQ_SUB(tp->snd_recover, p->rxmit))); 317 } 318 } else { 319 len = ((int32_t)ulmin(cwin, 320 SEQ_SUB(p->end, p->rxmit))); 321 } 322 if (len > 0) { 323 off = SEQ_SUB(p->rxmit, tp->snd_una); 324 KASSERT(off >= 0,("%s: sack block to the left of una : %d", 325 __func__, off)); 326 sack_rxmit = 1; 327 sendalot = 1; 328 } 329 } 330 after_sack_rexmit: 331 /* 332 * Get standard flags, and add SYN or FIN if requested by 'hidden' 333 * state flags. 334 */ 335 if (tp->t_flags & TF_NEEDFIN) 336 flags |= TH_FIN; 337 if (tp->t_flags & TF_NEEDSYN) 338 flags |= TH_SYN; 339 340 SOCKBUF_LOCK(&so->so_snd); 341 /* 342 * If in persist timeout with window of 0, send 1 byte. 343 * Otherwise, if window is small but nonzero 344 * and timer expired, we will send what we can 345 * and go to transmit state. 346 */ 347 if (tp->t_flags & TF_FORCEDATA) { 348 if (sendwin == 0) { 349 /* 350 * If we still have some data to send, then 351 * clear the FIN bit. Usually this would 352 * happen below when it realizes that we 353 * aren't sending all the data. However, 354 * if we have exactly 1 byte of unsent data, 355 * then it won't clear the FIN bit below, 356 * and if we are in persist state, we wind 357 * up sending the packet without recording 358 * that we sent the FIN bit. 359 * 360 * We can't just blindly clear the FIN bit, 361 * because if we don't have any more data 362 * to send then the probe will be the FIN 363 * itself. 364 */ 365 if (off < sbused(&so->so_snd)) 366 flags &= ~TH_FIN; 367 sendwin = 1; 368 } else { 369 tcp_timer_activate(tp, TT_PERSIST, 0); 370 tp->t_rxtshift = 0; 371 } 372 } 373 374 /* 375 * If snd_nxt == snd_max and we have transmitted a FIN, the 376 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in 377 * a negative length. This can also occur when TCP opens up 378 * its congestion window while receiving additional duplicate 379 * acks after fast-retransmit because TCP will reset snd_nxt 380 * to snd_max after the fast-retransmit. 381 * 382 * In the normal retransmit-FIN-only case, however, snd_nxt will 383 * be set to snd_una, the offset will be 0, and the length may 384 * wind up 0. 385 * 386 * If sack_rxmit is true we are retransmitting from the scoreboard 387 * in which case len is already set. 388 */ 389 if (sack_rxmit == 0) { 390 if (sack_bytes_rxmt == 0) { 391 len = ((int32_t)min(sbavail(&so->so_snd), sendwin) - 392 off); 393 } else { 394 int32_t cwin; 395 396 /* 397 * We are inside of a SACK recovery episode and are 398 * sending new data, having retransmitted all the 399 * data possible in the scoreboard. 400 */ 401 len = ((int32_t)min(sbavail(&so->so_snd), tp->snd_wnd) - 402 off); 403 /* 404 * Don't remove this (len > 0) check ! 405 * We explicitly check for len > 0 here (although it 406 * isn't really necessary), to work around a gcc 407 * optimization issue - to force gcc to compute 408 * len above. Without this check, the computation 409 * of len is bungled by the optimizer. 410 */ 411 if (len > 0) { 412 cwin = tp->snd_cwnd - imax(0, (int32_t) 413 (tp->snd_nxt - tp->snd_recover)) - 414 sack_bytes_rxmt; 415 if (cwin < 0) 416 cwin = 0; 417 len = imin(len, cwin); 418 } 419 } 420 } 421 422 /* 423 * Lop off SYN bit if it has already been sent. However, if this 424 * is SYN-SENT state and if segment contains data and if we don't 425 * know that foreign host supports TAO, suppress sending segment. 426 */ 427 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) { 428 if (tp->t_state != TCPS_SYN_RECEIVED) 429 flags &= ~TH_SYN; 430 /* 431 * When sending additional segments following a TFO SYN|ACK, 432 * do not include the SYN bit. 433 */ 434 if ((tp->t_flags & TF_FASTOPEN) && 435 (tp->t_state == TCPS_SYN_RECEIVED)) 436 flags &= ~TH_SYN; 437 off--, len++; 438 } 439 440 /* 441 * Be careful not to send data and/or FIN on SYN segments. 442 * This measure is needed to prevent interoperability problems 443 * with not fully conformant TCP implementations. 444 */ 445 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 446 len = 0; 447 flags &= ~TH_FIN; 448 } 449 450 /* 451 * On TFO sockets, ensure no data is sent in the following cases: 452 * 453 * - When retransmitting SYN|ACK on a passively-created socket 454 * 455 * - When retransmitting SYN on an actively created socket 456 * 457 * - When sending a zero-length cookie (cookie request) on an 458 * actively created socket 459 * 460 * - When the socket is in the CLOSED state (RST is being sent) 461 */ 462 if ((tp->t_flags & TF_FASTOPEN) && 463 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 464 ((tp->t_state == TCPS_SYN_SENT) && 465 (tp->t_tfo_client_cookie_len == 0)) || 466 (flags & TH_RST))) 467 len = 0; 468 469 /* Without fast-open there should never be data sent on a SYN. */ 470 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { 471 len = 0; 472 } 473 474 if (len <= 0) { 475 /* 476 * If FIN has been sent but not acked, 477 * but we haven't been called to retransmit, 478 * len will be < 0. Otherwise, window shrank 479 * after we sent into it. If window shrank to 0, 480 * cancel pending retransmit, pull snd_nxt back 481 * to (closed) window, and set the persist timer 482 * if it isn't already going. If the window didn't 483 * close completely, just wait for an ACK. 484 * 485 * We also do a general check here to ensure that 486 * we will set the persist timer when we have data 487 * to send, but a 0-byte window. This makes sure 488 * the persist timer is set even if the packet 489 * hits one of the "goto send" lines below. 490 */ 491 len = 0; 492 if ((sendwin == 0) && (TCPS_HAVEESTABLISHED(tp->t_state)) && 493 (off < (int) sbavail(&so->so_snd)) && 494 !tcp_timer_active(tp, TT_PERSIST)) { 495 tcp_timer_activate(tp, TT_REXMT, 0); 496 tp->t_rxtshift = 0; 497 tp->snd_nxt = tp->snd_una; 498 if (!tcp_timer_active(tp, TT_PERSIST)) 499 tcp_setpersist(tp); 500 } 501 } 502 503 /* len will be >= 0 after this point. */ 504 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 505 506 tcp_sndbuf_autoscale(tp, so, sendwin); 507 508 /* 509 * Decide if we can use TCP Segmentation Offloading (if supported by 510 * hardware). 511 * 512 * TSO may only be used if we are in a pure bulk sending state. The 513 * presence of TCP-MD5, SACK retransmits, SACK advertizements and 514 * IP options prevent using TSO. With TSO the TCP header is the same 515 * (except for the sequence number) for all generated packets. This 516 * makes it impossible to transmit any options which vary per generated 517 * segment or packet. 518 * 519 * IPv4 handling has a clear separation of ip options and ip header 520 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 521 * the right thing below to provide length of just ip options and thus 522 * checking for ipoptlen is enough to decide if ip options are present. 523 */ 524 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 525 /* 526 * Pre-calculate here as we save another lookup into the darknesses 527 * of IPsec that way and can actually decide if TSO is ok. 528 */ 529 #ifdef INET6 530 if (isipv6 && IPSEC_ENABLED(ipv6)) 531 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 532 #ifdef INET 533 else 534 #endif 535 #endif /* INET6 */ 536 #ifdef INET 537 if (IPSEC_ENABLED(ipv4)) 538 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 539 #endif /* INET */ 540 #endif /* IPSEC */ 541 #ifdef INET6 542 if (isipv6) 543 ipoptlen = ip6_optlen(inp); 544 else 545 #endif 546 if (inp->inp_options) 547 ipoptlen = inp->inp_options->m_len - 548 offsetof(struct ipoption, ipopt_list); 549 else 550 ipoptlen = 0; 551 ipoptlen += ipsec_optlen; 552 553 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg && 554 (tp->t_port == 0) && 555 ((tp->t_flags & TF_SIGNATURE) == 0) && 556 tp->rcv_numsacks == 0 && ((sack_rxmit == 0) || V_tcp_sack_tso) && 557 (ipoptlen == 0 || (ipoptlen == ipsec_optlen && 558 (tp->t_flags2 & TF2_IPSEC_TSO) != 0)) && 559 !(flags & TH_SYN)) 560 tso = 1; 561 562 if (SEQ_LT((sack_rxmit ? p->rxmit : tp->snd_nxt) + len, 563 tp->snd_una + sbused(&so->so_snd))) { 564 flags &= ~TH_FIN; 565 } 566 567 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 568 (long)TCP_MAXWIN << tp->rcv_scale); 569 570 /* 571 * Sender silly window avoidance. We transmit under the following 572 * conditions when len is non-zero: 573 * 574 * - We have a full segment (or more with TSO) 575 * - This is the last buffer in a write()/send() and we are 576 * either idle or running NODELAY 577 * - we've timed out (e.g. persist timer) 578 * - we have more then 1/2 the maximum send window's worth of 579 * data (receiver may be limited the window size) 580 * - we need to retransmit 581 */ 582 if (len) { 583 if (len >= tp->t_maxseg) 584 goto send; 585 /* 586 * As the TCP header options are now 587 * considered when setting up the initial 588 * window, we would not send the last segment 589 * if we skip considering the option length here. 590 * Note: this may not work when tcp headers change 591 * very dynamically in the future. 592 */ 593 if ((((tp->t_flags & TF_SIGNATURE) ? 594 PADTCPOLEN(TCPOLEN_SIGNATURE) : 0) + 595 ((tp->t_flags & TF_RCVD_TSTMP) ? 596 PADTCPOLEN(TCPOLEN_TIMESTAMP) : 0) + 597 len) >= tp->t_maxseg) 598 goto send; 599 /* 600 * NOTE! on localhost connections an 'ack' from the remote 601 * end may occur synchronously with the output and cause 602 * us to flush a buffer queued with moretocome. XXX 603 * 604 * note: the len + off check is almost certainly unnecessary. 605 */ 606 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 607 (idle || (tp->t_flags & TF_NODELAY)) && 608 (uint32_t)len + (uint32_t)off >= sbavail(&so->so_snd) && 609 (tp->t_flags & TF_NOPUSH) == 0) { 610 goto send; 611 } 612 if (tp->t_flags & TF_FORCEDATA) /* typ. timeout case */ 613 goto send; 614 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) 615 goto send; 616 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) /* retransmit case */ 617 goto send; 618 if (sack_rxmit) 619 goto send; 620 } 621 622 /* 623 * Sending of standalone window updates. 624 * 625 * Window updates are important when we close our window due to a 626 * full socket buffer and are opening it again after the application 627 * reads data from it. Once the window has opened again and the 628 * remote end starts to send again the ACK clock takes over and 629 * provides the most current window information. 630 * 631 * We must avoid the silly window syndrome whereas every read 632 * from the receive buffer, no matter how small, causes a window 633 * update to be sent. We also should avoid sending a flurry of 634 * window updates when the socket buffer had queued a lot of data 635 * and the application is doing small reads. 636 * 637 * Prevent a flurry of pointless window updates by only sending 638 * an update when we can increase the advertized window by more 639 * than 1/4th of the socket buffer capacity. When the buffer is 640 * getting full or is very small be more aggressive and send an 641 * update whenever we can increase by two mss sized segments. 642 * In all other situations the ACK's to new incoming data will 643 * carry further window increases. 644 * 645 * Don't send an independent window update if a delayed 646 * ACK is pending (it will get piggy-backed on it) or the 647 * remote side already has done a half-close and won't send 648 * more data. 649 */ 650 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 651 !(tp->t_flags & TF_DELACK) && 652 !TCPS_HAVERCVDFIN(tp->t_state)) { 653 /* 654 * "adv" is the amount we could increase the window, 655 * taking into account that we are limited by 656 * TCP_MAXWIN << tp->rcv_scale. 657 */ 658 int32_t adv; 659 int oldwin; 660 661 adv = recwin; 662 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 663 oldwin = (tp->rcv_adv - tp->rcv_nxt); 664 if (adv > oldwin) 665 adv -= oldwin; 666 else 667 adv = 0; 668 } else 669 oldwin = 0; 670 671 /* 672 * If the new window size ends up being the same as or less 673 * than the old size when it is scaled, then don't force 674 * a window update. 675 */ 676 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 677 goto dontupdate; 678 679 if (adv >= (int32_t)(2 * tp->t_maxseg) && 680 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 681 recwin <= (so->so_rcv.sb_hiwat / 8) || 682 so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg || 683 adv >= TCP_MAXWIN << tp->rcv_scale)) 684 goto send; 685 if (2 * adv >= (int32_t)so->so_rcv.sb_hiwat) 686 goto send; 687 } 688 dontupdate: 689 690 /* 691 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 692 * is also a catch-all for the retransmit timer timeout case. 693 */ 694 if (tp->t_flags & TF_ACKNOW) 695 goto send; 696 if ((flags & TH_RST) || 697 ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) 698 goto send; 699 if (SEQ_GT(tp->snd_up, tp->snd_una)) 700 goto send; 701 /* 702 * If our state indicates that FIN should be sent 703 * and we have not yet done so, then we need to send. 704 */ 705 if (flags & TH_FIN && 706 ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una)) 707 goto send; 708 /* 709 * In SACK, it is possible for tcp_output to fail to send a segment 710 * after the retransmission timer has been turned off. Make sure 711 * that the retransmission timer is set. 712 */ 713 if ((tp->t_flags & TF_SACK_PERMIT) && 714 SEQ_GT(tp->snd_max, tp->snd_una) && 715 !tcp_timer_active(tp, TT_REXMT) && 716 !tcp_timer_active(tp, TT_PERSIST)) { 717 tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp)); 718 goto just_return; 719 } 720 /* 721 * TCP window updates are not reliable, rather a polling protocol 722 * using ``persist'' packets is used to insure receipt of window 723 * updates. The three ``states'' for the output side are: 724 * idle not doing retransmits or persists 725 * persisting to move a small or zero window 726 * (re)transmitting and thereby not persisting 727 * 728 * tcp_timer_active(tp, TT_PERSIST) 729 * is true when we are in persist state. 730 * (tp->t_flags & TF_FORCEDATA) 731 * is set when we are called to send a persist packet. 732 * tcp_timer_active(tp, TT_REXMT) 733 * is set when we are retransmitting 734 * The output side is idle when both timers are zero. 735 * 736 * If send window is too small, there is data to transmit, and no 737 * retransmit or persist is pending, then go to persist state. 738 * If nothing happens soon, send when timer expires: 739 * if window is nonzero, transmit what we can, 740 * otherwise force out a byte. 741 */ 742 if (sbavail(&so->so_snd) && !tcp_timer_active(tp, TT_REXMT) && 743 !tcp_timer_active(tp, TT_PERSIST)) { 744 tp->t_rxtshift = 0; 745 tcp_setpersist(tp); 746 } 747 748 /* 749 * No reason to send a segment, just return. 750 */ 751 just_return: 752 SOCKBUF_UNLOCK(&so->so_snd); 753 return (0); 754 755 send: 756 SOCKBUF_LOCK_ASSERT(&so->so_snd); 757 if (len > 0) { 758 if (len >= tp->t_maxseg) 759 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 760 else 761 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 762 } 763 /* 764 * Before ESTABLISHED, force sending of initial options 765 * unless TCP set not to do any options. 766 * NOTE: we assume that the IP/TCP header plus TCP options 767 * always fit in a single mbuf, leaving room for a maximum 768 * link header, i.e. 769 * max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES 770 */ 771 optlen = 0; 772 #ifdef INET6 773 if (isipv6) 774 hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr); 775 else 776 #endif 777 hdrlen = sizeof (struct tcpiphdr); 778 779 if (flags & TH_SYN) { 780 tp->snd_nxt = tp->iss; 781 } 782 783 /* 784 * Compute options for segment. 785 * We only have to care about SYN and established connection 786 * segments. Options for SYN-ACK segments are handled in TCP 787 * syncache. 788 */ 789 to.to_flags = 0; 790 if ((tp->t_flags & TF_NOOPT) == 0) { 791 /* Maximum segment size. */ 792 if (flags & TH_SYN) { 793 to.to_mss = tcp_mssopt(&inp->inp_inc); 794 if (tp->t_port) 795 to.to_mss -= V_tcp_udp_tunneling_overhead; 796 to.to_flags |= TOF_MSS; 797 798 /* 799 * On SYN or SYN|ACK transmits on TFO connections, 800 * only include the TFO option if it is not a 801 * retransmit, as the presence of the TFO option may 802 * have caused the original SYN or SYN|ACK to have 803 * been dropped by a middlebox. 804 */ 805 if ((tp->t_flags & TF_FASTOPEN) && 806 (tp->t_rxtshift == 0)) { 807 if (tp->t_state == TCPS_SYN_RECEIVED) { 808 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 809 to.to_tfo_cookie = 810 (u_int8_t *)&tp->t_tfo_cookie.server; 811 to.to_flags |= TOF_FASTOPEN; 812 wanted_cookie = 1; 813 } else if (tp->t_state == TCPS_SYN_SENT) { 814 to.to_tfo_len = 815 tp->t_tfo_client_cookie_len; 816 to.to_tfo_cookie = 817 tp->t_tfo_cookie.client; 818 to.to_flags |= TOF_FASTOPEN; 819 wanted_cookie = 1; 820 /* 821 * If we wind up having more data to 822 * send with the SYN than can fit in 823 * one segment, don't send any more 824 * until the SYN|ACK comes back from 825 * the other end. 826 */ 827 dont_sendalot = 1; 828 } 829 } 830 } 831 /* Window scaling. */ 832 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 833 to.to_wscale = tp->request_r_scale; 834 to.to_flags |= TOF_SCALE; 835 } 836 /* Timestamps. */ 837 if ((tp->t_flags & TF_RCVD_TSTMP) || 838 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 839 curticks = tcp_ts_getticks(); 840 to.to_tsval = curticks + tp->ts_offset; 841 to.to_tsecr = tp->ts_recent; 842 to.to_flags |= TOF_TS; 843 if (tp->t_rxtshift == 1) 844 tp->t_badrxtwin = curticks; 845 } 846 847 /* Set receive buffer autosizing timestamp. */ 848 if (tp->rfbuf_ts == 0 && 849 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 850 tp->rfbuf_ts = tcp_ts_getticks(); 851 852 /* Selective ACK's. */ 853 if (tp->t_flags & TF_SACK_PERMIT) { 854 if (flags & TH_SYN) 855 to.to_flags |= TOF_SACKPERM; 856 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 857 tp->rcv_numsacks > 0) { 858 to.to_flags |= TOF_SACK; 859 to.to_nsacks = tp->rcv_numsacks; 860 to.to_sacks = (u_char *)tp->sackblks; 861 } 862 } 863 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 864 /* TCP-MD5 (RFC2385). */ 865 /* 866 * Check that TCP_MD5SIG is enabled in tcpcb to 867 * account the size needed to set this TCP option. 868 */ 869 if (tp->t_flags & TF_SIGNATURE) 870 to.to_flags |= TOF_SIGNATURE; 871 #endif /* TCP_SIGNATURE */ 872 873 /* Processing the options. */ 874 hdrlen += optlen = tcp_addoptions(&to, opt); 875 /* 876 * If we wanted a TFO option to be added, but it was unable 877 * to fit, ensure no data is sent. 878 */ 879 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && 880 !(to.to_flags & TOF_FASTOPEN)) 881 len = 0; 882 } 883 if (tp->t_port) { 884 if (V_tcp_udp_tunneling_port == 0) { 885 /* The port was removed?? */ 886 SOCKBUF_UNLOCK(&so->so_snd); 887 return (EHOSTUNREACH); 888 } 889 hdrlen += sizeof(struct udphdr); 890 } 891 /* 892 * Adjust data length if insertion of options will 893 * bump the packet length beyond the t_maxseg length. 894 * Clear the FIN bit because we cut off the tail of 895 * the segment. 896 */ 897 if (len + optlen + ipoptlen > tp->t_maxseg) { 898 flags &= ~TH_FIN; 899 900 if (tso) { 901 u_int if_hw_tsomax; 902 u_int moff; 903 int max_len; 904 905 /* extract TSO information */ 906 if_hw_tsomax = tp->t_tsomax; 907 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 908 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 909 910 /* 911 * Limit a TSO burst to prevent it from 912 * overflowing or exceeding the maximum length 913 * allowed by the network interface: 914 */ 915 KASSERT(ipoptlen == ipsec_optlen, 916 ("%s: TSO can't do IP options", __func__)); 917 918 /* 919 * Check if we should limit by maximum payload 920 * length: 921 */ 922 if (if_hw_tsomax != 0) { 923 /* compute maximum TSO length */ 924 max_len = if_hw_tsomax - hdrlen - 925 ipsec_optlen - max_linkhdr; 926 if (max_len <= 0) { 927 len = 0; 928 } else if (len > max_len) { 929 sendalot = 1; 930 len = max_len; 931 } 932 } 933 934 /* 935 * Prevent the last segment from being 936 * fractional unless the send sockbuf can be 937 * emptied: 938 */ 939 max_len = tp->t_maxseg - optlen - ipsec_optlen; 940 if (((uint32_t)off + (uint32_t)len) < 941 sbavail(&so->so_snd)) { 942 moff = len % max_len; 943 if (moff != 0) { 944 len -= moff; 945 sendalot = 1; 946 } 947 } 948 949 /* 950 * In case there are too many small fragments 951 * don't use TSO: 952 */ 953 if (len <= max_len) { 954 len = max_len; 955 sendalot = 1; 956 tso = 0; 957 } 958 959 /* 960 * Send the FIN in a separate segment 961 * after the bulk sending is done. 962 * We don't trust the TSO implementations 963 * to clear the FIN flag on all but the 964 * last segment. 965 */ 966 if (tp->t_flags & TF_NEEDFIN) 967 sendalot = 1; 968 } else { 969 if (optlen + ipoptlen >= tp->t_maxseg) { 970 /* 971 * Since we don't have enough space to put 972 * the IP header chain and the TCP header in 973 * one packet as required by RFC 7112, don't 974 * send it. Also ensure that at least one 975 * byte of the payload can be put into the 976 * TCP segment. 977 */ 978 SOCKBUF_UNLOCK(&so->so_snd); 979 error = EMSGSIZE; 980 sack_rxmit = 0; 981 goto out; 982 } 983 len = tp->t_maxseg - optlen - ipoptlen; 984 sendalot = 1; 985 if (dont_sendalot) 986 sendalot = 0; 987 } 988 } else 989 tso = 0; 990 991 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 992 ("%s: len > IP_MAXPACKET", __func__)); 993 994 /*#ifdef DIAGNOSTIC*/ 995 #ifdef INET6 996 if (max_linkhdr + hdrlen > MCLBYTES) 997 #else 998 if (max_linkhdr + hdrlen > MHLEN) 999 #endif 1000 panic("tcphdr too big"); 1001 /*#endif*/ 1002 1003 /* 1004 * This KASSERT is here to catch edge cases at a well defined place. 1005 * Before, those had triggered (random) panic conditions further down. 1006 */ 1007 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 1008 1009 /* 1010 * Grab a header mbuf, attaching a copy of data to 1011 * be transmitted, and initialize the header from 1012 * the template for sends on this connection. 1013 */ 1014 if (len) { 1015 struct mbuf *mb; 1016 struct sockbuf *msb; 1017 u_int moff; 1018 1019 if ((tp->t_flags & TF_FORCEDATA) && len == 1) { 1020 TCPSTAT_INC(tcps_sndprobe); 1021 #ifdef STATS 1022 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 1023 stats_voi_update_abs_u32(tp->t_stats, 1024 VOI_TCP_RETXPB, len); 1025 else 1026 stats_voi_update_abs_u64(tp->t_stats, 1027 VOI_TCP_TXPB, len); 1028 #endif /* STATS */ 1029 } else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 1030 tp->t_sndrexmitpack++; 1031 TCPSTAT_INC(tcps_sndrexmitpack); 1032 TCPSTAT_ADD(tcps_sndrexmitbyte, len); 1033 if (sack_rxmit) { 1034 TCPSTAT_INC(tcps_sack_rexmits); 1035 if (tso) { 1036 TCPSTAT_INC(tcps_sack_rexmits_tso); 1037 } 1038 TCPSTAT_ADD(tcps_sack_rexmit_bytes, len); 1039 } 1040 #ifdef STATS 1041 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 1042 len); 1043 #endif /* STATS */ 1044 } else { 1045 TCPSTAT_INC(tcps_sndpack); 1046 TCPSTAT_ADD(tcps_sndbyte, len); 1047 #ifdef STATS 1048 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 1049 len); 1050 #endif /* STATS */ 1051 } 1052 #ifdef INET6 1053 if (MHLEN < hdrlen + max_linkhdr) 1054 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1055 else 1056 #endif 1057 m = m_gethdr(M_NOWAIT, MT_DATA); 1058 1059 if (m == NULL) { 1060 SOCKBUF_UNLOCK(&so->so_snd); 1061 error = ENOBUFS; 1062 sack_rxmit = 0; 1063 goto out; 1064 } 1065 1066 m->m_data += max_linkhdr; 1067 m->m_len = hdrlen; 1068 1069 /* 1070 * Start the m_copy functions from the closest mbuf 1071 * to the offset in the socket buffer chain. 1072 */ 1073 mb = sbsndptr_noadv(&so->so_snd, off, &moff); 1074 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 1075 m_copydata(mb, moff, len, 1076 mtod(m, caddr_t) + hdrlen); 1077 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 1078 sbsndptr_adv(&so->so_snd, mb, len); 1079 m->m_len += len; 1080 } else { 1081 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 1082 msb = NULL; 1083 else 1084 msb = &so->so_snd; 1085 m->m_next = tcp_m_copym(mb, moff, 1086 &len, if_hw_tsomaxsegcount, 1087 if_hw_tsomaxsegsize, msb, hw_tls); 1088 if (len <= (tp->t_maxseg - optlen)) { 1089 /* 1090 * Must have ran out of mbufs for the copy 1091 * shorten it to no longer need tso. Lets 1092 * not put on sendalot since we are low on 1093 * mbufs. 1094 */ 1095 tso = 0; 1096 } 1097 if (m->m_next == NULL) { 1098 SOCKBUF_UNLOCK(&so->so_snd); 1099 (void) m_free(m); 1100 error = ENOBUFS; 1101 sack_rxmit = 0; 1102 goto out; 1103 } 1104 } 1105 1106 /* 1107 * If we're sending everything we've got, set PUSH. 1108 * (This will keep happy those implementations which only 1109 * give data to the user when a buffer fills or 1110 * a PUSH comes in.) 1111 */ 1112 if (((uint32_t)off + (uint32_t)len == sbused(&so->so_snd)) && 1113 !(flags & TH_SYN)) 1114 flags |= TH_PUSH; 1115 SOCKBUF_UNLOCK(&so->so_snd); 1116 } else { 1117 SOCKBUF_UNLOCK(&so->so_snd); 1118 if (tp->t_flags & TF_ACKNOW) 1119 TCPSTAT_INC(tcps_sndacks); 1120 else if (flags & (TH_SYN|TH_FIN|TH_RST)) 1121 TCPSTAT_INC(tcps_sndctrl); 1122 else if (SEQ_GT(tp->snd_up, tp->snd_una)) 1123 TCPSTAT_INC(tcps_sndurg); 1124 else 1125 TCPSTAT_INC(tcps_sndwinup); 1126 1127 m = m_gethdr(M_NOWAIT, MT_DATA); 1128 if (m == NULL) { 1129 error = ENOBUFS; 1130 sack_rxmit = 0; 1131 goto out; 1132 } 1133 #ifdef INET6 1134 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 1135 MHLEN >= hdrlen) { 1136 M_ALIGN(m, hdrlen); 1137 } else 1138 #endif 1139 m->m_data += max_linkhdr; 1140 m->m_len = hdrlen; 1141 } 1142 SOCKBUF_UNLOCK_ASSERT(&so->so_snd); 1143 m->m_pkthdr.rcvif = (struct ifnet *)0; 1144 #ifdef MAC 1145 mac_inpcb_create_mbuf(inp, m); 1146 #endif 1147 #ifdef INET6 1148 if (isipv6) { 1149 ip6 = mtod(m, struct ip6_hdr *); 1150 if (tp->t_port) { 1151 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 1152 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 1153 udp->uh_dport = tp->t_port; 1154 ulen = hdrlen + len - sizeof(struct ip6_hdr); 1155 udp->uh_ulen = htons(ulen); 1156 th = (struct tcphdr *)(udp + 1); 1157 } else { 1158 th = (struct tcphdr *)(ip6 + 1); 1159 } 1160 tcpip_fillheaders(inp, tp->t_port, ip6, th); 1161 } else 1162 #endif /* INET6 */ 1163 { 1164 ip = mtod(m, struct ip *); 1165 if (tp->t_port) { 1166 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 1167 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 1168 udp->uh_dport = tp->t_port; 1169 ulen = hdrlen + len - sizeof(struct ip); 1170 udp->uh_ulen = htons(ulen); 1171 th = (struct tcphdr *)(udp + 1); 1172 } else 1173 th = (struct tcphdr *)(ip + 1); 1174 tcpip_fillheaders(inp, tp->t_port, ip, th); 1175 } 1176 1177 /* 1178 * Fill in fields, remembering maximum advertised 1179 * window for use in delaying messages about window sizes. 1180 * If resending a FIN, be sure not to use a new sequence number. 1181 */ 1182 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 1183 tp->snd_nxt == tp->snd_max) 1184 tp->snd_nxt--; 1185 /* 1186 * If we are starting a connection, send ECN setup 1187 * SYN packet. If we are on a retransmit, we may 1188 * resend those bits a number of times as per 1189 * RFC 3168. 1190 */ 1191 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 1192 flags |= tcp_ecn_output_syn_sent(tp); 1193 } 1194 /* Also handle parallel SYN for ECN */ 1195 if ((TCPS_HAVERCVDSYN(tp->t_state)) && 1196 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 1197 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 1198 if ((tp->t_state == TCPS_SYN_RECEIVED) && 1199 (tp->t_flags2 & TF2_ECN_SND_ECE)) 1200 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 1201 #ifdef INET6 1202 if (isipv6) { 1203 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << IPV6_FLOWLABEL_LEN); 1204 ip6->ip6_flow |= htonl(ect << IPV6_FLOWLABEL_LEN); 1205 } 1206 else 1207 #endif 1208 { 1209 ip->ip_tos &= ~IPTOS_ECN_MASK; 1210 ip->ip_tos |= ect; 1211 } 1212 } 1213 1214 /* 1215 * If we are doing retransmissions, then snd_nxt will 1216 * not reflect the first unsent octet. For ACK only 1217 * packets, we do not want the sequence number of the 1218 * retransmitted packet, we want the sequence number 1219 * of the next unsent octet. So, if there is no data 1220 * (and no SYN or FIN), use snd_max instead of snd_nxt 1221 * when filling in ti_seq. But if we are in persist 1222 * state, snd_max might reflect one byte beyond the 1223 * right edge of the window, so use snd_nxt in that 1224 * case, since we know we aren't doing a retransmission. 1225 * (retransmit and persist are mutually exclusive...) 1226 */ 1227 if (sack_rxmit == 0) { 1228 if (len || (flags & (TH_SYN|TH_FIN)) || 1229 tcp_timer_active(tp, TT_PERSIST)) 1230 th->th_seq = htonl(tp->snd_nxt); 1231 else 1232 th->th_seq = htonl(tp->snd_max); 1233 } else { 1234 th->th_seq = htonl(p->rxmit); 1235 p->rxmit += len; 1236 /* 1237 * Lost Retransmission Detection 1238 * trigger resending of a (then 1239 * still existing) hole, when 1240 * fack acks recoverypoint. 1241 */ 1242 if ((tp->t_flags & TF_LRD) && SEQ_GEQ(p->rxmit, p->end)) 1243 p->rxmit = tp->snd_recover; 1244 tp->sackhint.sack_bytes_rexmit += len; 1245 } 1246 if (IN_RECOVERY(tp->t_flags)) { 1247 /* 1248 * Account all bytes transmitted while 1249 * IN_RECOVERY, simplifying PRR and 1250 * Lost Retransmit Detection 1251 */ 1252 tp->sackhint.prr_out += len; 1253 } 1254 th->th_ack = htonl(tp->rcv_nxt); 1255 if (optlen) { 1256 bcopy(opt, th + 1, optlen); 1257 th->th_off = (sizeof (struct tcphdr) + optlen) >> 2; 1258 } 1259 tcp_set_flags(th, flags); 1260 /* 1261 * Calculate receive window. Don't shrink window, 1262 * but avoid silly window syndrome. 1263 * If a RST segment is sent, advertise a window of zero. 1264 */ 1265 if (flags & TH_RST) { 1266 recwin = 0; 1267 } else { 1268 if (recwin < (so->so_rcv.sb_hiwat / 4) && 1269 recwin < tp->t_maxseg) 1270 recwin = 0; 1271 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 1272 recwin < (tp->rcv_adv - tp->rcv_nxt)) 1273 recwin = (tp->rcv_adv - tp->rcv_nxt); 1274 } 1275 /* 1276 * According to RFC1323 the window field in a SYN (i.e., a <SYN> 1277 * or <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> 1278 * case is handled in syncache. 1279 */ 1280 if (flags & TH_SYN) 1281 th->th_win = htons((u_short) 1282 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 1283 else { 1284 /* Avoid shrinking window with window scaling. */ 1285 recwin = roundup2(recwin, 1 << tp->rcv_scale); 1286 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 1287 } 1288 1289 /* 1290 * Adjust the RXWIN0SENT flag - indicate that we have advertised 1291 * a 0 window. This may cause the remote transmitter to stall. This 1292 * flag tells soreceive() to disable delayed acknowledgements when 1293 * draining the buffer. This can occur if the receiver is attempting 1294 * to read more data than can be buffered prior to transmitting on 1295 * the connection. 1296 */ 1297 if (th->th_win == 0) { 1298 tp->t_sndzerowin++; 1299 tp->t_flags |= TF_RXWIN0SENT; 1300 } else 1301 tp->t_flags &= ~TF_RXWIN0SENT; 1302 if (SEQ_GT(tp->snd_up, tp->snd_nxt)) { 1303 th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt)); 1304 th->th_flags |= TH_URG; 1305 } else 1306 /* 1307 * If no urgent pointer to send, then we pull 1308 * the urgent pointer to the left edge of the send window 1309 * so that it doesn't drift into the send window on sequence 1310 * number wraparound. 1311 */ 1312 tp->snd_up = tp->snd_una; /* drag it along */ 1313 1314 /* 1315 * Put TCP length in extended header, and then 1316 * checksum extended header and data. 1317 */ 1318 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 1319 1320 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 1321 if (to.to_flags & TOF_SIGNATURE) { 1322 /* 1323 * Calculate MD5 signature and put it into the place 1324 * determined before. 1325 * NOTE: since TCP options buffer doesn't point into 1326 * mbuf's data, calculate offset and use it. 1327 */ 1328 if (!TCPMD5_ENABLED() || (error = TCPMD5_OUTPUT(m, th, 1329 (u_char *)(th + 1) + (to.to_signature - opt))) != 0) { 1330 /* 1331 * Do not send segment if the calculation of MD5 1332 * digest has failed. 1333 */ 1334 m_freem(m); 1335 goto out; 1336 } 1337 } 1338 #endif 1339 #ifdef INET6 1340 if (isipv6) { 1341 /* 1342 * There is no need to fill in ip6_plen right now. 1343 * It will be filled later by ip6_output. 1344 */ 1345 if (tp->t_port) { 1346 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 1347 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 1348 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 1349 th->th_sum = htons(0); 1350 UDPSTAT_INC(udps_opackets); 1351 } else { 1352 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 1353 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 1354 th->th_sum = in6_cksum_pseudo(ip6, 1355 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 1356 0); 1357 } 1358 } 1359 #endif 1360 #if defined(INET6) && defined(INET) 1361 else 1362 #endif 1363 #ifdef INET 1364 { 1365 if (tp->t_port) { 1366 m->m_pkthdr.csum_flags = CSUM_UDP; 1367 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 1368 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 1369 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 1370 th->th_sum = htons(0); 1371 UDPSTAT_INC(udps_opackets); 1372 } else { 1373 m->m_pkthdr.csum_flags = CSUM_TCP; 1374 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 1375 th->th_sum = in_pseudo(ip->ip_src.s_addr, 1376 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 1377 IPPROTO_TCP + len + optlen)); 1378 } 1379 1380 /* IP version must be set here for ipv4/ipv6 checking later */ 1381 KASSERT(ip->ip_v == IPVERSION, 1382 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 1383 } 1384 #endif 1385 1386 /* 1387 * Enable TSO and specify the size of the segments. 1388 * The TCP pseudo header checksum is always provided. 1389 */ 1390 if (tso) { 1391 KASSERT(len > tp->t_maxseg - optlen - ipsec_optlen, 1392 ("%s: len <= tso_segsz", __func__)); 1393 m->m_pkthdr.csum_flags |= CSUM_TSO; 1394 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen - ipsec_optlen; 1395 } 1396 1397 KASSERT(len + hdrlen == m_length(m, NULL), 1398 ("%s: mbuf chain shorter than expected: %d + %u != %u", 1399 __func__, len, hdrlen, m_length(m, NULL))); 1400 1401 #ifdef TCP_HHOOK 1402 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 1403 hhook_run_tcp_est_out(tp, th, &to, len, tso); 1404 #endif 1405 1406 TCP_PROBE3(debug__output, tp, th, m); 1407 1408 /* We're getting ready to send; log now. */ 1409 /* XXXMT: We are not honoring verbose logging. */ 1410 1411 if (tcp_bblogging_on(tp)) 1412 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, 1413 TCP_LOG_OUT, ERRNO_UNK, len, NULL, false, NULL, NULL, 0, 1414 NULL); 1415 else 1416 lgb = NULL; 1417 1418 /* 1419 * Fill in IP length and desired time to live and 1420 * send to IP level. There should be a better way 1421 * to handle ttl and tos; we could keep them in 1422 * the template, but need a way to checksum without them. 1423 */ 1424 /* 1425 * m->m_pkthdr.len should have been set before checksum calculation, 1426 * because in6_cksum() need it. 1427 */ 1428 #ifdef INET6 1429 if (isipv6) { 1430 /* 1431 * we separately set hoplimit for every segment, since the 1432 * user might want to change the value via setsockopt. 1433 * Also, desired default hop limit might be changed via 1434 * Neighbor Discovery. 1435 */ 1436 ip6->ip6_hlim = in6_selecthlim(inp, NULL); 1437 1438 /* 1439 * Set the packet size here for the benefit of DTrace probes. 1440 * ip6_output() will set it properly; it's supposed to include 1441 * the option header lengths as well. 1442 */ 1443 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 1444 1445 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 1446 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 1447 else 1448 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 1449 1450 if (tp->t_state == TCPS_SYN_SENT) 1451 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 1452 1453 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 1454 1455 #ifdef TCPPCAP 1456 /* Save packet, if requested. */ 1457 tcp_pcap_add(th, m, &(tp->t_outpkts)); 1458 #endif 1459 1460 /* TODO: IPv6 IP6TOS_ECT bit on */ 1461 error = ip6_output(m, inp->in6p_outputopts, &inp->inp_route6, 1462 ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 1463 NULL, NULL, inp); 1464 1465 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 1466 mtu = inp->inp_route6.ro_nh->nh_mtu; 1467 } 1468 #endif /* INET6 */ 1469 #if defined(INET) && defined(INET6) 1470 else 1471 #endif 1472 #ifdef INET 1473 { 1474 ip->ip_len = htons(m->m_pkthdr.len); 1475 #ifdef INET6 1476 if (inp->inp_vflag & INP_IPV6PROTO) 1477 ip->ip_ttl = in6_selecthlim(inp, NULL); 1478 #endif /* INET6 */ 1479 /* 1480 * If we do path MTU discovery, then we set DF on every packet. 1481 * This might not be the best thing to do according to RFC3390 1482 * Section 2. However the tcp hostcache migitates the problem 1483 * so it affects only the first tcp connection with a host. 1484 * 1485 * NB: Don't set DF on small MTU/MSS to have a safe fallback. 1486 */ 1487 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 1488 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 1489 if (tp->t_port == 0 || len < V_tcp_minmss) { 1490 ip->ip_off |= htons(IP_DF); 1491 } 1492 } else { 1493 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 1494 } 1495 1496 if (tp->t_state == TCPS_SYN_SENT) 1497 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 1498 1499 TCP_PROBE5(send, NULL, tp, ip, tp, th); 1500 1501 #ifdef TCPPCAP 1502 /* Save packet, if requested. */ 1503 tcp_pcap_add(th, m, &(tp->t_outpkts)); 1504 #endif 1505 1506 error = ip_output(m, inp->inp_options, &inp->inp_route, 1507 ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, inp); 1508 1509 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 1510 mtu = inp->inp_route.ro_nh->nh_mtu; 1511 } 1512 #endif /* INET */ 1513 1514 if (lgb != NULL) { 1515 lgb->tlb_errno = error; 1516 lgb = NULL; 1517 } 1518 out: 1519 if (error == 0) 1520 tcp_account_for_send(tp, len, (tp->snd_nxt != tp->snd_max), 0, hw_tls); 1521 /* 1522 * In transmit state, time the transmission and arrange for 1523 * the retransmit. In persist state, just set snd_max. In a closed 1524 * state just return. 1525 */ 1526 if (flags & TH_RST) { 1527 TCPSTAT_INC(tcps_sndtotal); 1528 return (0); 1529 } else if ((tp->t_flags & TF_FORCEDATA) == 0 || 1530 !tcp_timer_active(tp, TT_PERSIST)) { 1531 tcp_seq startseq = tp->snd_nxt; 1532 1533 /* 1534 * Advance snd_nxt over sequence space of this segment. 1535 */ 1536 if (flags & (TH_SYN|TH_FIN)) { 1537 if (flags & TH_SYN) 1538 tp->snd_nxt++; 1539 if (flags & TH_FIN) { 1540 tp->snd_nxt++; 1541 tp->t_flags |= TF_SENTFIN; 1542 } 1543 } 1544 if (sack_rxmit) 1545 goto timer; 1546 tp->snd_nxt += len; 1547 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 1548 /* 1549 * Update "made progress" indication if we just 1550 * added new data to an empty socket buffer. 1551 */ 1552 if (tp->snd_una == tp->snd_max) 1553 tp->t_acktime = ticks; 1554 tp->snd_max = tp->snd_nxt; 1555 /* 1556 * Time this transmission if not a retransmission and 1557 * not currently timing anything. 1558 */ 1559 tp->t_sndtime = ticks; 1560 if (tp->t_rtttime == 0) { 1561 tp->t_rtttime = ticks; 1562 tp->t_rtseq = startseq; 1563 TCPSTAT_INC(tcps_segstimed); 1564 } 1565 #ifdef STATS 1566 if (!(tp->t_flags & TF_GPUTINPROG) && len) { 1567 tp->t_flags |= TF_GPUTINPROG; 1568 tp->gput_seq = startseq; 1569 tp->gput_ack = startseq + 1570 ulmin(sbavail(&so->so_snd) - off, sendwin); 1571 tp->gput_ts = tcp_ts_getticks(); 1572 } 1573 #endif /* STATS */ 1574 } 1575 1576 /* 1577 * Set retransmit timer if not currently set, 1578 * and not doing a pure ack or a keep-alive probe. 1579 * Initial value for retransmit timer is smoothed 1580 * round-trip time + 2 * round-trip time variance. 1581 * Initialize shift counter which is used for backoff 1582 * of retransmit time. 1583 */ 1584 timer: 1585 if (!tcp_timer_active(tp, TT_REXMT) && 1586 ((sack_rxmit && tp->snd_nxt != tp->snd_max) || 1587 (tp->snd_nxt != tp->snd_una))) { 1588 if (tcp_timer_active(tp, TT_PERSIST)) { 1589 tcp_timer_activate(tp, TT_PERSIST, 0); 1590 tp->t_rxtshift = 0; 1591 } 1592 tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp)); 1593 } else if (len == 0 && sbavail(&so->so_snd) && 1594 !tcp_timer_active(tp, TT_REXMT) && 1595 !tcp_timer_active(tp, TT_PERSIST)) { 1596 /* 1597 * Avoid a situation where we do not set persist timer 1598 * after a zero window condition. For example: 1599 * 1) A -> B: packet with enough data to fill the window 1600 * 2) B -> A: ACK for #1 + new data (0 window 1601 * advertisement) 1602 * 3) A -> B: ACK for #2, 0 len packet 1603 * 1604 * In this case, A will not activate the persist timer, 1605 * because it chose to send a packet. Unless tcp_output 1606 * is called for some other reason (delayed ack timer, 1607 * another input packet from B, socket syscall), A will 1608 * not send zero window probes. 1609 * 1610 * So, if you send a 0-length packet, but there is data 1611 * in the socket buffer, and neither the rexmt or 1612 * persist timer is already set, then activate the 1613 * persist timer. 1614 */ 1615 tp->t_rxtshift = 0; 1616 tcp_setpersist(tp); 1617 } 1618 } else { 1619 /* 1620 * Persist case, update snd_max but since we are in 1621 * persist mode (no window) we do not update snd_nxt. 1622 */ 1623 int xlen = len; 1624 if (flags & TH_SYN) 1625 ++xlen; 1626 if (flags & TH_FIN) { 1627 ++xlen; 1628 tp->t_flags |= TF_SENTFIN; 1629 } 1630 if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) 1631 tp->snd_max = tp->snd_nxt + xlen; 1632 } 1633 if ((error == 0) && 1634 (TCPS_HAVEESTABLISHED(tp->t_state) && 1635 (tp->t_flags & TF_SACK_PERMIT) && 1636 tp->rcv_numsacks > 0)) { 1637 /* Clean up any DSACK's sent */ 1638 tcp_clean_dsack_blocks(tp); 1639 } 1640 if (error) { 1641 /* 1642 * We know that the packet was lost, so back out the 1643 * sequence number advance, if any. 1644 * 1645 * If the error is EPERM the packet got blocked by the 1646 * local firewall. Normally we should terminate the 1647 * connection but the blocking may have been spurious 1648 * due to a firewall reconfiguration cycle. So we treat 1649 * it like a packet loss and let the retransmit timer and 1650 * timeouts do their work over time. 1651 * XXX: It is a POLA question whether calling tcp_drop right 1652 * away would be the really correct behavior instead. 1653 */ 1654 if (((tp->t_flags & TF_FORCEDATA) == 0 || 1655 !tcp_timer_active(tp, TT_PERSIST)) && 1656 ((flags & TH_SYN) == 0) && 1657 (error != EPERM)) { 1658 if (sack_rxmit) { 1659 p->rxmit = SEQ_MIN(p->end, p->rxmit) - len; 1660 tp->sackhint.sack_bytes_rexmit -= len; 1661 KASSERT(tp->sackhint.sack_bytes_rexmit >= 0, 1662 ("sackhint bytes rtx >= 0")); 1663 KASSERT((flags & TH_FIN) == 0, 1664 ("error while FIN with SACK rxmit")); 1665 } else { 1666 tp->snd_nxt -= len; 1667 if (flags & TH_FIN) 1668 tp->snd_nxt--; 1669 } 1670 if (IN_RECOVERY(tp->t_flags)) 1671 tp->sackhint.prr_out -= len; 1672 } 1673 SOCKBUF_UNLOCK_ASSERT(&so->so_snd); /* Check gotos. */ 1674 switch (error) { 1675 case EACCES: 1676 case EPERM: 1677 tp->t_softerror = error; 1678 return (error); 1679 case ENOBUFS: 1680 TCP_XMIT_TIMER_ASSERT(tp, len, flags); 1681 tp->snd_cwnd = tp->t_maxseg; 1682 return (0); 1683 case EMSGSIZE: 1684 /* 1685 * For some reason the interface we used initially 1686 * to send segments changed to another or lowered 1687 * its MTU. 1688 * If TSO was active we either got an interface 1689 * without TSO capabilits or TSO was turned off. 1690 * If we obtained mtu from ip_output() then update 1691 * it and try again. 1692 */ 1693 if (tso) 1694 tp->t_flags &= ~TF_TSO; 1695 if (mtu != 0) { 1696 tcp_mss_update(tp, -1, mtu, NULL, NULL); 1697 goto again; 1698 } 1699 return (error); 1700 case EHOSTDOWN: 1701 case EHOSTUNREACH: 1702 case ENETDOWN: 1703 case ENETUNREACH: 1704 if (TCPS_HAVERCVDSYN(tp->t_state)) { 1705 tp->t_softerror = error; 1706 return (0); 1707 } 1708 /* FALLTHROUGH */ 1709 default: 1710 return (error); 1711 } 1712 } 1713 TCPSTAT_INC(tcps_sndtotal); 1714 1715 /* 1716 * Data sent (as far as we can tell). 1717 * If this advertises a larger window than any other segment, 1718 * then remember the size of the advertised window. 1719 * Any pending ACK has now been sent. 1720 */ 1721 if (SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 1722 tp->rcv_adv = tp->rcv_nxt + recwin; 1723 tp->last_ack_sent = tp->rcv_nxt; 1724 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 1725 if (tcp_timer_active(tp, TT_DELACK)) 1726 tcp_timer_activate(tp, TT_DELACK, 0); 1727 if (sendalot) 1728 goto again; 1729 return (0); 1730 } 1731 1732 void 1733 tcp_setpersist(struct tcpcb *tp) 1734 { 1735 int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1; 1736 int tt; 1737 int maxunacktime; 1738 1739 tp->t_flags &= ~TF_PREVVALID; 1740 if (tcp_timer_active(tp, TT_REXMT)) 1741 panic("tcp_setpersist: retransmit pending"); 1742 /* 1743 * If the state is already closed, don't bother. 1744 */ 1745 if (tp->t_state == TCPS_CLOSED) 1746 return; 1747 1748 /* 1749 * Start/restart persistence timer. 1750 */ 1751 TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 1752 tcp_persmin, tcp_persmax); 1753 if (TP_MAXUNACKTIME(tp) && tp->t_acktime) { 1754 maxunacktime = tp->t_acktime + TP_MAXUNACKTIME(tp) - ticks; 1755 if (maxunacktime < 1) 1756 maxunacktime = 1; 1757 if (maxunacktime < tt) 1758 tt = maxunacktime; 1759 } 1760 tcp_timer_activate(tp, TT_PERSIST, tt); 1761 if (tp->t_rxtshift < V_tcp_retries) 1762 tp->t_rxtshift++; 1763 } 1764 1765 /* 1766 * Insert TCP options according to the supplied parameters to the place 1767 * optp in a consistent way. Can handle unaligned destinations. 1768 * 1769 * The order of the option processing is crucial for optimal packing and 1770 * alignment for the scarce option space. 1771 * 1772 * The optimal order for a SYN/SYN-ACK segment is: 1773 * MSS (4) + NOP (1) + Window scale (3) + SACK permitted (2) + 1774 * Timestamp (10) + Signature (18) = 38 bytes out of a maximum of 40. 1775 * 1776 * The SACK options should be last. SACK blocks consume 8*n+2 bytes. 1777 * So a full size SACK blocks option is 34 bytes (with 4 SACK blocks). 1778 * At minimum we need 10 bytes (to generate 1 SACK block). If both 1779 * TCP Timestamps (12 bytes) and TCP Signatures (18 bytes) are present, 1780 * we only have 10 bytes for SACK options (40 - (12 + 18)). 1781 */ 1782 int 1783 tcp_addoptions(struct tcpopt *to, u_char *optp) 1784 { 1785 u_int32_t mask, optlen = 0; 1786 1787 for (mask = 1; mask < TOF_MAXOPT; mask <<= 1) { 1788 if ((to->to_flags & mask) != mask) 1789 continue; 1790 if (optlen == TCP_MAXOLEN) 1791 break; 1792 switch (to->to_flags & mask) { 1793 case TOF_MSS: 1794 while (optlen % 4) { 1795 optlen += TCPOLEN_NOP; 1796 *optp++ = TCPOPT_NOP; 1797 } 1798 if (TCP_MAXOLEN - optlen < TCPOLEN_MAXSEG) 1799 continue; 1800 optlen += TCPOLEN_MAXSEG; 1801 *optp++ = TCPOPT_MAXSEG; 1802 *optp++ = TCPOLEN_MAXSEG; 1803 to->to_mss = htons(to->to_mss); 1804 bcopy((u_char *)&to->to_mss, optp, sizeof(to->to_mss)); 1805 optp += sizeof(to->to_mss); 1806 break; 1807 case TOF_SCALE: 1808 while (!optlen || optlen % 2 != 1) { 1809 optlen += TCPOLEN_NOP; 1810 *optp++ = TCPOPT_NOP; 1811 } 1812 if (TCP_MAXOLEN - optlen < TCPOLEN_WINDOW) 1813 continue; 1814 optlen += TCPOLEN_WINDOW; 1815 *optp++ = TCPOPT_WINDOW; 1816 *optp++ = TCPOLEN_WINDOW; 1817 *optp++ = to->to_wscale; 1818 break; 1819 case TOF_SACKPERM: 1820 while (optlen % 2) { 1821 optlen += TCPOLEN_NOP; 1822 *optp++ = TCPOPT_NOP; 1823 } 1824 if (TCP_MAXOLEN - optlen < TCPOLEN_SACK_PERMITTED) 1825 continue; 1826 optlen += TCPOLEN_SACK_PERMITTED; 1827 *optp++ = TCPOPT_SACK_PERMITTED; 1828 *optp++ = TCPOLEN_SACK_PERMITTED; 1829 break; 1830 case TOF_TS: 1831 while (!optlen || optlen % 4 != 2) { 1832 optlen += TCPOLEN_NOP; 1833 *optp++ = TCPOPT_NOP; 1834 } 1835 if (TCP_MAXOLEN - optlen < TCPOLEN_TIMESTAMP) 1836 continue; 1837 optlen += TCPOLEN_TIMESTAMP; 1838 *optp++ = TCPOPT_TIMESTAMP; 1839 *optp++ = TCPOLEN_TIMESTAMP; 1840 to->to_tsval = htonl(to->to_tsval); 1841 to->to_tsecr = htonl(to->to_tsecr); 1842 bcopy((u_char *)&to->to_tsval, optp, sizeof(to->to_tsval)); 1843 optp += sizeof(to->to_tsval); 1844 bcopy((u_char *)&to->to_tsecr, optp, sizeof(to->to_tsecr)); 1845 optp += sizeof(to->to_tsecr); 1846 break; 1847 case TOF_SIGNATURE: 1848 { 1849 int siglen = TCPOLEN_SIGNATURE - 2; 1850 1851 while (!optlen || optlen % 4 != 2) { 1852 optlen += TCPOLEN_NOP; 1853 *optp++ = TCPOPT_NOP; 1854 } 1855 if (TCP_MAXOLEN - optlen < TCPOLEN_SIGNATURE) { 1856 to->to_flags &= ~TOF_SIGNATURE; 1857 continue; 1858 } 1859 optlen += TCPOLEN_SIGNATURE; 1860 *optp++ = TCPOPT_SIGNATURE; 1861 *optp++ = TCPOLEN_SIGNATURE; 1862 to->to_signature = optp; 1863 while (siglen--) 1864 *optp++ = 0; 1865 break; 1866 } 1867 case TOF_SACK: 1868 { 1869 int sackblks = 0; 1870 struct sackblk *sack = (struct sackblk *)to->to_sacks; 1871 tcp_seq sack_seq; 1872 1873 while (!optlen || optlen % 4 != 2) { 1874 optlen += TCPOLEN_NOP; 1875 *optp++ = TCPOPT_NOP; 1876 } 1877 if (TCP_MAXOLEN - optlen < TCPOLEN_SACKHDR + TCPOLEN_SACK) 1878 continue; 1879 optlen += TCPOLEN_SACKHDR; 1880 *optp++ = TCPOPT_SACK; 1881 sackblks = min(to->to_nsacks, 1882 (TCP_MAXOLEN - optlen) / TCPOLEN_SACK); 1883 *optp++ = TCPOLEN_SACKHDR + sackblks * TCPOLEN_SACK; 1884 while (sackblks--) { 1885 sack_seq = htonl(sack->start); 1886 bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq)); 1887 optp += sizeof(sack_seq); 1888 sack_seq = htonl(sack->end); 1889 bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq)); 1890 optp += sizeof(sack_seq); 1891 optlen += TCPOLEN_SACK; 1892 sack++; 1893 } 1894 TCPSTAT_INC(tcps_sack_send_blocks); 1895 break; 1896 } 1897 case TOF_FASTOPEN: 1898 { 1899 int total_len; 1900 1901 /* XXX is there any point to aligning this option? */ 1902 total_len = TCPOLEN_FAST_OPEN_EMPTY + to->to_tfo_len; 1903 if (TCP_MAXOLEN - optlen < total_len) { 1904 to->to_flags &= ~TOF_FASTOPEN; 1905 continue; 1906 } 1907 *optp++ = TCPOPT_FAST_OPEN; 1908 *optp++ = total_len; 1909 if (to->to_tfo_len > 0) { 1910 bcopy(to->to_tfo_cookie, optp, to->to_tfo_len); 1911 optp += to->to_tfo_len; 1912 } 1913 optlen += total_len; 1914 break; 1915 } 1916 default: 1917 panic("%s: unknown TCP option type", __func__); 1918 break; 1919 } 1920 } 1921 1922 /* Terminate and pad TCP options to a 4 byte boundary. */ 1923 if (optlen % 4) { 1924 optlen += TCPOLEN_EOL; 1925 *optp++ = TCPOPT_EOL; 1926 } 1927 /* 1928 * According to RFC 793 (STD0007): 1929 * "The content of the header beyond the End-of-Option option 1930 * must be header padding (i.e., zero)." 1931 * and later: "The padding is composed of zeros." 1932 */ 1933 while (optlen % 4) { 1934 optlen += TCPOLEN_PAD; 1935 *optp++ = TCPOPT_PAD; 1936 } 1937 1938 KASSERT(optlen <= TCP_MAXOLEN, ("%s: TCP options too long", __func__)); 1939 return (optlen); 1940 } 1941 1942 /* 1943 * This is a copy of m_copym(), taking the TSO segment size/limit 1944 * constraints into account, and advancing the sndptr as it goes. 1945 */ 1946 struct mbuf * 1947 tcp_m_copym(struct mbuf *m, int32_t off0, int32_t *plen, 1948 int32_t seglimit, int32_t segsize, struct sockbuf *sb, bool hw_tls) 1949 { 1950 #ifdef KERN_TLS 1951 struct ktls_session *tls, *ntls; 1952 struct mbuf *start __diagused; 1953 #endif 1954 struct mbuf *n, **np; 1955 struct mbuf *top; 1956 int32_t off = off0; 1957 int32_t len = *plen; 1958 int32_t fragsize; 1959 int32_t len_cp = 0; 1960 int32_t *pkthdrlen; 1961 uint32_t mlen, frags; 1962 bool copyhdr; 1963 1964 KASSERT(off >= 0, ("tcp_m_copym, negative off %d", off)); 1965 KASSERT(len >= 0, ("tcp_m_copym, negative len %d", len)); 1966 if (off == 0 && m->m_flags & M_PKTHDR) 1967 copyhdr = true; 1968 else 1969 copyhdr = false; 1970 while (off > 0) { 1971 KASSERT(m != NULL, ("tcp_m_copym, offset > size of mbuf chain")); 1972 if (off < m->m_len) 1973 break; 1974 off -= m->m_len; 1975 if ((sb) && (m == sb->sb_sndptr)) { 1976 sb->sb_sndptroff += m->m_len; 1977 sb->sb_sndptr = m->m_next; 1978 } 1979 m = m->m_next; 1980 } 1981 np = ⊤ 1982 top = NULL; 1983 pkthdrlen = NULL; 1984 #ifdef KERN_TLS 1985 if (hw_tls && (m->m_flags & M_EXTPG)) 1986 tls = m->m_epg_tls; 1987 else 1988 tls = NULL; 1989 start = m; 1990 #endif 1991 while (len > 0) { 1992 if (m == NULL) { 1993 KASSERT(len == M_COPYALL, 1994 ("tcp_m_copym, length > size of mbuf chain")); 1995 *plen = len_cp; 1996 if (pkthdrlen != NULL) 1997 *pkthdrlen = len_cp; 1998 break; 1999 } 2000 #ifdef KERN_TLS 2001 if (hw_tls) { 2002 if (m->m_flags & M_EXTPG) 2003 ntls = m->m_epg_tls; 2004 else 2005 ntls = NULL; 2006 2007 /* 2008 * Avoid mixing TLS records with handshake 2009 * data or TLS records from different 2010 * sessions. 2011 */ 2012 if (tls != ntls) { 2013 MPASS(m != start); 2014 *plen = len_cp; 2015 if (pkthdrlen != NULL) 2016 *pkthdrlen = len_cp; 2017 break; 2018 } 2019 } 2020 #endif 2021 mlen = min(len, m->m_len - off); 2022 if (seglimit) { 2023 /* 2024 * For M_EXTPG mbufs, add 3 segments 2025 * + 1 in case we are crossing page boundaries 2026 * + 2 in case the TLS hdr/trailer are used 2027 * It is cheaper to just add the segments 2028 * than it is to take the cache miss to look 2029 * at the mbuf ext_pgs state in detail. 2030 */ 2031 if (m->m_flags & M_EXTPG) { 2032 fragsize = min(segsize, PAGE_SIZE); 2033 frags = 3; 2034 } else { 2035 fragsize = segsize; 2036 frags = 0; 2037 } 2038 2039 /* Break if we really can't fit anymore. */ 2040 if ((frags + 1) >= seglimit) { 2041 *plen = len_cp; 2042 if (pkthdrlen != NULL) 2043 *pkthdrlen = len_cp; 2044 break; 2045 } 2046 2047 /* 2048 * Reduce size if you can't copy the whole 2049 * mbuf. If we can't copy the whole mbuf, also 2050 * adjust len so the loop will end after this 2051 * mbuf. 2052 */ 2053 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 2054 mlen = (seglimit - frags - 1) * fragsize; 2055 len = mlen; 2056 *plen = len_cp + len; 2057 if (pkthdrlen != NULL) 2058 *pkthdrlen = *plen; 2059 } 2060 frags += howmany(mlen, fragsize); 2061 if (frags == 0) 2062 frags++; 2063 seglimit -= frags; 2064 KASSERT(seglimit > 0, 2065 ("%s: seglimit went too low", __func__)); 2066 } 2067 if (copyhdr) 2068 n = m_gethdr(M_NOWAIT, m->m_type); 2069 else 2070 n = m_get(M_NOWAIT, m->m_type); 2071 *np = n; 2072 if (n == NULL) 2073 goto nospace; 2074 if (copyhdr) { 2075 if (!m_dup_pkthdr(n, m, M_NOWAIT)) 2076 goto nospace; 2077 if (len == M_COPYALL) 2078 n->m_pkthdr.len -= off0; 2079 else 2080 n->m_pkthdr.len = len; 2081 pkthdrlen = &n->m_pkthdr.len; 2082 copyhdr = false; 2083 } 2084 n->m_len = mlen; 2085 len_cp += n->m_len; 2086 if (m->m_flags & (M_EXT | M_EXTPG)) { 2087 n->m_data = m->m_data + off; 2088 mb_dupcl(n, m); 2089 } else 2090 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 2091 (u_int)n->m_len); 2092 2093 if (sb && (sb->sb_sndptr == m) && 2094 ((n->m_len + off) >= m->m_len) && m->m_next) { 2095 sb->sb_sndptroff += m->m_len; 2096 sb->sb_sndptr = m->m_next; 2097 } 2098 off = 0; 2099 if (len != M_COPYALL) { 2100 len -= n->m_len; 2101 } 2102 m = m->m_next; 2103 np = &n->m_next; 2104 } 2105 return (top); 2106 nospace: 2107 m_freem(top); 2108 return (NULL); 2109 } 2110 2111 void 2112 tcp_sndbuf_autoscale(struct tcpcb *tp, struct socket *so, uint32_t sendwin) 2113 { 2114 2115 /* 2116 * Automatic sizing of send socket buffer. Often the send buffer 2117 * size is not optimally adjusted to the actual network conditions 2118 * at hand (delay bandwidth product). Setting the buffer size too 2119 * small limits throughput on links with high bandwidth and high 2120 * delay (eg. trans-continental/oceanic links). Setting the 2121 * buffer size too big consumes too much real kernel memory, 2122 * especially with many connections on busy servers. 2123 * 2124 * The criteria to step up the send buffer one notch are: 2125 * 1. receive window of remote host is larger than send buffer 2126 * (with a fudge factor of 5/4th); 2127 * 2. send buffer is filled to 7/8th with data (so we actually 2128 * have data to make use of it); 2129 * 3. send buffer fill has not hit maximal automatic size; 2130 * 4. our send window (slow start and cogestion controlled) is 2131 * larger than sent but unacknowledged data in send buffer. 2132 * 2133 * The remote host receive window scaling factor may limit the 2134 * growing of the send buffer before it reaches its allowed 2135 * maximum. 2136 * 2137 * It scales directly with slow start or congestion window 2138 * and does at most one step per received ACK. This fast 2139 * scaling has the drawback of growing the send buffer beyond 2140 * what is strictly necessary to make full use of a given 2141 * delay*bandwidth product. However testing has shown this not 2142 * to be much of an problem. At worst we are trading wasting 2143 * of available bandwidth (the non-use of it) for wasting some 2144 * socket buffer memory. 2145 * 2146 * TODO: Shrink send buffer during idle periods together 2147 * with congestion window. Requires another timer. Has to 2148 * wait for upcoming tcp timer rewrite. 2149 * 2150 * XXXGL: should there be used sbused() or sbavail()? 2151 */ 2152 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 2153 int lowat; 2154 2155 lowat = V_tcp_sendbuf_auto_lowat ? so->so_snd.sb_lowat : 0; 2156 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat - lowat && 2157 sbused(&so->so_snd) >= 2158 (so->so_snd.sb_hiwat / 8 * 7) - lowat && 2159 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 2160 sendwin >= (sbused(&so->so_snd) - 2161 (tp->snd_nxt - tp->snd_una))) { 2162 if (!sbreserve_locked(so, SO_SND, 2163 min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc, 2164 V_tcp_autosndbuf_max), curthread)) 2165 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 2166 } 2167 } 2168 } 2169