/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, path_mtu_discovery) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(path_mtu_discovery), 1,
	"Enable Path MTU Discovery");

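/*
 * All of these knobs are virtualized (CTLFLAG_VNET), so each vnet gets
 * its own copy; e.g. setting "sysctl net.inet.tcp.tso=0" inside a vnet
 * jail disables TSO for that network stack instance only.
 */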
VNET_DEFINE(int, tcp_do_tso) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_do_tso), 0,
	"Enable TCP Segmentation Offload");

VNET_DEFINE(int, tcp_sendspace) = 1024*32;
#define	V_tcp_sendspace	VNET(tcp_sendspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_sendspace), 0, "Initial send socket buffer size");

VNET_DEFINE(int, tcp_do_autosndbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_do_autosndbuf), 0,
	"Enable automatic send buffer sizing");

VNET_DEFINE(int, tcp_autosndbuf_inc) = 8*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_autosndbuf_inc), 0,
	"Increment step size of automatic send buffer");

VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_autosndbuf_max), 0,
	"Max size of automatic send buffer");

VNET_DEFINE(int, tcp_sendbuf_auto_lowat) = 0;
#define	V_tcp_sendbuf_auto_lowat	VNET(tcp_sendbuf_auto_lowat)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto_lowat, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_sendbuf_auto_lowat), 0,
	"Modify threshold for auto send buffer growth to account for SO_SNDLOWAT");

/*
 * Make sure that either retransmit or persist timer is set for SYN, FIN and
 * non-ACK.
 */
#define	TCP_XMIT_TIMER_ASSERT(tp, len, th_flags)			\
	KASSERT(((len) == 0 && ((th_flags) & (TH_SYN | TH_FIN)) == 0) ||\
	    tcp_timer_active((tp), TT_REXMT) ||				\
	    tcp_timer_active((tp), TT_PERSIST),				\
	    ("neither rexmt nor persist timer is set"))

static void inline	cc_after_idle(struct tcpcb *tp);

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established output helper hook.
 */
void
hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to, uint32_t len, int tso)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_OUT]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;
		hhook_data.len = len;
		hhook_data.tso = tso;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_OUT], &hhook_data,
		    tp->osd);
	}
}
#endif

/*
 * CC wrapper hook functions
 */
static void inline
cc_after_idle(struct tcpcb *tp)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->after_idle != NULL)
		CC_ALGO(tp)->after_idle(tp->ccv);
}
/*
 * TCP output routine: figure out what should be sent and send it.
 */
int
tcp_output(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;
	int32_t len;
	uint32_t recwin, sendwin;
	int off, flags, error = 0;	/* Keep compiler happy */
	u_int if_hw_tsomaxsegcount = 0;
	u_int if_hw_tsomaxsegsize;
	struct mbuf *m;
	struct ip *ip = NULL;
#ifdef TCPDEBUG
	struct ipovly *ipov = NULL;
#endif
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned ipoptlen, optlen, hdrlen;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	unsigned ipsec_optlen = 0;
#endif
	int idle, sendalot, curticks;
	int sack_rxmit, sack_bytes_rxmt;
	struct sackhole *p;
	int tso, mtu;
	struct tcpopt to;
	unsigned int wanted_cookie = 0;
	unsigned int dont_sendalot = 0;
#if 0
	int maxburst = TCP_MAXBURST;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;

	isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif

	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return (tcp_offload_output(tp));
#endif

	/*
	 * For TFO connections in SYN_SENT or SYN_RECEIVED,
	 * only allow the initial SYN or SYN|ACK and those sent
	 * by the retransmit timer.
	 */
	if (IS_FASTOPEN(tp->t_flags) &&
	    ((tp->t_state == TCPS_SYN_SENT) ||
	     (tp->t_state == TCPS_SYN_RECEIVED)) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
	    (tp->snd_nxt != tp->snd_una))       /* not a retransmit */
		return (0);

	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && ticks - tp->t_rcvtime >= tp->t_rxtcur)
		cc_after_idle(tp);
	tp->t_flags &= ~TF_LASTIDLE;
	if (idle) {
		if (tp->t_flags & TF_MORETOCOME) {
			tp->t_flags |= TF_LASTIDLE;
			idle = 0;
		}
	}
again:
	/*
	 * If we've recently taken a timeout, snd_max will be greater than
	 * snd_nxt.  There may be SACK information that allows us to avoid
	 * resending already delivered data.  Adjust snd_nxt accordingly.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_LT(tp->snd_nxt, tp->snd_max))
		tcp_sack_adjust(tp);
	sendalot = 0;
	tso = 0;
	mtu = 0;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd);

	flags = tcp_outflags[tp->t_state];
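	/*
	 * Example: if the scoreboard records a hole [1000, 3000) with
	 * rxmit at 1000 and the usable window allows it, the code below
	 * retransmits from that hole first (off = 1000 - snd_una) before
	 * any new data is considered.
	 */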
	/*
	 * Send any SACK-generated retransmissions.  If we're explicitly trying
	 * to send out new data (when sendalot is 1), bypass this function.
	 * If we retransmit in fast recovery mode, decrement snd_cwnd, since
	 * we're replacing a (future) new transmission with a retransmission
	 * now, and we previously incremented snd_cwnd in tcp_input().
	 */
	/*
	 * Still in SACK recovery, reset the rxmit flag to zero.
	 */
	sack_rxmit = 0;
	sack_bytes_rxmt = 0;
	len = 0;
	p = NULL;
	if ((tp->t_flags & TF_SACK_PERMIT) && IN_FASTRECOVERY(tp->t_flags) &&
	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
		uint32_t cwin;

		cwin =
		    imax(min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt, 0);
		/* Do not retransmit SACK segments beyond snd_recover */
		if (SEQ_GT(p->end, tp->snd_recover)) {
			/*
			 * (At least) part of sack hole extends beyond
			 * snd_recover.  Check to see if we can rexmit data
			 * for this hole.
			 */
			if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
				/*
				 * Can't rexmit any more data for this hole.
				 * That data will be rexmitted in the next
				 * sack recovery episode, when snd_recover
				 * moves past p->rxmit.
				 */
				p = NULL;
				goto after_sack_rexmit;
			} else
				/* Can rexmit part of the current hole */
				len = ((int32_t)ulmin(cwin,
				    tp->snd_recover - p->rxmit));
		} else
			len = ((int32_t)ulmin(cwin, p->end - p->rxmit));
		off = p->rxmit - tp->snd_una;
		KASSERT(off >= 0, ("%s: sack block to the left of una : %d",
		    __func__, off));
		if (len > 0) {
			sack_rxmit = 1;
			sendalot = 1;
			TCPSTAT_INC(tcps_sack_rexmits);
			TCPSTAT_ADD(tcps_sack_rexmit_bytes,
			    min(len, tp->t_maxseg));
		}
	}
after_sack_rexmit:
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	SOCKBUF_LOCK(&so->so_snd);
	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCEDATA) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < sbused(&so->so_snd))
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_timer_activate(tp, TT_PERSIST, 0);
			tp->t_rxtshift = 0;
		}
	}
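	/*
	 * In other words, a persist-timer probe into a zero window is sent
	 * as a single byte: with snd_wnd == 0 and unsent data queued,
	 * sendwin is forced to 1 above so exactly one byte beyond the right
	 * edge goes out and solicits a fresh window advertisement.
	 */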
	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 *
	 * If sack_rxmit is true we are retransmitting from the scoreboard
	 * in which case len is already set.
	 */
	if (sack_rxmit == 0) {
		if (sack_bytes_rxmt == 0)
			len = ((int32_t)min(sbavail(&so->so_snd), sendwin) -
			    off);
		else {
			int32_t cwin;

			/*
			 * We are inside of a SACK recovery episode and are
			 * sending new data, having retransmitted all the
			 * data possible in the scoreboard.
			 */
			len = ((int32_t)min(sbavail(&so->so_snd), tp->snd_wnd) -
			    off);
			/*
			 * Don't remove this (len > 0) check !
			 * We explicitly check for len > 0 here (although it
			 * isn't really necessary), to work around a gcc
			 * optimization issue - to force gcc to compute
			 * len above. Without this check, the computation
			 * of len is bungled by the optimizer.
			 */
			if (len > 0) {
				cwin = tp->snd_cwnd -
				    (tp->snd_nxt - tp->sack_newdata) -
				    sack_bytes_rxmt;
				if (cwin < 0)
					cwin = 0;
				len = imin(len, cwin);
			}
		}
	}

	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if segment contains data and if we don't
	 * know that foreign host supports TAO, suppress sending segment.
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		if (tp->t_state != TCPS_SYN_RECEIVED)
			flags &= ~TH_SYN;
		/*
		 * When sending additional segments following a TFO SYN|ACK,
		 * do not include the SYN bit.
		 */
		if (IS_FASTOPEN(tp->t_flags) &&
		    (tp->t_state == TCPS_SYN_RECEIVED))
			flags &= ~TH_SYN;
		off--, len++;
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
		len = 0;
		flags &= ~TH_FIN;
	}

	/*
	 * On TFO sockets, ensure no data is sent in the following cases:
	 *
	 *  - When retransmitting SYN|ACK on a passively-created socket
	 *
	 *  - When retransmitting SYN on an actively created socket
	 *
	 *  - When sending a zero-length cookie (cookie request) on an
	 *    actively created socket
	 *
	 *  - When the socket is in the CLOSED state (RST is being sent)
	 */
	if (IS_FASTOPEN(tp->t_flags) &&
	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
	     ((tp->t_state == TCPS_SYN_SENT) &&
	      (tp->t_tfo_client_cookie_len == 0)) ||
	     (flags & TH_RST)))
		len = 0;
	if (len <= 0) {
		/*
		 * If FIN has been sent but not acked,
		 * but we haven't been called to retransmit,
		 * len will be < 0.  Otherwise, window shrank
		 * after we sent into it.  If window shrank to 0,
		 * cancel pending retransmit, pull snd_nxt back
		 * to (closed) window, and set the persist timer
		 * if it isn't already going.  If the window didn't
		 * close completely, just wait for an ACK.
		 *
		 * We also do a general check here to ensure that
		 * we will set the persist timer when we have data
		 * to send, but a 0-byte window. This makes sure
		 * the persist timer is set even if the packet
		 * hits one of the "goto send" lines below.
		 */
		len = 0;
		if ((sendwin == 0) && (TCPS_HAVEESTABLISHED(tp->t_state)) &&
		    (off < (int) sbavail(&so->so_snd))) {
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_timer_active(tp, TT_PERSIST))
				tcp_setpersist(tp);
		}
	}

	/* len will be >= 0 after this point. */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	tcp_sndbuf_autoscale(tp, so, sendwin);
	/*
	 * Decide if we can use TCP Segmentation Offloading (if supported by
	 * hardware).
	 *
	 * TSO may only be used if we are in a pure bulk sending state.  The
	 * presence of TCP-MD5, SACK retransmits, SACK advertisements and
	 * IP options prevent using TSO.  With TSO the TCP header is the same
	 * (except for the sequence number) for all generated packets.  This
	 * makes it impossible to transmit any options which vary per generated
	 * segment or packet.
	 *
	 * IPv4 handling has a clear separation of ip options and ip header
	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
	 * the right thing below to provide length of just ip options and thus
	 * checking for ipoptlen is enough to decide if ip options are present.
	 */
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/*
	 * Pre-calculate here as we save another lookup into the darknesses
	 * of IPsec that way and can actually decide if TSO is ok.
	 */
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6))
		ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4))
		ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
#endif /* INET */
#endif /* IPSEC */
#ifdef INET6
	if (isipv6)
		ipoptlen = ip6_optlen(tp->t_inpcb);
	else
#endif
	if (tp->t_inpcb->inp_options)
		ipoptlen = tp->t_inpcb->inp_options->m_len -
		    offsetof(struct ipoption, ipopt_list);
	else
		ipoptlen = 0;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	ipoptlen += ipsec_optlen;
#endif

	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg &&
	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
	    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
	    ipoptlen == 0 && !(flags & TH_SYN))
		tso = 1;

	if (sack_rxmit) {
		if (SEQ_LT(p->rxmit + len, tp->snd_una + sbused(&so->so_snd)))
			flags &= ~TH_FIN;
	} else {
		if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
		    sbused(&so->so_snd)))
			flags &= ~TH_FIN;
	}

	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);

	/*
	 * Sender silly window avoidance.  We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- We have a full segment (or more with TSO)
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we've timed out (e.g. persist timer)
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (receiver may be limited by the window size)
	 *	- we need to retransmit
	 */
	if (len) {
		if (len >= tp->t_maxseg)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome.  XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    (uint32_t)len + (uint32_t)off >= sbavail(&so->so_snd) &&
		    (tp->t_flags & TF_NOPUSH) == 0) {
			goto send;
		}
		if (tp->t_flags & TF_FORCEDATA)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
		if (sack_rxmit)
			goto send;
	}
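	/*
	 * Example of the rules above: with t_maxseg = 1460 and 800 bytes
	 * queued mid-stream, nothing is sent unless the 800 bytes are the
	 * tail of the write and Nagle allows it (idle or TF_NODELAY), the
	 * persist timer forced it, they exceed half of max_sndwnd, or they
	 * are a retransmission.
	 */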
	/*
	 * Sending of standalone window updates.
	 *
	 * Window updates are important when we close our window due to a
	 * full socket buffer and are opening it again after the application
	 * reads data from it. Once the window has opened again and the
	 * remote end starts to send again the ACK clock takes over and
	 * provides the most current window information.
	 *
	 * We must avoid the silly window syndrome, in which every read
	 * from the receive buffer, no matter how small, causes a window
	 * update to be sent.  We also should avoid sending a flurry of
	 * window updates when the socket buffer had queued a lot of data
	 * and the application is doing small reads.
	 *
	 * Prevent a flurry of pointless window updates by only sending
	 * an update when we can increase the advertised window by more
	 * than 1/4th of the socket buffer capacity.  When the buffer is
	 * getting full or is very small be more aggressive and send an
	 * update whenever we can increase by two mss sized segments.
	 * In all other situations the ACK's to new incoming data will
	 * carry further window increases.
	 *
	 * Don't send an independent window update if a delayed
	 * ACK is pending (it will get piggy-backed on it) or the
	 * remote side already has done a half-close and won't send
	 * more data.  Skip this if the connection is in T/TCP
	 * half-open state.
	 */
	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
	    !(tp->t_flags & TF_DELACK) &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * "adv" is the amount we could increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		int32_t adv;
		int oldwin;

		adv = recwin;
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
			oldwin = (tp->rcv_adv - tp->rcv_nxt);
			adv -= oldwin;
		} else
			oldwin = 0;

		/*
		 * If the new window size ends up being the same as or less
		 * than the old size when it is scaled, then don't force
		 * a window update.
		 */
		if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
			goto dontupdate;

		if (adv >= (int32_t)(2 * tp->t_maxseg) &&
		    (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
		     recwin <= (so->so_rcv.sb_hiwat / 8) ||
		     so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg))
			goto send;
		if (2 * adv >= (int32_t)so->so_rcv.sb_hiwat)
			goto send;
	}
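	/*
	 * Worked example for the thresholds above: with sb_hiwat = 65536
	 * and t_maxseg = 1460, a standalone update goes out once the
	 * application has freed at least 16384 bytes (1/4th of the buffer),
	 * or once 2920 bytes (two segments) can be advertised while the
	 * window is nearly closed (recwin <= 8192).
	 */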
dontupdate:

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if (flags & TH_FIN &&
	    ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
		goto send;
	/*
	 * In SACK, it is possible for tcp_output to fail to send a segment
	 * after the retransmission timer has been turned off.  Make sure
	 * that the retransmission timer is set.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&
	    !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
		goto just_return;
	}
	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_timer_active(tp, TT_PERSIST)
	 *	is true when we are in persist state.
	 * (tp->t_flags & TF_FORCEDATA)
	 *	is set when we are called to send a persist packet.
	 * tcp_timer_active(tp, TT_REXMT)
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise force out a byte.
	 */
	if (sbavail(&so->so_snd) && !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
just_return:
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);

send:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	if (len > 0) {
		if (len >= tp->t_maxseg)
			tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
	}
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
#ifdef INET6
	if (isipv6)
		hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	else
#endif
		hdrlen = sizeof (struct tcpiphdr);

	/*
	 * Compute options for segment.
	 * We only have to care about SYN and established connection
	 * segments.  Options for SYN-ACK segments are handled in TCP
	 * syncache.
	 */
	to.to_flags = 0;
	if ((tp->t_flags & TF_NOOPT) == 0) {
		/* Maximum segment size. */
		if (flags & TH_SYN) {
			tp->snd_nxt = tp->iss;
			to.to_mss = tcp_mssopt(&tp->t_inpcb->inp_inc);
			to.to_flags |= TOF_MSS;

			/*
			 * On SYN or SYN|ACK transmits on TFO connections,
			 * only include the TFO option if it is not a
			 * retransmit, as the presence of the TFO option may
			 * have caused the original SYN or SYN|ACK to have
			 * been dropped by a middlebox.
			 */
			if (IS_FASTOPEN(tp->t_flags) &&
			    (tp->t_rxtshift == 0)) {
				if (tp->t_state == TCPS_SYN_RECEIVED) {
					to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
					to.to_tfo_cookie =
					    (u_int8_t *)&tp->t_tfo_cookie.server;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
				} else if (tp->t_state == TCPS_SYN_SENT) {
					to.to_tfo_len =
					    tp->t_tfo_client_cookie_len;
					to.to_tfo_cookie =
					    tp->t_tfo_cookie.client;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
					/*
					 * If we wind up having more data to
					 * send with the SYN than can fit in
					 * one segment, don't send any more
					 * until the SYN|ACK comes back from
					 * the other end.
					 */
					dont_sendalot = 1;
				}
			}
		}
		/* Window scaling. */
		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
			to.to_wscale = tp->request_r_scale;
			to.to_flags |= TOF_SCALE;
		}
		/* Timestamps. */
		if ((tp->t_flags & TF_RCVD_TSTMP) ||
		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
			curticks = tcp_ts_getticks();
			to.to_tsval = curticks + tp->ts_offset;
			to.to_tsecr = tp->ts_recent;
			to.to_flags |= TOF_TS;
			if (tp->t_rxtshift == 1)
				tp->t_badrxtwin = curticks;
		}

		/* Set receive buffer autosizing timestamp. */
		if (tp->rfbuf_ts == 0 &&
		    (so->so_rcv.sb_flags & SB_AUTOSIZE))
			tp->rfbuf_ts = tcp_ts_getticks();
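		/*
		 * to_tsval above is the local timestamp clock plus a
		 * per-connection offset; the peer simply echoes it back in
		 * to_tsecr, which is what makes RTT measurement and PAWS
		 * work without any clock agreement between the endpoints.
		 */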
		/* Selective ACK's. */
		if (tp->t_flags & TF_SACK_PERMIT) {
			if (flags & TH_SYN)
				to.to_flags |= TOF_SACKPERM;
			else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
			    (tp->t_flags & TF_SACK_PERMIT) &&
			    tp->rcv_numsacks > 0) {
				to.to_flags |= TOF_SACK;
				to.to_nsacks = tp->rcv_numsacks;
				to.to_sacks = (u_char *)tp->sackblks;
			}
		}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/* TCP-MD5 (RFC2385). */
		/*
		 * Check that TCP_MD5SIG is enabled in tcpcb to
		 * account for the size needed to set this TCP option.
		 */
		if (tp->t_flags & TF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif /* TCP_SIGNATURE */

		/* Processing the options. */
		hdrlen += optlen = tcp_addoptions(&to, opt);
		/*
		 * If we wanted a TFO option to be added, but it was unable
		 * to fit, ensure no data is sent.
		 */
		if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
		    !(to.to_flags & TOF_FASTOPEN))
			len = 0;
	}

	/*
	 * Adjust data length if insertion of options will
	 * bump the packet length beyond the t_maxseg length.
	 * Clear the FIN bit because we cut off the tail of
	 * the segment.
	 */
	if (len + optlen + ipoptlen > tp->t_maxseg) {
		flags &= ~TH_FIN;

		if (tso) {
			u_int if_hw_tsomax;
			u_int moff;
			int max_len;

			/* extract TSO information */
			if_hw_tsomax = tp->t_tsomax;
			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;

			/*
			 * Limit a TSO burst to prevent it from
			 * overflowing or exceeding the maximum length
			 * allowed by the network interface:
			 */
			KASSERT(ipoptlen == 0,
			    ("%s: TSO can't do IP options", __func__));

			/*
			 * Check if we should limit by maximum payload
			 * length:
			 */
			if (if_hw_tsomax != 0) {
				/* compute maximum TSO length */
				max_len = (if_hw_tsomax - hdrlen -
				    max_linkhdr);
				if (max_len <= 0) {
					len = 0;
				} else if (len > max_len) {
					sendalot = 1;
					len = max_len;
				}
			}

			/*
			 * Prevent the last segment from being
			 * fractional unless the send sockbuf can be
			 * emptied:
			 */
			max_len = (tp->t_maxseg - optlen);
			if (((uint32_t)off + (uint32_t)len) <
			    sbavail(&so->so_snd)) {
				moff = len % max_len;
				if (moff != 0) {
					len -= moff;
					sendalot = 1;
				}
			}

			/*
			 * In case there are too many small fragments
			 * don't use TSO:
			 */
			if (len <= max_len) {
				len = max_len;
				sendalot = 1;
				tso = 0;
			}

			/*
			 * Send the FIN in a separate segment
			 * after the bulk sending is done.
			 * We don't trust the TSO implementations
			 * to clear the FIN flag on all but the
			 * last segment.
			 */
			if (tp->t_flags & TF_NEEDFIN)
				sendalot = 1;
		} else {
			len = tp->t_maxseg - optlen - ipoptlen;
			sendalot = 1;
			if (dont_sendalot)
				sendalot = 0;
		}
	} else
		tso = 0;
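	/*
	 * Example of the TSO trimming above: with t_maxseg = 1460 and
	 * optlen = 12 (timestamps), the on-wire segment size is 1448, so a
	 * 100000 byte burst is first clamped to if_hw_tsomax and then
	 * rounded down to a multiple of 1448; the fractional tail is left
	 * behind for the next pass (sendalot = 1).
	 */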
	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
	    ("%s: len > IP_MAXPACKET", __func__));

/*#ifdef DIAGNOSTIC*/
#ifdef INET6
	if (max_linkhdr + hdrlen > MCLBYTES)
#else
	if (max_linkhdr + hdrlen > MHLEN)
#endif
		panic("tcphdr too big");
/*#endif*/

	/*
	 * This KASSERT is here to catch edge cases at a well defined place.
	 * Before, those had triggered (random) panic conditions further down.
	 */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		struct mbuf *mb;
		struct sockbuf *msb;
		u_int moff;

		if ((tp->t_flags & TF_FORCEDATA) && len == 1)
			TCPSTAT_INC(tcps_sndprobe);
		else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
			tp->t_sndrexmitpack++;
			TCPSTAT_INC(tcps_sndrexmitpack);
			TCPSTAT_ADD(tcps_sndrexmitbyte, len);
		} else {
			TCPSTAT_INC(tcps_sndpack);
			TCPSTAT_ADD(tcps_sndbyte, len);
		}
#ifdef INET6
		if (MHLEN < hdrlen + max_linkhdr)
			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		else
#endif
			m = m_gethdr(M_NOWAIT, MT_DATA);

		if (m == NULL) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = ENOBUFS;
			sack_rxmit = 0;
			goto out;
		}

		m->m_data += max_linkhdr;
		m->m_len = hdrlen;

		/*
		 * Start the m_copy functions from the closest mbuf
		 * to the offset in the socket buffer chain.
		 */
		mb = sbsndptr_noadv(&so->so_snd, off, &moff);
		if (len <= MHLEN - hdrlen - max_linkhdr) {
			m_copydata(mb, moff, len,
			    mtod(m, caddr_t) + hdrlen);
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				sbsndptr_adv(&so->so_snd, mb, len);
			m->m_len += len;
		} else {
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				msb = NULL;
			else
				msb = &so->so_snd;
			m->m_next = tcp_m_copym(mb, moff,
			    &len, if_hw_tsomaxsegcount,
			    if_hw_tsomaxsegsize, msb);
			if (len <= (tp->t_maxseg - optlen)) {
				/*
				 * Must have run out of mbufs for the copy;
				 * the chain is now short enough that TSO is
				 * no longer needed.  Don't set sendalot,
				 * since we are low on mbufs.
				 */
				tso = 0;
			}
			if (m->m_next == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				(void) m_free(m);
				error = ENOBUFS;
				sack_rxmit = 0;
				goto out;
			}
		}
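		/*
		 * Note on sbsndptr_noadv() above: it returns the mbuf
		 * holding byte 'off' of the send buffer and remembers the
		 * position, so successive calls for a long steady stream
		 * don't have to walk the whole mbuf chain from the head
		 * for every segment sent.
		 */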
		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (((uint32_t)off + (uint32_t)len == sbused(&so->so_snd)) &&
		    !(flags & TH_SYN))
			flags |= TH_PUSH;
		SOCKBUF_UNLOCK(&so->so_snd);
	} else {
		SOCKBUF_UNLOCK(&so->so_snd);
		if (tp->t_flags & TF_ACKNOW)
			TCPSTAT_INC(tcps_sndacks);
		else if (flags & (TH_SYN|TH_FIN|TH_RST))
			TCPSTAT_INC(tcps_sndctrl);
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			TCPSTAT_INC(tcps_sndurg);
		else
			TCPSTAT_INC(tcps_sndwinup);

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOBUFS;
			sack_rxmit = 0;
			goto out;
		}
#ifdef INET6
		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
		    MHLEN >= hdrlen) {
			M_ALIGN(m, hdrlen);
		} else
#endif
		m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	SOCKBUF_UNLOCK_ASSERT(&so->so_snd);
	m->m_pkthdr.rcvif = (struct ifnet *)0;
#ifdef MAC
	mac_inpcb_create_mbuf(tp->t_inpcb, m);
#endif
#ifdef INET6
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(tp->t_inpcb, ip6, th);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
#ifdef TCPDEBUG
		ipov = (struct ipovly *)ip;
#endif
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(tp->t_inpcb, ip, th);
	}

	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;
	/*
	 * If we are starting a connection, send ECN setup
	 * SYN packet. If we are on a retransmit, we may
	 * resend those bits a number of times as per
	 * RFC 3168.
	 */
	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
		if (tp->t_rxtshift >= 1) {
			if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
				flags |= TH_ECE|TH_CWR;
		} else
			flags |= TH_ECE|TH_CWR;
	}

	if (tp->t_state == TCPS_ESTABLISHED &&
	    (tp->t_flags & TF_ECN_PERMIT)) {
		/*
		 * If the peer has ECN, mark data packets with
		 * ECN capable transmission (ECT).
		 * Ignore pure ack packets, retransmissions and window probes.
		 */
		if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
		    !((tp->t_flags & TF_FORCEDATA) && len == 1)) {
#ifdef INET6
			if (isipv6)
				ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
			else
#endif
				ip->ip_tos |= IPTOS_ECN_ECT0;
			TCPSTAT_INC(tcps_ecn_ect0);
		}

		/*
		 * Reply with proper ECN notifications.
		 */
		if (tp->t_flags & TF_ECN_SND_CWR) {
			flags |= TH_CWR;
			tp->t_flags &= ~TF_ECN_SND_CWR;
		}
		if (tp->t_flags & TF_ECN_SND_ECE)
			flags |= TH_ECE;
	}
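	/*
	 * ECN in practice: the initial SYN carries ECE|CWR to negotiate;
	 * once the peer has agreed (TF_ECN_PERMIT), new data segments are
	 * sent with the ECT(0) codepoint, and after the receiver reports
	 * congestion we set CWR on the next segment to acknowledge it.
	 */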
	/*
	 * If we are doing retransmissions, then snd_nxt will
	 * not reflect the first unsent octet.  For ACK only
	 * packets, we do not want the sequence number of the
	 * retransmitted packet, we want the sequence number
	 * of the next unsent octet.  So, if there is no data
	 * (and no SYN or FIN), use snd_max instead of snd_nxt
	 * when filling in ti_seq.  But if we are in persist
	 * state, snd_max might reflect one byte beyond the
	 * right edge of the window, so use snd_nxt in that
	 * case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 */
	if (sack_rxmit == 0) {
		if (len || (flags & (TH_SYN|TH_FIN)) ||
		    tcp_timer_active(tp, TT_PERSIST))
			th->th_seq = htonl(tp->snd_nxt);
		else
			th->th_seq = htonl(tp->snd_max);
	} else {
		th->th_seq = htonl(p->rxmit);
		p->rxmit += len;
		tp->sackhint.sack_bytes_rexmit += len;
	}
	th->th_ack = htonl(tp->rcv_nxt);
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	}
	th->th_flags = flags;
	/*
	 * Calculate receive window.  Don't shrink window,
	 * but avoid silly window syndrome.
	 */
	if (recwin < (so->so_rcv.sb_hiwat / 4) &&
	    recwin < tp->t_maxseg)
		recwin = 0;
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
	    recwin < (tp->rcv_adv - tp->rcv_nxt))
		recwin = (tp->rcv_adv - tp->rcv_nxt);

	/*
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.  The <SYN,ACK>
	 * case is handled in syncache.
	 */
	if (flags & TH_SYN)
		th->th_win = htons((u_short)
		    (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
	else
		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));

	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
	 * a 0 window.  This may cause the remote transmitter to stall.  This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer.  This can occur if the receiver is attempting
	 * to read more data than can be buffered prior to transmitting on
	 * the connection.
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		th->th_flags |= TH_URG;
	} else
		/*
		 * If no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;		/* drag it along */

	/*
	 * Put TCP length in extended header, and then
	 * checksum extended header and data.
	 */
	m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		/*
		 * Calculate MD5 signature and put it into the place
		 * determined before.
		 * NOTE: since TCP options buffer doesn't point into
		 * mbuf's data, calculate offset and use it.
		 */
		if (!TCPMD5_ENABLED() || (error = TCPMD5_OUTPUT(m, th,
		    (u_char *)(th + 1) + (to.to_signature - opt))) != 0) {
			/*
			 * Do not send segment if the calculation of MD5
			 * digest has failed.
			 */
			m_freem(m);
			goto out;
		}
	}
#endif
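	/*
	 * Checksum offload convention: th_sum is seeded below with only
	 * the pseudo-header sum (addresses, protocol, TCP length), and
	 * csum_flags tells the driver, or the software fallback in the IP
	 * output path, to fold the TCP header and payload into it before
	 * the packet hits the wire.
	 */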
#ifdef INET6
	if (isipv6) {
		/*
		 * There is no need to fill in ip6_plen right now.
		 * It will be filled later by ip6_output.
		 */
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr) +
		    optlen + len, IPPROTO_TCP, 0);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + IPPROTO_TCP + len + optlen));

		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	}
#endif

	/*
	 * Enable TSO and specify the size of the segments.
	 * The TCP pseudo header checksum is always provided.
	 */
	if (tso) {
		KASSERT(len > tp->t_maxseg - optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
	}

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	KASSERT(len + hdrlen + ipoptlen - ipsec_optlen == m_length(m, NULL),
	    ("%s: mbuf chain shorter than expected: %d + %u + %u - %u != %u",
	    __func__, len, hdrlen, ipoptlen, ipsec_optlen, m_length(m, NULL)));
#else
	KASSERT(len + hdrlen + ipoptlen == m_length(m, NULL),
	    ("%s: mbuf chain shorter than expected: %d + %u + %u != %u",
	    __func__, len, hdrlen, ipoptlen, m_length(m, NULL)));
#endif

#ifdef TCP_HHOOK
	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);
#endif

#ifdef TCPDEBUG
	/*
	 * Trace.
	 */
	if (so->so_options & SO_DEBUG) {
		u_short save = 0;
#ifdef INET6
		if (!isipv6)
#endif
		{
			save = ipov->ih_len;
			ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen + (th->th_off << 2) */);
		}
		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
#ifdef INET6
		if (!isipv6)
#endif
			ipov->ih_len = save;
	}
#endif /* TCPDEBUG */
	TCP_PROBE3(debug__output, tp, th, m);

	/* We're getting ready to send; log now. */
	TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
	    len, NULL, false);

	/*
	 * Fill in IP length and desired time to live and
	 * send to IP level.  There should be a better way
	 * to handle ttl and tos; we could keep them in
	 * the template, but need a way to checksum without them.
	 */
	/*
	 * m->m_pkthdr.len should have been set before checksum calculation,
	 * because in6_cksum() needs it.
	 */
#ifdef INET6
	if (isipv6) {
		/*
		 * we separately set hoplimit for every segment, since the
		 * user might want to change the value via setsockopt.
		 * Also, desired default hop limit might be changed via
		 * Neighbor Discovery.
		 */
		ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb, NULL);

		/*
		 * Set the packet size here for the benefit of DTrace probes.
		 * ip6_output() will set it properly; it's supposed to include
		 * the option header lengths as well.
		 */
		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));

		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);

		TCP_PROBE5(send, NULL, tp, ip6, tp, th);
#ifdef TCPPCAP
		/* Save packet, if requested. */
		tcp_pcap_add(th, m, &(tp->t_outpkts));
#endif

		/* TODO: IPv6 IP6TOS_ECT bit on */
		error = ip6_output(m, tp->t_inpcb->in6p_outputopts,
		    &tp->t_inpcb->inp_route6,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, NULL, tp->t_inpcb);

		if (error == EMSGSIZE && tp->t_inpcb->inp_route6.ro_rt != NULL)
			mtu = tp->t_inpcb->inp_route6.ro_rt->rt_mtu;
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
		if (tp->t_inpcb->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(tp->t_inpcb, NULL);
#endif /* INET6 */
		/*
		 * If we do path MTU discovery, then we set DF on every packet.
		 * This might not be the best thing to do according to RFC3390
		 * Section 2. However the tcp hostcache mitigates the problem
		 * so it affects only the first tcp connection with a host.
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe fallback.
		 */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
			ip->ip_off |= htons(IP_DF);
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
		} else {
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		}

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);

		TCP_PROBE5(send, NULL, tp, ip, tp, th);

#ifdef TCPPCAP
		/* Save packet, if requested. */
		tcp_pcap_add(th, m, &(tp->t_outpkts));
#endif

		error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0,
		    tp->t_inpcb);

		if (error == EMSGSIZE && tp->t_inpcb->inp_route.ro_rt != NULL)
			mtu = tp->t_inpcb->inp_route.ro_rt->rt_mtu;
	}
#endif /* INET */

out:
	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.
	 */
	if ((tp->t_flags & TF_FORCEDATA) == 0 ||
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN|TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		if (sack_rxmit)
			goto timer;
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			if (tp->t_rtttime == 0) {
				tp->t_rtttime = ticks;
				tp->t_rtseq = startseq;
				TCPSTAT_INC(tcps_segstimed);
			}
		}
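		/*
		 * t_rtttime/t_rtseq above implement the classic one sample
		 * per window RTT measurement: the sample completes on the
		 * input side when an ACK covering t_rtseq arrives, and no
		 * new sample is started while one is still in flight.
		 */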
		/*
		 * Set retransmit timer if not currently set,
		 * and not doing a pure ack or a keep-alive probe.
		 * Initial value for retransmit timer is smoothed
		 * round-trip time + 2 * round-trip time variance.
		 * Initialize shift counter which is used for backoff
		 * of retransmit time.
		 */
timer:
		if (!tcp_timer_active(tp, TT_REXMT) &&
		    ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
		     (tp->snd_nxt != tp->snd_una))) {
			if (tcp_timer_active(tp, TT_PERSIST)) {
				tcp_timer_activate(tp, TT_PERSIST, 0);
				tp->t_rxtshift = 0;
			}
			tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
		} else if (len == 0 && sbavail(&so->so_snd) &&
		    !tcp_timer_active(tp, TT_REXMT) &&
		    !tcp_timer_active(tp, TT_PERSIST)) {
			/*
			 * Avoid a situation where we do not set persist timer
			 * after a zero window condition. For example:
			 * 1) A -> B: packet with enough data to fill the window
			 * 2) B -> A: ACK for #1 + new data (0 window
			 *    advertisement)
			 * 3) A -> B: ACK for #2, 0 len packet
			 *
			 * In this case, A will not activate the persist timer,
			 * because it chose to send a packet. Unless tcp_output
			 * is called for some other reason (delayed ack timer,
			 * another input packet from B, socket syscall), A will
			 * not send zero window probes.
			 *
			 * So, if you send a 0-length packet, but there is data
			 * in the socket buffer, and neither the rexmt nor the
			 * persist timer is already set, then activate the
			 * persist timer.
			 */
			tp->t_rxtshift = 0;
			tcp_setpersist(tp);
		}
	} else {
		/*
		 * Persist case, update snd_max but since we are in
		 * persist mode (no window) we do not update snd_nxt.
		 */
		int xlen = len;
		if (flags & TH_SYN)
			++xlen;
		if (flags & TH_FIN) {
			++xlen;
			tp->t_flags |= TF_SENTFIN;
		}
		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
			tp->snd_max = tp->snd_nxt + xlen;
	}

	if (error) {
		/* Record the error. */
		TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_OUT,
		    error, 0, NULL, false);

		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 *
		 * If the error is EPERM the packet got blocked by the
		 * local firewall.  Normally we should terminate the
		 * connection but the blocking may have been spurious
		 * due to a firewall reconfiguration cycle.  So we treat
		 * it like a packet loss and let the retransmit timer and
		 * timeouts do their work over time.
		 * XXX: It is a POLA question whether calling tcp_drop right
		 * away would be the really correct behavior instead.
		 */
		if (((tp->t_flags & TF_FORCEDATA) == 0 ||
		    !tcp_timer_active(tp, TT_PERSIST)) &&
		    ((flags & TH_SYN) == 0) &&
		    (error != EPERM)) {
			if (sack_rxmit) {
				p->rxmit -= len;
				tp->sackhint.sack_bytes_rexmit -= len;
				KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
				    ("sackhint bytes rtx >= 0"));
			} else
				tp->snd_nxt -= len;
		}
		SOCKBUF_UNLOCK_ASSERT(&so->so_snd);	/* Check gotos. */
		switch (error) {
		case EACCES:
		case EPERM:
			tp->t_softerror = error;
			return (error);
		case ENOBUFS:
			TCP_XMIT_TIMER_ASSERT(tp, len, flags);
			tp->snd_cwnd = tp->t_maxseg;
			return (0);
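			/*
			 * Note that ENOBUFS deliberately returns 0: the
			 * segment never reached the driver, the congestion
			 * window is collapsed to one segment, and the
			 * retransmit or persist timer (asserted above) will
			 * clock the data out once mbufs are available again.
			 */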
		case EMSGSIZE:
			/*
			 * For some reason the interface we used initially
			 * to send segments changed to another or lowered
			 * its MTU.
			 * If TSO was active we either got an interface
			 * without TSO capabilities or TSO was turned off.
			 * If we obtained mtu from ip_output() then update
			 * it and try again.
			 */
			if (tso)
				tp->t_flags &= ~TF_TSO;
			if (mtu != 0) {
				tcp_mss_update(tp, -1, mtu, NULL, NULL);
				goto again;
			}
			return (error);
		case EHOSTDOWN:
		case EHOSTUNREACH:
		case ENETDOWN:
		case ENETUNREACH:
			if (TCPS_HAVERCVDSYN(tp->t_state)) {
				tp->t_softerror = error;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			return (error);
		}
	}
	TCPSTAT_INC(tcps_sndtotal);

	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	if (SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + recwin;
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
	if (tcp_timer_active(tp, TT_DELACK))
		tcp_timer_activate(tp, TT_DELACK, 0);
#if 0
	/*
	 * This completely breaks TCP if newreno is turned on.  What happens
	 * is that if delayed-acks are turned on on the receiver, this code
	 * on the transmitter effectively destroys the TCP window, forcing
	 * it to four packets (1.5Kx4 = 6K window).
	 */
	if (sendalot && --maxburst)
		goto again;
#endif
	if (sendalot)
		goto again;
	return (0);
}

void
tcp_setpersist(struct tcpcb *tp)
{
	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
	int tt;

	tp->t_flags &= ~TF_PREVVALID;
	if (tcp_timer_active(tp, TT_REXMT))
		panic("tcp_setpersist: retransmit pending");
	/*
	 * Start/restart persistence timer.
	 */
	TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
	    tcp_persmin, tcp_persmax);
	tcp_timer_activate(tp, TT_PERSIST, tt);
	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
		tp->t_rxtshift++;
}
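/*
 * Timing sketch for tcp_setpersist(): the base interval is derived from
 * the smoothed RTT and variance estimates (both kept in scaled fixed
 * point, hence the shifts), and each successive probe multiplies it by
 * the next tcp_backoff[] entry, clamped to [tcp_persmin, tcp_persmax].
 */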
/*
 * Insert TCP options according to the supplied parameters to the place
 * optp in a consistent way.  Can handle unaligned destinations.
 *
 * The order of the option processing is crucial for optimal packing and
 * alignment for the scarce option space.
 *
 * The optimal order for a SYN/SYN-ACK segment is:
 *   MSS (4) + NOP (1) + Window scale (3) + SACK permitted (2) +
 *   Timestamp (10) + Signature (18) = 38 bytes out of a maximum of 40.
 *
 * The SACK options should be last.  SACK blocks consume 8*n+2 bytes.
 * So a full size SACK blocks option is 34 bytes (with 4 SACK blocks).
 * At minimum we need 10 bytes (to generate 1 SACK block).  If both
 * TCP Timestamps (12 bytes) and TCP Signatures (18 bytes) are present,
 * we only have 10 bytes for SACK options (40 - (12 + 18)).
 */
int
tcp_addoptions(struct tcpopt *to, u_char *optp)
{
	u_int32_t mask, optlen = 0;

	for (mask = 1; mask < TOF_MAXOPT; mask <<= 1) {
		if ((to->to_flags & mask) != mask)
			continue;
		if (optlen == TCP_MAXOLEN)
			break;
		switch (to->to_flags & mask) {
		case TOF_MSS:
			while (optlen % 4) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_MAXSEG)
				continue;
			optlen += TCPOLEN_MAXSEG;
			*optp++ = TCPOPT_MAXSEG;
			*optp++ = TCPOLEN_MAXSEG;
			to->to_mss = htons(to->to_mss);
			bcopy((u_char *)&to->to_mss, optp, sizeof(to->to_mss));
			optp += sizeof(to->to_mss);
			break;
		case TOF_SCALE:
			while (!optlen || optlen % 2 != 1) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_WINDOW)
				continue;
			optlen += TCPOLEN_WINDOW;
			*optp++ = TCPOPT_WINDOW;
			*optp++ = TCPOLEN_WINDOW;
			*optp++ = to->to_wscale;
			break;
		case TOF_SACKPERM:
			while (optlen % 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SACK_PERMITTED)
				continue;
			optlen += TCPOLEN_SACK_PERMITTED;
			*optp++ = TCPOPT_SACK_PERMITTED;
			*optp++ = TCPOLEN_SACK_PERMITTED;
			break;
		case TOF_TS:
			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_TIMESTAMP)
				continue;
			optlen += TCPOLEN_TIMESTAMP;
			*optp++ = TCPOPT_TIMESTAMP;
			*optp++ = TCPOLEN_TIMESTAMP;
			to->to_tsval = htonl(to->to_tsval);
			to->to_tsecr = htonl(to->to_tsecr);
			bcopy((u_char *)&to->to_tsval, optp, sizeof(to->to_tsval));
			optp += sizeof(to->to_tsval);
			bcopy((u_char *)&to->to_tsecr, optp, sizeof(to->to_tsecr));
			optp += sizeof(to->to_tsecr);
			break;
		case TOF_SIGNATURE:
			{
			int siglen = TCPOLEN_SIGNATURE - 2;

			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SIGNATURE) {
				to->to_flags &= ~TOF_SIGNATURE;
				continue;
			}
			optlen += TCPOLEN_SIGNATURE;
			*optp++ = TCPOPT_SIGNATURE;
			*optp++ = TCPOLEN_SIGNATURE;
			to->to_signature = optp;
			while (siglen--)
				*optp++ = 0;
			break;
			}
		case TOF_SACK:
			{
			int sackblks = 0;
			struct sackblk *sack = (struct sackblk *)to->to_sacks;
			tcp_seq sack_seq;

			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SACKHDR + TCPOLEN_SACK)
				continue;
			optlen += TCPOLEN_SACKHDR;
			*optp++ = TCPOPT_SACK;
			sackblks = min(to->to_nsacks,
			    (TCP_MAXOLEN - optlen) / TCPOLEN_SACK);
			*optp++ = TCPOLEN_SACKHDR + sackblks * TCPOLEN_SACK;
			while (sackblks--) {
				sack_seq = htonl(sack->start);
				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
				optp += sizeof(sack_seq);
				sack_seq = htonl(sack->end);
				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
				optp += sizeof(sack_seq);
				optlen += TCPOLEN_SACK;
				sack++;
			}
			TCPSTAT_INC(tcps_sack_send_blocks);
			break;
			}
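			/*
			 * Example of the packing the NOP loops achieve: a
			 * SYN requesting MSS, window scale, SACK and
			 * timestamps is laid out as
			 *   MSS(4) NOP(1) WS(3) SACKPERM(2) TS(10),
			 * which leaves the two 32-bit timestamp words
			 * aligned (optlen % 4 == 2 before the TS kind and
			 * length bytes).
			 */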
		case TOF_FASTOPEN:
			{
			int total_len;

			/* XXX is there any point to aligning this option? */
			total_len = TCPOLEN_FAST_OPEN_EMPTY + to->to_tfo_len;
			if (TCP_MAXOLEN - optlen < total_len) {
				to->to_flags &= ~TOF_FASTOPEN;
				continue;
			}
			*optp++ = TCPOPT_FAST_OPEN;
			*optp++ = total_len;
			if (to->to_tfo_len > 0) {
				bcopy(to->to_tfo_cookie, optp, to->to_tfo_len);
				optp += to->to_tfo_len;
			}
			optlen += total_len;
			break;
			}
		default:
			panic("%s: unknown TCP option type", __func__);
			break;
		}
	}

	/* Terminate and pad TCP options to a 4 byte boundary. */
	if (optlen % 4) {
		optlen += TCPOLEN_EOL;
		*optp++ = TCPOPT_EOL;
	}
	/*
	 * According to RFC 793 (STD0007):
	 *   "The content of the header beyond the End-of-Option option
	 *    must be header padding (i.e., zero)."
	 * and later: "The padding is composed of zeros."
	 */
	while (optlen % 4) {
		optlen += TCPOLEN_PAD;
		*optp++ = TCPOPT_PAD;
	}

	KASSERT(optlen <= TCP_MAXOLEN, ("%s: TCP options too long", __func__));
	return (optlen);
}

/*
 * This is a copy of m_copym(), taking the TSO segment size/limit
 * constraints into account, and advancing the sndptr as it goes.
 */
struct mbuf *
tcp_m_copym(struct mbuf *m, int32_t off0, int32_t *plen,
    int32_t seglimit, int32_t segsize, struct sockbuf *sb)
{
	struct mbuf *n, **np;
	struct mbuf *top;
	int32_t off = off0;
	int32_t len = *plen;
	int32_t fragsize;
	int32_t len_cp = 0;
	int32_t *pkthdrlen;
	uint32_t mlen, frags;
	bool copyhdr;

	KASSERT(off >= 0, ("tcp_m_copym, negative off %d", off));
	KASSERT(len >= 0, ("tcp_m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = true;
	else
		copyhdr = false;
	while (off > 0) {
		KASSERT(m != NULL, ("tcp_m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		if ((sb) && (m == sb->sb_sndptr)) {
			sb->sb_sndptroff += m->m_len;
			sb->sb_sndptr = m->m_next;
		}
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	pkthdrlen = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("tcp_m_copym, length > size of mbuf chain"));
			*plen = len_cp;
			if (pkthdrlen != NULL)
				*pkthdrlen = len_cp;
			break;
		}
		mlen = min(len, m->m_len - off);
		if (seglimit) {
			/*
			 * For M_NOMAP mbufs, add 3 segments
			 * + 1 in case we are crossing page boundaries
			 * + 2 in case the TLS hdr/trailer are used
			 * It is cheaper to just add the segments
			 * than it is to take the cache miss to look
			 * at the mbuf ext_pgs state in detail.
			 */
			if (m->m_flags & M_NOMAP) {
				fragsize = min(segsize, PAGE_SIZE);
				frags = 3;
			} else {
				fragsize = segsize;
				frags = 0;
			}

			/* Break if we really can't fit anymore. */
			if ((frags + 1) >= seglimit) {
				*plen = len_cp;
				if (pkthdrlen != NULL)
					*pkthdrlen = len_cp;
				break;
			}
			/*
			 * Reduce size if you can't copy the whole
			 * mbuf. If we can't copy the whole mbuf, also
			 * adjust len so the loop will end after this
			 * mbuf.
			 */
			if ((frags + howmany(mlen, fragsize)) >= seglimit) {
				mlen = (seglimit - frags - 1) * fragsize;
				len = mlen;
				*plen = len_cp + len;
				if (pkthdrlen != NULL)
					*pkthdrlen = *plen;
			}
			frags += howmany(mlen, fragsize);
			if (frags == 0)
				frags++;
			seglimit -= frags;
			KASSERT(seglimit > 0,
			    ("%s: seglimit went too low", __func__));
		}
		if (copyhdr)
			n = m_gethdr(M_NOWAIT, m->m_type);
		else
			n = m_get(M_NOWAIT, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, M_NOWAIT))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			pkthdrlen = &n->m_pkthdr.len;
			copyhdr = false;
		}
		n->m_len = mlen;
		len_cp += n->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);

		if (sb && (sb->sb_sndptr == m) &&
		    ((n->m_len + off) >= m->m_len) && m->m_next) {
			sb->sb_sndptroff += m->m_len;
			sb->sb_sndptr = m->m_next;
		}
		off = 0;
		if (len != M_COPYALL) {
			len -= n->m_len;
		}
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
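/*
 * Example of the segment accounting in tcp_m_copym(): copying a 16k
 * unmapped (M_NOMAP) mbuf with segsize 4096 charges
 * howmany(16384, 4096) = 4 fragments plus the 3-fragment safety margin;
 * with seglimit 7 the copy of that mbuf is therefore trimmed to
 * (7 - 3 - 1) * 4096 = 12288 bytes rather than overcommitting the
 * interface's TSO descriptor budget.
 */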
void
tcp_sndbuf_autoscale(struct tcpcb *tp, struct socket *so, uint32_t sendwin)
{

	/*
	 * Automatic sizing of send socket buffer.  Often the send buffer
	 * size is not optimally adjusted to the actual network conditions
	 * at hand (delay bandwidth product).  Setting the buffer size too
	 * small limits throughput on links with high bandwidth and high
	 * delay (eg. trans-continental/oceanic links).  Setting the
	 * buffer size too big consumes too much real kernel memory,
	 * especially with many connections on busy servers.
	 *
	 * The criteria to step up the send buffer one notch are:
	 *  1. receive window of remote host is larger than send buffer
	 *     (with a fudge factor of 5/4th);
	 *  2. send buffer is filled to 7/8th with data (so we actually
	 *     have data to make use of it);
	 *  3. send buffer fill has not hit maximal automatic size;
	 *  4. our send window (slow start and congestion controlled) is
	 *     larger than sent but unacknowledged data in send buffer.
	 *
	 * The remote host receive window scaling factor may limit the
	 * growing of the send buffer before it reaches its allowed
	 * maximum.
	 *
	 * It scales directly with slow start or congestion window
	 * and does at most one step per received ACK.  This fast
	 * scaling has the drawback of growing the send buffer beyond
	 * what is strictly necessary to make full use of a given
	 * delay*bandwidth product.  However testing has shown this not
	 * to be much of a problem.  At worst we are trading wasting
	 * of available bandwidth (the non-use of it) for wasting some
	 * socket buffer memory.
	 *
	 * TODO: Shrink send buffer during idle periods together
	 * with congestion window.  Requires another timer.  Has to
	 * wait for upcoming tcp timer rewrite.
	 *
	 * XXXGL: should there be used sbused() or sbavail()?
	 */
	if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
		int lowat;

		lowat = V_tcp_sendbuf_auto_lowat ? so->so_snd.sb_lowat : 0;
		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat - lowat &&
		    sbused(&so->so_snd) >=
		    (so->so_snd.sb_hiwat / 8 * 7) - lowat &&
		    sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
		    sendwin >= (sbused(&so->so_snd) -
		    (tp->snd_nxt - tp->snd_una))) {
			if (!sbreserve_locked(&so->so_snd,
			    min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max), so, curthread))
				so->so_snd.sb_flags &= ~SB_AUTOSIZE;
		}
	}
}
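/*
 * Worked example: with sb_hiwat = 65536 and lowat = 0, growth requires
 * the peer to advertise at least ~52429 bytes (snd_wnd * 5/4 >= 65536),
 * at least 57344 bytes queued (7/8th full), and an open send window;
 * each such ACK then grows the buffer by tcp_autosndbuf_inc (8k by
 * default) up to tcp_autosndbuf_max (2MB by default).
 */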