/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_kern_tls.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/arb.h>
#include <sys/domain.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/kernel.h>
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/qmath.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/stats.h>

#include <net/if.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, path_mtu_discovery) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(path_mtu_discovery), 1,
	"Enable Path MTU Discovery");

VNET_DEFINE(int, tcp_do_tso) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_do_tso), 0,
	"Enable TCP Segmentation Offload");

VNET_DEFINE(int, tcp_sendspace) = 1024*32;
#define	V_tcp_sendspace	VNET(tcp_sendspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_sendspace), 0, "Initial send socket buffer size");

VNET_DEFINE(int, tcp_do_autosndbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_do_autosndbuf), 0,
	"Enable automatic send buffer sizing");

VNET_DEFINE(int, tcp_autosndbuf_inc) = 8*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_autosndbuf_inc), 0,
	"Incrementor step size of automatic send buffer");

VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_autosndbuf_max), 0,
	"Max size of automatic send buffer");

VNET_DEFINE(int, tcp_sendbuf_auto_lowat) = 0;
#define	V_tcp_sendbuf_auto_lowat	VNET(tcp_sendbuf_auto_lowat)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto_lowat, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_sendbuf_auto_lowat), 0,
	"Modify threshold for auto send buffer growth to account for SO_SNDLOWAT");

/*
 * Make sure that either retransmit or persist timer is set for SYN, FIN and
 * non-ACK.
 */
#define	TCP_XMIT_TIMER_ASSERT(tp, len, th_flags)			\
	KASSERT(((len) == 0 && ((th_flags) & (TH_SYN | TH_FIN)) == 0) ||\
	    tcp_timer_active((tp), TT_REXMT) ||				\
	    tcp_timer_active((tp), TT_PERSIST),				\
	    ("neither rexmt nor persist timer is set"))

static void inline	cc_after_idle(struct tcpcb *tp);

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established output helper hook.
 */
void
hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to, uint32_t len, int tso)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_OUT]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;
		hhook_data.len = len;
		hhook_data.tso = tso;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_OUT], &hhook_data,
		    tp->osd);
	}
}
#endif

/*
 * CC wrapper hook functions
 */
static void inline
cc_after_idle(struct tcpcb *tp)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->after_idle != NULL)
		CC_ALGO(tp)->after_idle(tp->ccv);
}

/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_output(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;
	int32_t len;
	uint32_t recwin, sendwin;
	int off, flags, error = 0;	/* Keep compiler happy */
	u_int if_hw_tsomaxsegcount = 0;
	u_int if_hw_tsomaxsegsize = 0;
	struct mbuf *m;
	struct ip *ip = NULL;
#ifdef TCPDEBUG
	struct ipovly *ipov = NULL;
#endif
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned ipoptlen, optlen, hdrlen;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	unsigned ipsec_optlen = 0;
#endif
	int idle, sendalot, curticks;
	int sack_rxmit, sack_bytes_rxmt;
	struct sackhole *p;
	int tso, mtu;
	struct tcpopt to;
	unsigned int wanted_cookie = 0;
	unsigned int dont_sendalot = 0;
#if 0
	int maxburst = TCP_MAXBURST;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;

	isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif
#ifdef KERN_TLS
	const bool hw_tls = (so->so_snd.sb_flags & SB_TLS_IFNET) != 0;
#else
	const bool hw_tls = false;
#endif

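	/*
	 * tcp_output() can be entered from several contexts (socket
	 * writes, timers, incoming ACK processing); in all of them the
	 * caller must hold the inpcb write lock and run within the
	 * network epoch, as asserted below.
	 */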
	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return (tcp_offload_output(tp));
#endif

	/*
	 * For TFO connections in SYN_SENT or SYN_RECEIVED,
	 * only allow the initial SYN or SYN|ACK and those sent
	 * by the retransmit timer.
	 */
	if (IS_FASTOPEN(tp->t_flags) &&
	    ((tp->t_state == TCPS_SYN_SENT) ||
	     (tp->t_state == TCPS_SYN_RECEIVED)) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&	/* initial SYN or SYN|ACK sent */
	    (tp->snd_nxt != tp->snd_una))	/* not a retransmit */
		return (0);

	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && (((ticks - tp->t_rcvtime) >= tp->t_rxtcur) ||
	    (tp->t_sndtime && ((ticks - tp->t_sndtime) >= tp->t_rxtcur))))
		cc_after_idle(tp);
	tp->t_flags &= ~TF_LASTIDLE;
	if (idle) {
		if (tp->t_flags & TF_MORETOCOME) {
			tp->t_flags |= TF_LASTIDLE;
			idle = 0;
		}
	}
again:
	/*
	 * If we've recently taken a timeout, snd_max will be greater than
	 * snd_nxt.  There may be SACK information that allows us to avoid
	 * resending already delivered data.  Adjust snd_nxt accordingly.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_LT(tp->snd_nxt, tp->snd_max))
		tcp_sack_adjust(tp);
	sendalot = 0;
	tso = 0;
	mtu = 0;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Send any SACK-generated retransmissions.  If we're explicitly trying
	 * to send out new data (when sendalot is 1), bypass this function.
	 * If we retransmit in fast recovery mode, decrement snd_cwnd, since
	 * we're replacing a (future) new transmission with a retransmission
	 * now, and we previously incremented snd_cwnd in tcp_input().
	 */
	/*
	 * Still in sack recovery, reset rxmit flag to zero.
	 */
	sack_rxmit = 0;
	sack_bytes_rxmt = 0;
	len = 0;
	p = NULL;
	if ((tp->t_flags & TF_SACK_PERMIT) && IN_FASTRECOVERY(tp->t_flags) &&
	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
		uint32_t cwin;

		cwin =
		    imax(min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt, 0);
		/* Do not retransmit SACK segments beyond snd_recover */
		if (SEQ_GT(p->end, tp->snd_recover)) {
			/*
			 * (At least) part of sack hole extends beyond
			 * snd_recover. Check to see if we can rexmit data
			 * for this hole.
			 */
			if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
				/*
				 * Can't rexmit any more data for this hole.
				 * That data will be rexmitted in the next
				 * sack recovery episode, when snd_recover
				 * moves past p->rxmit.
				 */
				p = NULL;
				goto after_sack_rexmit;
			} else
				/* Can rexmit part of the current hole */
				len = ((int32_t)ulmin(cwin,
				    tp->snd_recover - p->rxmit));
		} else
			len = ((int32_t)ulmin(cwin, p->end - p->rxmit));
		off = p->rxmit - tp->snd_una;
		KASSERT(off >= 0,("%s: sack block to the left of una : %d",
		    __func__, off));
		if (len > 0) {
			sack_rxmit = 1;
			sendalot = 1;
			TCPSTAT_INC(tcps_sack_rexmits);
			TCPSTAT_ADD(tcps_sack_rexmit_bytes,
			    min(len, tcp_maxseg(tp)));
		}
	}
after_sack_rexmit:
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	SOCKBUF_LOCK(&so->so_snd);
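	/*
	 * The send socket buffer is examined repeatedly below (sbavail(),
	 * sbused()); keep it locked until we either return without sending
	 * or have copied out the data to be transmitted.
	 */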
	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCEDATA) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < sbused(&so->so_snd))
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_timer_activate(tp, TT_PERSIST, 0);
			tp->t_rxtshift = 0;
		}
	}

	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 *
	 * If sack_rxmit is true we are retransmitting from the scoreboard
	 * in which case len is already set.
	 */
	if (sack_rxmit == 0) {
		if (sack_bytes_rxmt == 0)
			len = ((int32_t)min(sbavail(&so->so_snd), sendwin) -
			    off);
		else {
			int32_t cwin;

			/*
			 * We are inside of a SACK recovery episode and are
			 * sending new data, having retransmitted all the
			 * data possible in the scoreboard.
			 */
			len = ((int32_t)min(sbavail(&so->so_snd), tp->snd_wnd) -
			    off);
			/*
			 * Don't remove this (len > 0) check !
			 * We explicitly check for len > 0 here (although it
			 * isn't really necessary), to work around a gcc
			 * optimization issue - to force gcc to compute
			 * len above.  Without this check, the computation
			 * of len is bungled by the optimizer.
			 */
			if (len > 0) {
				cwin = tp->snd_cwnd -
					(tp->snd_nxt - tp->snd_recover) -
					sack_bytes_rxmt;
				if (cwin < 0)
					cwin = 0;
				len = imin(len, cwin);
			}
		}
	}

	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if segment contains data and if we don't
	 * know that foreign host supports TAO, suppress sending segment.
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		if (tp->t_state != TCPS_SYN_RECEIVED)
			flags &= ~TH_SYN;
		/*
		 * When sending additional segments following a TFO SYN|ACK,
		 * do not include the SYN bit.
		 */
		if (IS_FASTOPEN(tp->t_flags) &&
		    (tp->t_state == TCPS_SYN_RECEIVED))
			flags &= ~TH_SYN;
		off--, len++;
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
		len = 0;
		flags &= ~TH_FIN;
	}

	/*
	 * On TFO sockets, ensure no data is sent in the following cases:
	 *
	 *  - When retransmitting SYN|ACK on a passively-created socket
	 *
	 *  - When retransmitting SYN on an actively created socket
	 *
	 *  - When sending a zero-length cookie (cookie request) on an
	 *    actively created socket
	 *
	 *  - When the socket is in the CLOSED state (RST is being sent)
	 */
	if (IS_FASTOPEN(tp->t_flags) &&
	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
	     ((tp->t_state == TCPS_SYN_SENT) &&
	      (tp->t_tfo_client_cookie_len == 0)) ||
	     (flags & TH_RST)))
		len = 0;
	if (len <= 0) {
		/*
		 * If FIN has been sent but not acked,
		 * but we haven't been called to retransmit,
		 * len will be < 0.  Otherwise, window shrank
		 * after we sent into it.  If window shrank to 0,
		 * cancel pending retransmit, pull snd_nxt back
		 * to (closed) window, and set the persist timer
		 * if it isn't already going.  If the window didn't
		 * close completely, just wait for an ACK.
		 *
		 * We also do a general check here to ensure that
		 * we will set the persist timer when we have data
		 * to send, but a 0-byte window. This makes sure
		 * the persist timer is set even if the packet
		 * hits one of the "goto send" lines below.
		 */
		len = 0;
		if ((sendwin == 0) && (TCPS_HAVEESTABLISHED(tp->t_state)) &&
		    (off < (int) sbavail(&so->so_snd))) {
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_timer_active(tp, TT_PERSIST))
				tcp_setpersist(tp);
		}
	}

	/* len will be >= 0 after this point. */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	tcp_sndbuf_autoscale(tp, so, sendwin);

	/*
	 * Decide if we can use TCP Segmentation Offloading (if supported by
	 * hardware).
	 *
	 * TSO may only be used if we are in a pure bulk sending state.  The
	 * presence of TCP-MD5, SACK retransmits, SACK advertisements and
	 * IP options prevent using TSO.  With TSO the TCP header is the same
	 * (except for the sequence number) for all generated packets.  This
	 * makes it impossible to transmit any options which vary per generated
	 * segment or packet.
	 *
	 * IPv4 handling has a clear separation of ip options and ip header
	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
	 * the right thing below to provide length of just ip options and thus
	 * checking for ipoptlen is enough to decide if ip options are present.
	 */
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/*
	 * Pre-calculate here as we save another lookup into the darknesses
	 * of IPsec that way and can actually decide if TSO is ok.
	 */
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6))
		ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4))
		ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
#endif /* INET */
#endif /* IPSEC */
#ifdef INET6
	if (isipv6)
		ipoptlen = ip6_optlen(tp->t_inpcb);
	else
#endif
	if (tp->t_inpcb->inp_options)
		ipoptlen = tp->t_inpcb->inp_options->m_len -
		    offsetof(struct ipoption, ipopt_list);
	else
		ipoptlen = 0;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	ipoptlen += ipsec_optlen;
#endif

	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg &&
	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
	    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
	    ipoptlen == 0 && !(flags & TH_SYN))
		tso = 1;

	if (sack_rxmit) {
		if (SEQ_LT(p->rxmit + len, tp->snd_una + sbused(&so->so_snd)))
			flags &= ~TH_FIN;
	} else {
		if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
		    sbused(&so->so_snd)))
			flags &= ~TH_FIN;
	}

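	/*
	 * Determine the receive window to advertise: the free space in the
	 * receive buffer (never negative), clamped to the largest value
	 * representable with our window scale factor.
	 */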
	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);

	/*
	 * Sender silly window avoidance.  We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- We have a full segment (or more with TSO)
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we've timed out (e.g. persist timer)
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (the receiver may be limiting the window size)
	 *	- we need to retransmit
	 */
	if (len) {
		if (len >= tp->t_maxseg)
			goto send;
		/*
		 * As the TCP header options are now
		 * considered when setting up the initial
		 * window, we would not send the last segment
		 * if we skip considering the option length here.
		 * Note: this may not work when tcp headers change
		 * very dynamically in the future.
		 */
		if ((((tp->t_flags & TF_SIGNATURE) ?
			PADTCPOLEN(TCPOLEN_SIGNATURE) : 0) +
		    ((tp->t_flags & TF_RCVD_TSTMP) ?
			PADTCPOLEN(TCPOLEN_TIMESTAMP) : 0) +
		    len) >= tp->t_maxseg)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome.  XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    (uint32_t)len + (uint32_t)off >= sbavail(&so->so_snd) &&
		    (tp->t_flags & TF_NOPUSH) == 0) {
			goto send;
		}
		if (tp->t_flags & TF_FORCEDATA)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
		if (sack_rxmit)
			goto send;
	}

	/*
	 * Sending of standalone window updates.
	 *
	 * Window updates are important when we close our window due to a
	 * full socket buffer and are opening it again after the application
	 * reads data from it.  Once the window has opened again and the
	 * remote end starts to send again the ACK clock takes over and
	 * provides the most current window information.
	 *
	 * We must avoid the silly window syndrome whereby every read
	 * from the receive buffer, no matter how small, causes a window
	 * update to be sent.  We also should avoid sending a flurry of
	 * window updates when the socket buffer had queued a lot of data
	 * and the application is doing small reads.
	 *
	 * Prevent a flurry of pointless window updates by only sending
	 * an update when we can increase the advertised window by more
	 * than 1/4th of the socket buffer capacity.  When the buffer is
	 * getting full or is very small be more aggressive and send an
	 * update whenever we can increase by two mss sized segments.
	 * In all other situations the ACK's to new incoming data will
	 * carry further window increases.
	 *
	 * Don't send an independent window update if a delayed
	 * ACK is pending (it will get piggy-backed on it) or the
	 * remote side already has done a half-close and won't send
	 * more data.  Skip this if the connection is in T/TCP
	 * half-open state.
	 */
	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
	    !(tp->t_flags & TF_DELACK) &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * "adv" is the amount we could increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		int32_t adv;
		int oldwin;

		adv = recwin;
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
			oldwin = (tp->rcv_adv - tp->rcv_nxt);
			if (adv > oldwin)
				adv -= oldwin;
			else
				adv = 0;
		} else
			oldwin = 0;

		/*
		 * If the new window size ends up being the same as or less
		 * than the old size when it is scaled, then don't force
		 * a window update.
		 */
		if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
			goto dontupdate;

		if (adv >= (int32_t)(2 * tp->t_maxseg) &&
		    (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
		     recwin <= (so->so_rcv.sb_hiwat / 8) ||
		     so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg ||
		     adv >= TCP_MAXWIN << tp->rcv_scale))
			goto send;
		if (2 * adv >= (int32_t)so->so_rcv.sb_hiwat)
			goto send;
	}
dontupdate:

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if (flags & TH_FIN &&
	    ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
		goto send;
	/*
	 * In SACK, it is possible for tcp_output to fail to send a segment
	 * after the retransmission timer has been turned off.  Make sure
	 * that the retransmission timer is set.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&
	    !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
		goto just_return;
	}
	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_timer_active(tp, TT_PERSIST)
	 *	is true when we are in persist state.
	 * (tp->t_flags & TF_FORCEDATA)
	 *	is set when we are called to send a persist packet.
	 * tcp_timer_active(tp, TT_REXMT)
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise force out a byte.
	 */
	if (sbavail(&so->so_snd) && !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
just_return:
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);

send:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
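	/*
	 * Record whether this transmission carries a full-sized segment;
	 * the path MTU blackhole detection logic in the retransmit timer
	 * only engages when full MSS-sized segments were being sent.
	 */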
	if (len > 0) {
		if (len >= tp->t_maxseg)
			tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
	}
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
#ifdef INET6
	if (isipv6)
		hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	else
#endif
		hdrlen = sizeof (struct tcpiphdr);

	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
	}

	/*
	 * Compute options for segment.
	 * We only have to care about SYN and established connection
	 * segments.  Options for SYN-ACK segments are handled in TCP
	 * syncache.
	 */
	to.to_flags = 0;
	if ((tp->t_flags & TF_NOOPT) == 0) {
		/* Maximum segment size. */
		if (flags & TH_SYN) {
			to.to_mss = tcp_mssopt(&tp->t_inpcb->inp_inc);
			to.to_flags |= TOF_MSS;

			/*
			 * On SYN or SYN|ACK transmits on TFO connections,
			 * only include the TFO option if it is not a
			 * retransmit, as the presence of the TFO option may
			 * have caused the original SYN or SYN|ACK to have
			 * been dropped by a middlebox.
			 */
			if (IS_FASTOPEN(tp->t_flags) &&
			    (tp->t_rxtshift == 0)) {
				if (tp->t_state == TCPS_SYN_RECEIVED) {
					to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
					to.to_tfo_cookie =
					    (u_int8_t *)&tp->t_tfo_cookie.server;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
				} else if (tp->t_state == TCPS_SYN_SENT) {
					to.to_tfo_len =
					    tp->t_tfo_client_cookie_len;
					to.to_tfo_cookie =
					    tp->t_tfo_cookie.client;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
					/*
					 * If we wind up having more data to
					 * send with the SYN than can fit in
					 * one segment, don't send any more
					 * until the SYN|ACK comes back from
					 * the other end.
					 */
					dont_sendalot = 1;
				}
			}
		}
		/* Window scaling. */
		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
			to.to_wscale = tp->request_r_scale;
			to.to_flags |= TOF_SCALE;
		}
		/* Timestamps. */
		if ((tp->t_flags & TF_RCVD_TSTMP) ||
		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
			curticks = tcp_ts_getticks();
			to.to_tsval = curticks + tp->ts_offset;
			to.to_tsecr = tp->ts_recent;
			to.to_flags |= TOF_TS;
			if (tp->t_rxtshift == 1)
				tp->t_badrxtwin = curticks;
		}

		/* Set receive buffer autosizing timestamp. */
		if (tp->rfbuf_ts == 0 &&
		    (so->so_rcv.sb_flags & SB_AUTOSIZE))
			tp->rfbuf_ts = tcp_ts_getticks();

		/* Selective ACK's. */
		if (tp->t_flags & TF_SACK_PERMIT) {
			if (flags & TH_SYN)
				to.to_flags |= TOF_SACKPERM;
			else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
			    tp->rcv_numsacks > 0) {
				to.to_flags |= TOF_SACK;
				to.to_nsacks = tp->rcv_numsacks;
				to.to_sacks = (u_char *)tp->sackblks;
			}
		}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/* TCP-MD5 (RFC2385). */
		/*
		 * Check that TCP_MD5SIG is enabled in tcpcb to
		 * account the size needed to set this TCP option.
		 */
		if (tp->t_flags & TF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif /* TCP_SIGNATURE */

		/* Processing the options. */
		hdrlen += optlen = tcp_addoptions(&to, opt);
		/*
		 * If we wanted a TFO option to be added, but it was unable
		 * to fit, ensure no data is sent.
		 */
		if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
		    !(to.to_flags & TOF_FASTOPEN))
			len = 0;
	}

	/*
	 * Adjust data length if insertion of options will
	 * bump the packet length beyond the t_maxseg length.
	 * Clear the FIN bit because we cut off the tail of
	 * the segment.
	 */
	if (len + optlen + ipoptlen > tp->t_maxseg) {
		flags &= ~TH_FIN;

		if (tso) {
			u_int if_hw_tsomax;
			u_int moff;
			int max_len;

			/* extract TSO information */
			if_hw_tsomax = tp->t_tsomax;
			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;

			/*
			 * Limit a TSO burst to prevent it from
			 * overflowing or exceeding the maximum length
			 * allowed by the network interface:
			 */
			KASSERT(ipoptlen == 0,
			    ("%s: TSO can't do IP options", __func__));

			/*
			 * Check if we should limit by maximum payload
			 * length:
			 */
			if (if_hw_tsomax != 0) {
				/* compute maximum TSO length */
				max_len = (if_hw_tsomax - hdrlen -
				    max_linkhdr);
				if (max_len <= 0) {
					len = 0;
				} else if (len > max_len) {
					sendalot = 1;
					len = max_len;
				}
			}

			/*
			 * Prevent the last segment from being
			 * fractional unless the send sockbuf can be
			 * emptied:
			 */
			max_len = (tp->t_maxseg - optlen);
			if (((uint32_t)off + (uint32_t)len) <
			    sbavail(&so->so_snd)) {
				moff = len % max_len;
				if (moff != 0) {
					len -= moff;
					sendalot = 1;
				}
			}

			/*
			 * In case there are too many small fragments
			 * don't use TSO:
			 */
			if (len <= max_len) {
				len = max_len;
				sendalot = 1;
				tso = 0;
			}

			/*
			 * Send the FIN in a separate segment
			 * after the bulk sending is done.
			 * We don't trust the TSO implementations
			 * to clear the FIN flag on all but the
			 * last segment.
			 */
			if (tp->t_flags & TF_NEEDFIN)
				sendalot = 1;
		} else {
			if (optlen + ipoptlen >= tp->t_maxseg) {
				/*
				 * Since we don't have enough space to put
				 * the IP header chain and the TCP header in
				 * one packet as required by RFC 7112, don't
				 * send it. Also ensure that at least one
				 * byte of the payload can be put into the
				 * TCP segment.
				 */
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EMSGSIZE;
				sack_rxmit = 0;
				goto out;
			}
			len = tp->t_maxseg - optlen - ipoptlen;
			sendalot = 1;
			if (dont_sendalot)
				sendalot = 0;
		}
	} else
		tso = 0;

	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
	    ("%s: len > IP_MAXPACKET", __func__));

/*#ifdef DIAGNOSTIC*/
#ifdef INET6
	if (max_linkhdr + hdrlen > MCLBYTES)
#else
	if (max_linkhdr + hdrlen > MHLEN)
#endif
		panic("tcphdr too big");
/*#endif*/

	/*
	 * This KASSERT is here to catch edge cases at a well defined place.
	 * Before, those had triggered (random) panic conditions further down.
	 */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		struct mbuf *mb;
		struct sockbuf *msb;
		u_int moff;

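		/*
		 * Update send statistics: distinguish zero-window probes
		 * (forced single byte), retransmissions (including SACK
		 * based ones), and ordinary new data transmissions.
		 */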
Neville-Neil tp->t_sndrexmitpack++; 103178b50714SRobert Watson TCPSTAT_INC(tcps_sndrexmitpack); 103278b50714SRobert Watson TCPSTAT_ADD(tcps_sndrexmitbyte, len); 1033adc56f5aSEdward Tomasz Napierala #ifdef STATS 1034adc56f5aSEdward Tomasz Napierala stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 1035adc56f5aSEdward Tomasz Napierala len); 1036adc56f5aSEdward Tomasz Napierala #endif /* STATS */ 1037df8bae1dSRodney W. Grimes } else { 103878b50714SRobert Watson TCPSTAT_INC(tcps_sndpack); 103978b50714SRobert Watson TCPSTAT_ADD(tcps_sndbyte, len); 1040adc56f5aSEdward Tomasz Napierala #ifdef STATS 1041adc56f5aSEdward Tomasz Napierala stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 1042adc56f5aSEdward Tomasz Napierala len); 1043adc56f5aSEdward Tomasz Napierala #endif /* STATS */ 1044df8bae1dSRodney W. Grimes } 104539f6074eSGleb Smirnoff #ifdef INET6 104639f6074eSGleb Smirnoff if (MHLEN < hdrlen + max_linkhdr) 104739f6074eSGleb Smirnoff m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 104839f6074eSGleb Smirnoff else 104939f6074eSGleb Smirnoff #endif 105039f6074eSGleb Smirnoff m = m_gethdr(M_NOWAIT, MT_DATA); 105139f6074eSGleb Smirnoff 1052df8bae1dSRodney W. Grimes if (m == NULL) { 1053cf2942b6SRobert Watson SOCKBUF_UNLOCK(&so->so_snd); 1054df8bae1dSRodney W. Grimes error = ENOBUFS; 10550e2bc05cSGleb Smirnoff sack_rxmit = 0; 1056df8bae1dSRodney W. Grimes goto out; 1057df8bae1dSRodney W. Grimes } 105839f6074eSGleb Smirnoff 1059df8bae1dSRodney W. Grimes m->m_data += max_linkhdr; 1060df8bae1dSRodney W. Grimes m->m_len = hdrlen; 10614e023759SAndre Oppermann 10624e023759SAndre Oppermann /* 10634e023759SAndre Oppermann * Start the m_copy functions from the closest mbuf 10644e023759SAndre Oppermann * to the offset in the socket buffer chain. 10654e023759SAndre Oppermann */ 1066581a046aSRandall Stewart mb = sbsndptr_noadv(&so->so_snd, off, &moff); 1067b2e60773SJohn Baldwin if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 10683ac12506SJonathan T. Looney m_copydata(mb, moff, len, 1069df8bae1dSRodney W. Grimes mtod(m, caddr_t) + hdrlen); 1070581a046aSRandall Stewart if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 1071581a046aSRandall Stewart sbsndptr_adv(&so->so_snd, mb, len); 1072df8bae1dSRodney W. Grimes m->m_len += len; 1073df8bae1dSRodney W. Grimes } else { 1074581a046aSRandall Stewart if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 1075581a046aSRandall Stewart msb = NULL; 1076581a046aSRandall Stewart else 1077581a046aSRandall Stewart msb = &so->so_snd; 1078581a046aSRandall Stewart m->m_next = tcp_m_copym(mb, moff, 1079581a046aSRandall Stewart &len, if_hw_tsomaxsegcount, 1080b2e60773SJohn Baldwin if_hw_tsomaxsegsize, msb, hw_tls); 1081581a046aSRandall Stewart if (len <= (tp->t_maxseg - optlen)) { 1082581a046aSRandall Stewart /* 1083581a046aSRandall Stewart * Must have run out of mbufs for the copy; 1084581a046aSRandall Stewart * shorten it so TSO is no longer needed. Let's 1085581a046aSRandall Stewart * not set sendalot since we are low on 1086581a046aSRandall Stewart * mbufs. 1087581a046aSRandall Stewart */ 1088581a046aSRandall Stewart tso = 0; 1089581a046aSRandall Stewart } 10904e023759SAndre Oppermann if (m->m_next == NULL) { 1091cf2942b6SRobert Watson SOCKBUF_UNLOCK(&so->so_snd); 1092d7f570e6SGarrett Wollman (void) m_free(m); 109351823c3aSGarrett Wollman error = ENOBUFS; 10940e2bc05cSGleb Smirnoff sack_rxmit = 0; 109551823c3aSGarrett Wollman goto out; 109651823c3aSGarrett Wollman } 1097df8bae1dSRodney W. Grimes } 1098472ea5beSColin Percival 1099df8bae1dSRodney W. Grimes /* 1100df8bae1dSRodney W. 
Grimes * If we're sending everything we've got, set PUSH. 1101df8bae1dSRodney W. Grimes * (This will keep happy those implementations which only 1102df8bae1dSRodney W. Grimes * give data to the user when a buffer fills or 1103df8bae1dSRodney W. Grimes * a PUSH comes in.) 1104df8bae1dSRodney W. Grimes */ 11053ac12506SJonathan T. Looney if (((uint32_t)off + (uint32_t)len == sbused(&so->so_snd)) && 11063ac12506SJonathan T. Looney !(flags & TH_SYN)) 1107df8bae1dSRodney W. Grimes flags |= TH_PUSH; 1108cf2942b6SRobert Watson SOCKBUF_UNLOCK(&so->so_snd); 1109df8bae1dSRodney W. Grimes } else { 1110cf2942b6SRobert Watson SOCKBUF_UNLOCK(&so->so_snd); 1111df8bae1dSRodney W. Grimes if (tp->t_flags & TF_ACKNOW) 111278b50714SRobert Watson TCPSTAT_INC(tcps_sndacks); 1113df8bae1dSRodney W. Grimes else if (flags & (TH_SYN|TH_FIN|TH_RST)) 111478b50714SRobert Watson TCPSTAT_INC(tcps_sndctrl); 1115df8bae1dSRodney W. Grimes else if (SEQ_GT(tp->snd_up, tp->snd_una)) 111678b50714SRobert Watson TCPSTAT_INC(tcps_sndurg); 1117df8bae1dSRodney W. Grimes else 111878b50714SRobert Watson TCPSTAT_INC(tcps_sndwinup); 1119df8bae1dSRodney W. Grimes 1120aa8bd99dSGleb Smirnoff m = m_gethdr(M_NOWAIT, MT_DATA); 1121df8bae1dSRodney W. Grimes if (m == NULL) { 1122df8bae1dSRodney W. Grimes error = ENOBUFS; 11230e2bc05cSGleb Smirnoff sack_rxmit = 0; 1124df8bae1dSRodney W. Grimes goto out; 1125df8bae1dSRodney W. Grimes } 1126fb59c426SYoshinobu Inoue #ifdef INET6 1127fb59c426SYoshinobu Inoue if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 1128fb59c426SYoshinobu Inoue MHLEN >= hdrlen) { 1129ed6a66caSRobert Watson M_ALIGN(m, hdrlen); 1130fb59c426SYoshinobu Inoue } else 1131fb59c426SYoshinobu Inoue #endif 1132df8bae1dSRodney W. Grimes m->m_data += max_linkhdr; 1133df8bae1dSRodney W. Grimes m->m_len = hdrlen; 1134df8bae1dSRodney W. Grimes } 1135cf2942b6SRobert Watson SOCKBUF_UNLOCK_ASSERT(&so->so_snd); 1136df8bae1dSRodney W. Grimes m->m_pkthdr.rcvif = (struct ifnet *)0; 1137c488362eSRobert Watson #ifdef MAC 113830d239bcSRobert Watson mac_inpcb_create_mbuf(tp->t_inpcb, m); 1139c488362eSRobert Watson #endif 1140fb59c426SYoshinobu Inoue #ifdef INET6 1141fb59c426SYoshinobu Inoue if (isipv6) { 1142fb59c426SYoshinobu Inoue ip6 = mtod(m, struct ip6_hdr *); 1143fb59c426SYoshinobu Inoue th = (struct tcphdr *)(ip6 + 1); 114479909384SJonathan Lemon tcpip_fillheaders(tp->t_inpcb, ip6, th); 1145fb59c426SYoshinobu Inoue } else 1146fb59c426SYoshinobu Inoue #endif /* INET6 */ 1147fb59c426SYoshinobu Inoue { 1148fb59c426SYoshinobu Inoue ip = mtod(m, struct ip *); 1149151ba793SAlexander Kabaev #ifdef TCPDEBUG 1150fb59c426SYoshinobu Inoue ipov = (struct ipovly *)ip; 1151151ba793SAlexander Kabaev #endif 1152fb59c426SYoshinobu Inoue th = (struct tcphdr *)(ip + 1); 115379909384SJonathan Lemon tcpip_fillheaders(tp->t_inpcb, ip, th); 1154fb59c426SYoshinobu Inoue } 1155df8bae1dSRodney W. Grimes 1156df8bae1dSRodney W. Grimes /* 1157df8bae1dSRodney W. Grimes * Fill in fields, remembering maximum advertised 1158df8bae1dSRodney W. Grimes * window for use in delaying messages about window sizes. 1159df8bae1dSRodney W. Grimes * If resending a FIN, be sure not to use a new sequence number. 1160df8bae1dSRodney W. Grimes */ 1161df8bae1dSRodney W. Grimes if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 1162df8bae1dSRodney W. Grimes tp->snd_nxt == tp->snd_max) 1163df8bae1dSRodney W. Grimes tp->snd_nxt--; 1164df8bae1dSRodney W. Grimes /* 1165f2512ba1SRui Paulo * If we are starting a connection, send ECN setup 1166f2512ba1SRui Paulo * SYN packet. 
If we are on a retransmit, we may 1167f2512ba1SRui Paulo * resend those bits a number of times as per 1168f2512ba1SRui Paulo * RFC 3168. 1169f2512ba1SRui Paulo */ 1170883054b4SDon Lewis if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) { 1171f2512ba1SRui Paulo if (tp->t_rxtshift >= 1) { 1172603724d3SBjoern A. Zeeb if (tp->t_rxtshift <= V_tcp_ecn_maxretries) 1173f2512ba1SRui Paulo flags |= TH_ECE|TH_CWR; 1174f2512ba1SRui Paulo } else 1175f2512ba1SRui Paulo flags |= TH_ECE|TH_CWR; 1176f2512ba1SRui Paulo } 11776e16d877SRichard Scheffenegger /* Handle parallel SYN for ECN */ 11786e16d877SRichard Scheffenegger if ((tp->t_state == TCPS_SYN_RECEIVED) && 11796e16d877SRichard Scheffenegger (tp->t_flags2 & TF2_ECN_SND_ECE)) { 11806e16d877SRichard Scheffenegger flags |= TH_ECE; 11816e16d877SRichard Scheffenegger tp->t_flags2 &= ~TF2_ECN_SND_ECE; 11826e16d877SRichard Scheffenegger } 1183f2512ba1SRui Paulo 1184f2512ba1SRui Paulo if (tp->t_state == TCPS_ESTABLISHED && 11853cf38784SMichael Tuexen (tp->t_flags2 & TF2_ECN_PERMIT)) { 1186f2512ba1SRui Paulo /* 1187f2512ba1SRui Paulo * If the peer has ECN, mark data packets with 1188f2512ba1SRui Paulo * ECN capable transmission (ECT). 1189f2512ba1SRui Paulo * Ignore pure ack packets, retransmissions and window probes. 1190f2512ba1SRui Paulo */ 1191f2512ba1SRui Paulo if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) && 119247e2c17cSMichael Tuexen (sack_rxmit == 0) && 1193af2fb894SRichard Scheffenegger !((tp->t_flags & TF_FORCEDATA) && len == 1 && 1194af2fb894SRichard Scheffenegger SEQ_LT(tp->snd_una, tp->snd_max))) { 1195f2512ba1SRui Paulo #ifdef INET6 1196f2512ba1SRui Paulo if (isipv6) 1197f2512ba1SRui Paulo ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 1198f2512ba1SRui Paulo else 1199f2512ba1SRui Paulo #endif 1200f2512ba1SRui Paulo ip->ip_tos |= IPTOS_ECN_ECT0; 120178b50714SRobert Watson TCPSTAT_INC(tcps_ecn_ect0); 1202f2512ba1SRui Paulo /* 1203f2512ba1SRui Paulo * Reply with proper ECN notifications. 1204af2fb894SRichard Scheffenegger * Only set CWR on new data segments. 1205f2512ba1SRui Paulo */ 12063cf38784SMichael Tuexen if (tp->t_flags2 & TF2_ECN_SND_CWR) { 1207f2512ba1SRui Paulo flags |= TH_CWR; 12083cf38784SMichael Tuexen tp->t_flags2 &= ~TF2_ECN_SND_CWR; 1209f2512ba1SRui Paulo } 1210af2fb894SRichard Scheffenegger } 12113cf38784SMichael Tuexen if (tp->t_flags2 & TF2_ECN_SND_ECE) 1212f2512ba1SRui Paulo flags |= TH_ECE; 1213f2512ba1SRui Paulo } 1214f2512ba1SRui Paulo 1215f2512ba1SRui Paulo /* 1216df8bae1dSRodney W. Grimes * If we are doing retransmissions, then snd_nxt will 1217df8bae1dSRodney W. Grimes * not reflect the first unsent octet. For ACK only 1218df8bae1dSRodney W. Grimes * packets, we do not want the sequence number of the 1219df8bae1dSRodney W. Grimes * retransmitted packet, we want the sequence number 1220df8bae1dSRodney W. Grimes * of the next unsent octet. So, if there is no data 1221df8bae1dSRodney W. Grimes * (and no SYN or FIN), use snd_max instead of snd_nxt 1222df8bae1dSRodney W. Grimes * when filling in ti_seq. But if we are in persist 1223df8bae1dSRodney W. Grimes * state, snd_max might reflect one byte beyond the 1224df8bae1dSRodney W. Grimes * right edge of the window, so use snd_nxt in that 1225df8bae1dSRodney W. Grimes * case, since we know we aren't doing a retransmission. 1226df8bae1dSRodney W. Grimes * (retransmit and persist are mutually exclusive...) 1227df8bae1dSRodney W. 
Grimes */ 1228a55db2b6SPaul Saab if (sack_rxmit == 0) { 1229b8152ba7SAndre Oppermann if (len || (flags & (TH_SYN|TH_FIN)) || 1230b8152ba7SAndre Oppermann tcp_timer_active(tp, TT_PERSIST)) 1231fb59c426SYoshinobu Inoue th->th_seq = htonl(tp->snd_nxt); 1232df8bae1dSRodney W. Grimes else 1233fb59c426SYoshinobu Inoue th->th_seq = htonl(tp->snd_max); 1234a55db2b6SPaul Saab } else { 12356d90faf3SPaul Saab th->th_seq = htonl(p->rxmit); 12366d90faf3SPaul Saab p->rxmit += len; 12370077b016SPaul Saab tp->sackhint.sack_bytes_rexmit += len; 12386d90faf3SPaul Saab } 1239fb59c426SYoshinobu Inoue th->th_ack = htonl(tp->rcv_nxt); 1240df8bae1dSRodney W. Grimes if (optlen) { 1241fb59c426SYoshinobu Inoue bcopy(opt, th + 1, optlen); 1242fb59c426SYoshinobu Inoue th->th_off = (sizeof (struct tcphdr) + optlen) >> 2; 1243df8bae1dSRodney W. Grimes } 1244fb59c426SYoshinobu Inoue th->th_flags = flags; 1245df8bae1dSRodney W. Grimes /* 1246df8bae1dSRodney W. Grimes * Calculate receive window. Don't shrink window, 1247df8bae1dSRodney W. Grimes * but avoid silly window syndrome. 124879410718SMichael Tuexen * If a RST segment is sent, advertise a window of zero. 1249df8bae1dSRodney W. Grimes */ 125079410718SMichael Tuexen if (flags & TH_RST) { 125179410718SMichael Tuexen recwin = 0; 125279410718SMichael Tuexen } else { 12533ac12506SJonathan T. Looney if (recwin < (so->so_rcv.sb_hiwat / 4) && 12543ac12506SJonathan T. Looney recwin < tp->t_maxseg) 1255201d185bSAndre Oppermann recwin = 0; 1256f701e30dSJohn Baldwin if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 12573ac12506SJonathan T. Looney recwin < (tp->rcv_adv - tp->rcv_nxt)) 12583ac12506SJonathan T. Looney recwin = (tp->rcv_adv - tp->rcv_nxt); 125979410718SMichael Tuexen } 1260104ebb2aSAndre Oppermann /* 1261104ebb2aSAndre Oppermann * According to RFC1323 the window field in a SYN (i.e., a <SYN> 1262104ebb2aSAndre Oppermann * or <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> 1263104ebb2aSAndre Oppermann * case is handled in syncache. 1264104ebb2aSAndre Oppermann */ 1265104ebb2aSAndre Oppermann if (flags & TH_SYN) 1266104ebb2aSAndre Oppermann th->th_win = htons((u_short) 1267104ebb2aSAndre Oppermann (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 12689028b6e0SRichard Scheffenegger else { 12699028b6e0SRichard Scheffenegger /* Avoid shrinking window with window scaling. */ 12709028b6e0SRichard Scheffenegger recwin = roundup2(recwin, 1 << tp->rcv_scale); 1271104ebb2aSAndre Oppermann th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 12729028b6e0SRichard Scheffenegger } 1273262c1c1aSMatthew Dillon 1274262c1c1aSMatthew Dillon /* 1275262c1c1aSMatthew Dillon * Adjust the RXWIN0SENT flag - indicate that we have advertised 1276262c1c1aSMatthew Dillon * a 0 window. This may cause the remote transmitter to stall. This 1277262c1c1aSMatthew Dillon * flag tells soreceive() to disable delayed acknowledgements when 1278262c1c1aSMatthew Dillon * draining the buffer. This can occur if the receiver is attempting 1279b2722702SRui Paulo * to read more data than can be buffered prior to transmitting on 1280262c1c1aSMatthew Dillon * the connection. 1281262c1c1aSMatthew Dillon */ 1282f5d34df5SGeorge V. Neville-Neil if (th->th_win == 0) { 1283f5d34df5SGeorge V. Neville-Neil tp->t_sndzerowin++; 1284262c1c1aSMatthew Dillon tp->t_flags |= TF_RXWIN0SENT; 1285f5d34df5SGeorge V. Neville-Neil } else 1286262c1c1aSMatthew Dillon tp->t_flags &= ~TF_RXWIN0SENT; 1287df8bae1dSRodney W. 
Grimes if (SEQ_GT(tp->snd_up, tp->snd_nxt)) { 1288fb59c426SYoshinobu Inoue th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt)); 1289fb59c426SYoshinobu Inoue th->th_flags |= TH_URG; 1290df8bae1dSRodney W. Grimes } else 1291df8bae1dSRodney W. Grimes /* 1292df8bae1dSRodney W. Grimes * If no urgent pointer to send, then we pull 1293df8bae1dSRodney W. Grimes * the urgent pointer to the left edge of the send window 1294df8bae1dSRodney W. Grimes * so that it doesn't drift into the send window on sequence 1295df8bae1dSRodney W. Grimes * number wraparound. 1296df8bae1dSRodney W. Grimes */ 1297df8bae1dSRodney W. Grimes tp->snd_up = tp->snd_una; /* drag it along */ 1298df8bae1dSRodney W. Grimes 1299df8bae1dSRodney W. Grimes /* 1300df8bae1dSRodney W. Grimes * Put TCP length in extended header, and then 1301df8bae1dSRodney W. Grimes * checksum extended header and data. 1302df8bae1dSRodney W. Grimes */ 1303fb59c426SYoshinobu Inoue m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 130445747ba5SBjoern A. Zeeb m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 1305fcf59617SAndrey V. Elsukov 1306fcf59617SAndrey V. Elsukov #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 1307fcf59617SAndrey V. Elsukov if (to.to_flags & TOF_SIGNATURE) { 1308fcf59617SAndrey V. Elsukov /* 1309fcf59617SAndrey V. Elsukov * Calculate MD5 signature and put it into the place 1310fcf59617SAndrey V. Elsukov * determined before. 1311fcf59617SAndrey V. Elsukov * NOTE: since TCP options buffer doesn't point into 1312fcf59617SAndrey V. Elsukov * mbuf's data, calculate offset and use it. 1313fcf59617SAndrey V. Elsukov */ 13142aad6240SAndrey V. Elsukov if (!TCPMD5_ENABLED() || (error = TCPMD5_OUTPUT(m, th, 13152aad6240SAndrey V. Elsukov (u_char *)(th + 1) + (to.to_signature - opt))) != 0) { 1316fcf59617SAndrey V. Elsukov /* 1317fcf59617SAndrey V. Elsukov * Do not send segment if the calculation of MD5 1318fcf59617SAndrey V. Elsukov * digest has failed. 1319fcf59617SAndrey V. Elsukov */ 13202aad6240SAndrey V. Elsukov m_freem(m); 1321fcf59617SAndrey V. Elsukov goto out; 1322fcf59617SAndrey V. Elsukov } 1323fcf59617SAndrey V. Elsukov } 1324fcf59617SAndrey V. Elsukov #endif 1325fb59c426SYoshinobu Inoue #ifdef INET6 132645747ba5SBjoern A. Zeeb if (isipv6) { 1327fb59c426SYoshinobu Inoue /* 13283df96ee6SCy Schubert * There is no need to fill in ip6_plen right now. 13293df96ee6SCy Schubert * It will be filled later by ip6_output. 1330fb59c426SYoshinobu Inoue */ 1331356ab07eSBjoern A. Zeeb m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 133245747ba5SBjoern A. Zeeb th->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr) + 133345747ba5SBjoern A. Zeeb optlen + len, IPPROTO_TCP, 0); 133445747ba5SBjoern A. Zeeb } 133545747ba5SBjoern A. Zeeb #endif 133645747ba5SBjoern A. Zeeb #if defined(INET6) && defined(INET) 1337fb59c426SYoshinobu Inoue else 133845747ba5SBjoern A. Zeeb #endif 133945747ba5SBjoern A. Zeeb #ifdef INET 1340fb59c426SYoshinobu Inoue { 1341356ab07eSBjoern A. Zeeb m->m_pkthdr.csum_flags = CSUM_TCP; 134279909384SJonathan Lemon th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 134379909384SJonathan Lemon htons(sizeof(struct tcphdr) + IPPROTO_TCP + len + optlen)); 1344fb59c426SYoshinobu Inoue 1345db4f9cc7SJonathan Lemon /* IP version must be set here for ipv4/ipv6 checking later */ 1346db4f9cc7SJonathan Lemon KASSERT(ip->ip_v == IPVERSION, 13476e551fb6SDavid E. O'Brien ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 1348fb59c426SYoshinobu Inoue } 134945747ba5SBjoern A. Zeeb #endif 1350df8bae1dSRodney W. 
Grimes 1351df8bae1dSRodney W. Grimes /* 1352b3c0f300SAndre Oppermann * Enable TSO and specify the size of the segments. 1353b3c0f300SAndre Oppermann * The TCP pseudo header checksum is always provided. 1354b3c0f300SAndre Oppermann */ 1355b3c0f300SAndre Oppermann if (tso) { 13560c39d38dSGleb Smirnoff KASSERT(len > tp->t_maxseg - optlen, 1357153e5b57SAndre Oppermann ("%s: len <= tso_segsz", __func__)); 13583579cf4cSKenneth D. Merry m->m_pkthdr.csum_flags |= CSUM_TSO; 13590c39d38dSGleb Smirnoff m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 1360b3c0f300SAndre Oppermann } 1361b3c0f300SAndre Oppermann 136205fb056cSMichael Tuexen KASSERT(len + hdrlen == m_length(m, NULL), 136305fb056cSMichael Tuexen ("%s: mbuf chain shorter than expected: %d + %u != %u", 136405fb056cSMichael Tuexen __func__, len, hdrlen, m_length(m, NULL))); 1365ed420311SAndre Oppermann 1366bd79708dSJonathan T. Looney #ifdef TCP_HHOOK 136739bc9de5SLawrence Stewart /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 136839bc9de5SLawrence Stewart hhook_run_tcp_est_out(tp, th, &to, len, tso); 1369bd79708dSJonathan T. Looney #endif 137039bc9de5SLawrence Stewart 1371610ee2f9SDavid Greenman #ifdef TCPDEBUG 1372df8bae1dSRodney W. Grimes /* 1373df8bae1dSRodney W. Grimes * Trace. 1374df8bae1dSRodney W. Grimes */ 137591f467d5SHartmut Brandt if (so->so_options & SO_DEBUG) { 1376f3e0b7efSBruce M Simpson u_short save = 0; 13775214cb3fSBruce M Simpson #ifdef INET6 13785214cb3fSBruce M Simpson if (!isipv6) 13795214cb3fSBruce M Simpson #endif 13805214cb3fSBruce M Simpson { 13815214cb3fSBruce M Simpson save = ipov->ih_len; 138291f467d5SHartmut Brandt ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen + (th->th_off << 2) */); 13835214cb3fSBruce M Simpson } 1384fb59c426SYoshinobu Inoue tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0); 13855214cb3fSBruce M Simpson #ifdef INET6 13865214cb3fSBruce M Simpson if (!isipv6) 13875214cb3fSBruce M Simpson #endif 138891f467d5SHartmut Brandt ipov->ih_len = save; 138991f467d5SHartmut Brandt } 1390b287c6c7SBjoern A. Zeeb #endif /* TCPDEBUG */ 13912b9c9984SGeorge V. Neville-Neil TCP_PROBE3(debug__output, tp, th, m); 1392df8bae1dSRodney W. Grimes 1393dcaffbd6SJonathan T. Looney /* We're getting ready to send; log now. */ 1394dcaffbd6SJonathan T. Looney TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 1395dcaffbd6SJonathan T. Looney len, NULL, false); 1396dcaffbd6SJonathan T. Looney 1397df8bae1dSRodney W. Grimes /* 1398df8bae1dSRodney W. Grimes * Fill in IP length and desired time to live and 1399df8bae1dSRodney W. Grimes * send to IP level. There should be a better way 1400df8bae1dSRodney W. Grimes * to handle ttl and tos; we could keep them in 1401df8bae1dSRodney W. Grimes * the template, but need a way to checksum without them. 1402df8bae1dSRodney W. Grimes */ 1403fb59c426SYoshinobu Inoue /* 140443630e62SHiren Panchasara * m->m_pkthdr.len should have been set before checksum calculation, 1405fb59c426SYoshinobu Inoue * because in6_cksum() need it. 1406fb59c426SYoshinobu Inoue */ 1407fb59c426SYoshinobu Inoue #ifdef INET6 1408fb59c426SYoshinobu Inoue if (isipv6) { 1409fb59c426SYoshinobu Inoue /* 1410fb59c426SYoshinobu Inoue * we separately set hoplimit for every segment, since the 1411fb59c426SYoshinobu Inoue * user might want to change the value via setsockopt. 1412fb59c426SYoshinobu Inoue * Also, desired default hop limit might be changed via 1413fb59c426SYoshinobu Inoue * Neighbor Discovery. 
1414fb59c426SYoshinobu Inoue */ 141597d8d152SAndre Oppermann ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb, NULL); 1416fb59c426SYoshinobu Inoue 141757f60867SMark Johnston /* 141857f60867SMark Johnston * Set the packet size here for the benefit of DTrace probes. 141957f60867SMark Johnston * ip6_output() will set it properly; it's supposed to include 142057f60867SMark Johnston * the option header lengths as well. 142157f60867SMark Johnston */ 142257f60867SMark Johnston ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 142357f60867SMark Johnston 14240c39d38dSGleb Smirnoff if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 14250f3e3bc5SSean Bruno tp->t_flags2 |= TF2_PLPMTU_PMTUD; 14260f3e3bc5SSean Bruno else 14270f3e3bc5SSean Bruno tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 14280f3e3bc5SSean Bruno 142957f60867SMark Johnston if (tp->t_state == TCPS_SYN_SENT) 1430d9fae5abSAndriy Gapon TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 143157f60867SMark Johnston 143257f60867SMark Johnston TCP_PROBE5(send, NULL, tp, ip6, tp, th); 143357f60867SMark Johnston 143486a996e6SHiren Panchasara #ifdef TCPPCAP 143586a996e6SHiren Panchasara /* Save packet, if requested. */ 143686a996e6SHiren Panchasara tcp_pcap_add(th, m, &(tp->t_outpkts)); 143786a996e6SHiren Panchasara #endif 143886a996e6SHiren Panchasara 1439fb59c426SYoshinobu Inoue /* TODO: IPv6 IP6TOS_ECT bit on */ 14404a5c6c6aSMike Karels error = ip6_output(m, tp->t_inpcb->in6p_outputopts, 14414a5c6c6aSMike Karels &tp->t_inpcb->inp_route6, 1442df0633a1SGleb Smirnoff ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 1443df0633a1SGleb Smirnoff NULL, NULL, tp->t_inpcb); 1444df0633a1SGleb Smirnoff 1445983066f0SAlexander V. Chernikov if (error == EMSGSIZE && tp->t_inpcb->inp_route6.ro_nh != NULL) 1446983066f0SAlexander V. Chernikov mtu = tp->t_inpcb->inp_route6.ro_nh->nh_mtu; 1447b287c6c7SBjoern A. Zeeb } 1448fb59c426SYoshinobu Inoue #endif /* INET6 */ 1449b287c6c7SBjoern A. Zeeb #if defined(INET) && defined(INET6) 1450b287c6c7SBjoern A. Zeeb else 1451b287c6c7SBjoern A. Zeeb #endif 1452b287c6c7SBjoern A. Zeeb #ifdef INET 1453df8bae1dSRodney W. Grimes { 14548f134647SGleb Smirnoff ip->ip_len = htons(m->m_pkthdr.len); 1455686cdd19SJun-ichiro itojun Hagino #ifdef INET6 14565cd54324SBjoern A. Zeeb if (tp->t_inpcb->inp_vflag & INP_IPV6PROTO) 145797d8d152SAndre Oppermann ip->ip_ttl = in6_selecthlim(tp->t_inpcb, NULL); 1458686cdd19SJun-ichiro itojun Hagino #endif /* INET6 */ 1459f138387aSGarrett Wollman /* 146097d8d152SAndre Oppermann * If we do path MTU discovery, then we set DF on every packet. 146197d8d152SAndre Oppermann * This might not be the best thing to do according to RFC3390 146297d8d152SAndre Oppermann * Section 2. However, the tcp hostcache mitigates the problem 146397d8d152SAndre Oppermann * so it affects only the first tcp connection with a host. 1464e4e92660SAndre Oppermann * 1465e4e92660SAndre Oppermann * NB: Don't set DF on small MTU/MSS to have a safe fallback. 
1466f138387aSGarrett Wollman */ 14670c39d38dSGleb Smirnoff if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 14688f134647SGleb Smirnoff ip->ip_off |= htons(IP_DF); 1469f6f6703fSSean Bruno tp->t_flags2 |= TF2_PLPMTU_PMTUD; 1470f6f6703fSSean Bruno } else { 1471f6f6703fSSean Bruno tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 1472f6f6703fSSean Bruno } 147397d8d152SAndre Oppermann 147457f60867SMark Johnston if (tp->t_state == TCPS_SYN_SENT) 1475d9fae5abSAndriy Gapon TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 147657f60867SMark Johnston 147757f60867SMark Johnston TCP_PROBE5(send, NULL, tp, ip, tp, th); 147857f60867SMark Johnston 147986a996e6SHiren Panchasara #ifdef TCPPCAP 148086a996e6SHiren Panchasara /* Save packet, if requested. */ 148186a996e6SHiren Panchasara tcp_pcap_add(th, m, &(tp->t_outpkts)); 148286a996e6SHiren Panchasara #endif 148386a996e6SHiren Panchasara 148484cc0778SGeorge V. Neville-Neil error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route, 1485b5d47ff5SJohn-Mark Gurney ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, 1486b5d47ff5SJohn-Mark Gurney tp->t_inpcb); 1487df0633a1SGleb Smirnoff 1488983066f0SAlexander V. Chernikov if (error == EMSGSIZE && tp->t_inpcb->inp_route.ro_nh != NULL) 1489983066f0SAlexander V. Chernikov mtu = tp->t_inpcb->inp_route.ro_nh->nh_mtu; 1490df8bae1dSRodney W. Grimes } 1491b287c6c7SBjoern A. Zeeb #endif /* INET */ 14920e2bc05cSGleb Smirnoff 14930e2bc05cSGleb Smirnoff out: 14940e2bc05cSGleb Smirnoff /* 14950e2bc05cSGleb Smirnoff * In transmit state, time the transmission and arrange for 14960e2bc05cSGleb Smirnoff * the retransmit. In persist state, just set snd_max. 14970e2bc05cSGleb Smirnoff */ 14980e2bc05cSGleb Smirnoff if ((tp->t_flags & TF_FORCEDATA) == 0 || 14990e2bc05cSGleb Smirnoff !tcp_timer_active(tp, TT_PERSIST)) { 15000e2bc05cSGleb Smirnoff tcp_seq startseq = tp->snd_nxt; 15010e2bc05cSGleb Smirnoff 15020e2bc05cSGleb Smirnoff /* 15030e2bc05cSGleb Smirnoff * Advance snd_nxt over sequence space of this segment. 15040e2bc05cSGleb Smirnoff */ 15050e2bc05cSGleb Smirnoff if (flags & (TH_SYN|TH_FIN)) { 15060e2bc05cSGleb Smirnoff if (flags & TH_SYN) 15070e2bc05cSGleb Smirnoff tp->snd_nxt++; 15080e2bc05cSGleb Smirnoff if (flags & TH_FIN) { 15090e2bc05cSGleb Smirnoff tp->snd_nxt++; 15100e2bc05cSGleb Smirnoff tp->t_flags |= TF_SENTFIN; 15110e2bc05cSGleb Smirnoff } 15120e2bc05cSGleb Smirnoff } 15130e2bc05cSGleb Smirnoff if (sack_rxmit) 15140e2bc05cSGleb Smirnoff goto timer; 15150e2bc05cSGleb Smirnoff tp->snd_nxt += len; 15160e2bc05cSGleb Smirnoff if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 15170e2bc05cSGleb Smirnoff tp->snd_max = tp->snd_nxt; 15180e2bc05cSGleb Smirnoff /* 15190e2bc05cSGleb Smirnoff * Time this transmission if not a retransmission and 15200e2bc05cSGleb Smirnoff * not currently timing anything. 
15210e2bc05cSGleb Smirnoff */ 15229dc7d8a2SRichard Scheffenegger tp->t_sndtime = ticks; 15230e2bc05cSGleb Smirnoff if (tp->t_rtttime == 0) { 15240e2bc05cSGleb Smirnoff tp->t_rtttime = ticks; 15250e2bc05cSGleb Smirnoff tp->t_rtseq = startseq; 15260e2bc05cSGleb Smirnoff TCPSTAT_INC(tcps_segstimed); 15270e2bc05cSGleb Smirnoff } 1528adc56f5aSEdward Tomasz Napierala #ifdef STATS 1529adc56f5aSEdward Tomasz Napierala if (!(tp->t_flags & TF_GPUTINPROG) && len) { 1530adc56f5aSEdward Tomasz Napierala tp->t_flags |= TF_GPUTINPROG; 1531adc56f5aSEdward Tomasz Napierala tp->gput_seq = startseq; 1532adc56f5aSEdward Tomasz Napierala tp->gput_ack = startseq + 1533adc56f5aSEdward Tomasz Napierala ulmin(sbavail(&so->so_snd) - off, sendwin); 1534adc56f5aSEdward Tomasz Napierala tp->gput_ts = tcp_ts_getticks(); 1535adc56f5aSEdward Tomasz Napierala } 1536adc56f5aSEdward Tomasz Napierala #endif /* STATS */ 15370e2bc05cSGleb Smirnoff } 15380e2bc05cSGleb Smirnoff 15390e2bc05cSGleb Smirnoff /* 15400e2bc05cSGleb Smirnoff * Set retransmit timer if not currently set, 15410e2bc05cSGleb Smirnoff * and not doing a pure ack or a keep-alive probe. 15420e2bc05cSGleb Smirnoff * Initial value for retransmit timer is smoothed 15430e2bc05cSGleb Smirnoff * round-trip time + 2 * round-trip time variance. 15440e2bc05cSGleb Smirnoff * Initialize shift counter which is used for backoff 15450e2bc05cSGleb Smirnoff * of retransmit time. 15460e2bc05cSGleb Smirnoff */ 15470e2bc05cSGleb Smirnoff timer: 15480e2bc05cSGleb Smirnoff if (!tcp_timer_active(tp, TT_REXMT) && 15490e2bc05cSGleb Smirnoff ((sack_rxmit && tp->snd_nxt != tp->snd_max) || 15500e2bc05cSGleb Smirnoff (tp->snd_nxt != tp->snd_una))) { 15510e2bc05cSGleb Smirnoff if (tcp_timer_active(tp, TT_PERSIST)) { 15520e2bc05cSGleb Smirnoff tcp_timer_activate(tp, TT_PERSIST, 0); 15530e2bc05cSGleb Smirnoff tp->t_rxtshift = 0; 15540e2bc05cSGleb Smirnoff } 15550e2bc05cSGleb Smirnoff tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 1556f8568079SHiren Panchasara } else if (len == 0 && sbavail(&so->so_snd) && 1557f8568079SHiren Panchasara !tcp_timer_active(tp, TT_REXMT) && 1558f8568079SHiren Panchasara !tcp_timer_active(tp, TT_PERSIST)) { 1559f8568079SHiren Panchasara /* 1560f8568079SHiren Panchasara * Avoid a situation where we do not set persist timer 1561f8568079SHiren Panchasara * after a zero window condition. For example: 1562f8568079SHiren Panchasara * 1) A -> B: packet with enough data to fill the window 1563f8568079SHiren Panchasara * 2) B -> A: ACK for #1 + new data (0 window 1564f8568079SHiren Panchasara * advertisement) 1565f8568079SHiren Panchasara * 3) A -> B: ACK for #2, 0 len packet 1566f8568079SHiren Panchasara * 1567f8568079SHiren Panchasara * In this case, A will not activate the persist timer, 1568f8568079SHiren Panchasara * because it chose to send a packet. Unless tcp_output 1569f8568079SHiren Panchasara * is called for some other reason (delayed ack timer, 1570f8568079SHiren Panchasara * another input packet from B, socket syscall), A will 1571f8568079SHiren Panchasara * not send zero window probes. 1572f8568079SHiren Panchasara * 1573f8568079SHiren Panchasara * So, if you send a 0-length packet, but there is data 1574f8568079SHiren Panchasara * in the socket buffer, and neither the rexmt or 1575f8568079SHiren Panchasara * persist timer is already set, then activate the 1576f8568079SHiren Panchasara * persist timer. 
1577f8568079SHiren Panchasara */ 1578f8568079SHiren Panchasara tp->t_rxtshift = 0; 1579f8568079SHiren Panchasara tcp_setpersist(tp); 15800e2bc05cSGleb Smirnoff } 15810e2bc05cSGleb Smirnoff } else { 15820e2bc05cSGleb Smirnoff /* 15830e2bc05cSGleb Smirnoff * Persist case, update snd_max but since we are in 15840e2bc05cSGleb Smirnoff * persist mode (no window) we do not update snd_nxt. 15850e2bc05cSGleb Smirnoff */ 15860e2bc05cSGleb Smirnoff int xlen = len; 15870e2bc05cSGleb Smirnoff if (flags & TH_SYN) 15880e2bc05cSGleb Smirnoff ++xlen; 15890e2bc05cSGleb Smirnoff if (flags & TH_FIN) { 15900e2bc05cSGleb Smirnoff ++xlen; 15910e2bc05cSGleb Smirnoff tp->t_flags |= TF_SENTFIN; 15920e2bc05cSGleb Smirnoff } 15930e2bc05cSGleb Smirnoff if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) 159415c82571SJonathan T. Looney tp->snd_max = tp->snd_nxt + xlen; 15950e2bc05cSGleb Smirnoff } 1596e5926fd3SRandall Stewart if ((error == 0) && 1597e5926fd3SRandall Stewart (TCPS_HAVEESTABLISHED(tp->t_state) && 1598e5926fd3SRandall Stewart (tp->t_flags & TF_SACK_PERMIT) && 1599e5926fd3SRandall Stewart tp->rcv_numsacks > 0)) { 1600e5926fd3SRandall Stewart /* Clean up any DSACK's sent */ 1601e5926fd3SRandall Stewart tcp_clean_dsack_blocks(tp); 1602e5926fd3SRandall Stewart } 1603df8bae1dSRodney W. Grimes if (error) { 16042529f56eSJonathan T. Looney /* Record the error. */ 16052529f56eSJonathan T. Looney TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, 16062529f56eSJonathan T. Looney error, 0, NULL, false); 16077734ea06SArchie Cobbs 16087734ea06SArchie Cobbs /* 16097734ea06SArchie Cobbs * We know that the packet was lost, so back out the 16107734ea06SArchie Cobbs * sequence number advance, if any. 16112c30ec0aSAndre Oppermann * 16122c30ec0aSAndre Oppermann * If the error is EPERM the packet got blocked by the 16132c30ec0aSAndre Oppermann * local firewall. Normally we should terminate the 16142c30ec0aSAndre Oppermann * connection but the blocking may have been spurious 16152c30ec0aSAndre Oppermann * due to a firewall reconfiguration cycle. So we treat 16162c30ec0aSAndre Oppermann * it like a packet loss and let the retransmit timer and 16172c30ec0aSAndre Oppermann * timeouts do their work over time. 16182c30ec0aSAndre Oppermann * XXX: It is a POLA question whether calling tcp_drop right 16192c30ec0aSAndre Oppermann * away would be the really correct behavior instead. 16207734ea06SArchie Cobbs */ 162172757d9aSGleb Smirnoff if (((tp->t_flags & TF_FORCEDATA) == 0 || 1622b8152ba7SAndre Oppermann !tcp_timer_active(tp, TT_PERSIST)) && 162372757d9aSGleb Smirnoff ((flags & TH_SYN) == 0) && 162472757d9aSGleb Smirnoff (error != EPERM)) { 16250077b016SPaul Saab if (sack_rxmit) { 16265d3b1b75SJayanth Vijayaraghavan p->rxmit -= len; 16270077b016SPaul Saab tp->sackhint.sack_bytes_rexmit -= len; 162872757d9aSGleb Smirnoff KASSERT(tp->sackhint.sack_bytes_rexmit >= 0, 16290077b016SPaul Saab ("sackhint bytes rtx >= 0")); 16300077b016SPaul Saab } else 16317734ea06SArchie Cobbs tp->snd_nxt -= len; 16327734ea06SArchie Cobbs } 1633cf2942b6SRobert Watson SOCKBUF_UNLOCK_ASSERT(&so->so_snd); /* Check gotos. */ 163472757d9aSGleb Smirnoff switch (error) { 1635fcf59617SAndrey V. Elsukov case EACCES: 163672757d9aSGleb Smirnoff case EPERM: 163772757d9aSGleb Smirnoff tp->t_softerror = error; 163872757d9aSGleb Smirnoff return (error); 163972757d9aSGleb Smirnoff case ENOBUFS: 1640425b7639SSepherosa Ziehau TCP_XMIT_TIMER_ASSERT(tp, len, flags); 16411600372bSAndre Oppermann tp->snd_cwnd = tp->t_maxseg; 1642df8bae1dSRodney W. 
Grimes return (0); 164372757d9aSGleb Smirnoff case EMSGSIZE: 16443d1f141bSGarrett Wollman /* 1645b3c0f300SAndre Oppermann * For some reason the interface we used initially 1646b3c0f300SAndre Oppermann * to send segments changed to another or lowered 1647b3c0f300SAndre Oppermann * its MTU. 1648b3c0f300SAndre Oppermann * If TSO was active we either got an interface 1649b3c0f300SAndre Oppermann * without TSO capabilities or TSO was turned off. 1650df0633a1SGleb Smirnoff * If we obtained mtu from ip_output() then update 1651df0633a1SGleb Smirnoff * it and try again. 16523d1f141bSGarrett Wollman */ 1653b3c0f300SAndre Oppermann if (tso) 1654b3c0f300SAndre Oppermann tp->t_flags &= ~TF_TSO; 1655df0633a1SGleb Smirnoff if (mtu != 0) { 1656df0633a1SGleb Smirnoff tcp_mss_update(tp, -1, mtu, NULL, NULL); 1657df0633a1SGleb Smirnoff goto again; 1658df0633a1SGleb Smirnoff } 1659df0633a1SGleb Smirnoff return (error); 16608bec3467SGleb Smirnoff case EHOSTDOWN: 166172757d9aSGleb Smirnoff case EHOSTUNREACH: 166272757d9aSGleb Smirnoff case ENETDOWN: 16638bec3467SGleb Smirnoff case ENETUNREACH: 166472757d9aSGleb Smirnoff if (TCPS_HAVERCVDSYN(tp->t_state)) { 1665df8bae1dSRodney W. Grimes tp->t_softerror = error; 1666df8bae1dSRodney W. Grimes return (0); 1667df8bae1dSRodney W. Grimes } 166872757d9aSGleb Smirnoff /* FALLTHROUGH */ 166972757d9aSGleb Smirnoff default: 1670df8bae1dSRodney W. Grimes return (error); 1671df8bae1dSRodney W. Grimes } 167272757d9aSGleb Smirnoff } 167378b50714SRobert Watson TCPSTAT_INC(tcps_sndtotal); 1674df8bae1dSRodney W. Grimes 1675df8bae1dSRodney W. Grimes /* 1676df8bae1dSRodney W. Grimes * Data sent (as far as we can tell). 1677df8bae1dSRodney W. Grimes * If this advertises a larger window than any other segment, 1678df8bae1dSRodney W. Grimes * then remember the size of the advertised window. 1679df8bae1dSRodney W. Grimes * Any pending ACK has now been sent. 1680df8bae1dSRodney W. Grimes */ 16813ac12506SJonathan T. Looney if (SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 1682201d185bSAndre Oppermann tp->rcv_adv = tp->rcv_nxt + recwin; 1683df8bae1dSRodney W. Grimes tp->last_ack_sent = tp->rcv_nxt; 16843bfd6421SJonathan Lemon tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 1685b8152ba7SAndre Oppermann if (tcp_timer_active(tp, TT_DELACK)) 1686b8152ba7SAndre Oppermann tcp_timer_activate(tp, TT_DELACK, 0); 1687d912c694SMatthew Dillon #if 0 1688d912c694SMatthew Dillon /* 1689d912c694SMatthew Dillon * This completely breaks TCP if newreno is turned on. What happens 1690d912c694SMatthew Dillon * is that if delayed-acks are turned on on the receiver, this code 1691d912c694SMatthew Dillon * on the transmitter effectively destroys the TCP window, forcing 1692d912c694SMatthew Dillon * it to four packets (1.5Kx4 = 6K window). 1693d912c694SMatthew Dillon */ 1694dbc42409SLawrence Stewart if (sendalot && --maxburst) 1695df8bae1dSRodney W. Grimes goto again; 1696d912c694SMatthew Dillon #endif 1697d912c694SMatthew Dillon if (sendalot) 1698d912c694SMatthew Dillon goto again; 1699df8bae1dSRodney W. Grimes return (0); 1700df8bae1dSRodney W. Grimes } 1701df8bae1dSRodney W. Grimes 1702df8bae1dSRodney W. Grimes void 1703ad3f9ab3SAndre Oppermann tcp_setpersist(struct tcpcb *tp) 1704df8bae1dSRodney W. Grimes { 17059b8b58e0SJonathan Lemon int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1; 17069b8b58e0SJonathan Lemon int tt; 1707df8bae1dSRodney W. 
Grimes 1708672dc4aeSJohn Baldwin tp->t_flags &= ~TF_PREVVALID; 1709b8152ba7SAndre Oppermann if (tcp_timer_active(tp, TT_REXMT)) 17109b8b58e0SJonathan Lemon panic("tcp_setpersist: retransmit pending"); 1711df8bae1dSRodney W. Grimes /* 1712a4641f4eSPedro F. Giffuni * Start/restart persistence timer. 1713df8bae1dSRodney W. Grimes */ 17149b8b58e0SJonathan Lemon TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 17150645c604SHiren Panchasara tcp_persmin, tcp_persmax); 1716b8152ba7SAndre Oppermann tcp_timer_activate(tp, TT_PERSIST, tt); 1717df8bae1dSRodney W. Grimes if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 1718df8bae1dSRodney W. Grimes tp->t_rxtshift++; 1719df8bae1dSRodney W. Grimes } 172002a1a643SAndre Oppermann 172102a1a643SAndre Oppermann /* 172202a1a643SAndre Oppermann * Insert TCP options according to the supplied parameters to the place 172302a1a643SAndre Oppermann * optp in a consistent way. Can handle unaligned destinations. 172402a1a643SAndre Oppermann * 172502a1a643SAndre Oppermann * The order of the option processing is crucial for optimal packing and 172602a1a643SAndre Oppermann * alignment for the scarce option space. 172702a1a643SAndre Oppermann * 172802a1a643SAndre Oppermann * The optimal order for a SYN/SYN-ACK segment is: 172902a1a643SAndre Oppermann * MSS (4) + NOP (1) + Window scale (3) + SACK permitted (2) + 173002a1a643SAndre Oppermann * Timestamp (10) + Signature (18) = 38 bytes out of a maximum of 40. 173102a1a643SAndre Oppermann * 173202a1a643SAndre Oppermann * The SACK options should be last. SACK blocks consume 8*n+2 bytes. 173302a1a643SAndre Oppermann * So a full size SACK blocks option is 34 bytes (with 4 SACK blocks). 173402a1a643SAndre Oppermann * At minimum we need 10 bytes (to generate 1 SACK block). If both 173502a1a643SAndre Oppermann * TCP Timestamps (12 bytes) and TCP Signatures (18 bytes) are present, 173602a1a643SAndre Oppermann * we only have 10 bytes for SACK options (40 - (12 + 18)). 173702a1a643SAndre Oppermann */ 173802a1a643SAndre Oppermann int 173902a1a643SAndre Oppermann tcp_addoptions(struct tcpopt *to, u_char *optp) 174002a1a643SAndre Oppermann { 17415d20f974SJonathan T. 
Looney u_int32_t mask, optlen = 0; 174202a1a643SAndre Oppermann 174302a1a643SAndre Oppermann for (mask = 1; mask < TOF_MAXOPT; mask <<= 1) { 174402a1a643SAndre Oppermann if ((to->to_flags & mask) != mask) 174502a1a643SAndre Oppermann continue; 17463a4018c4SAndre Oppermann if (optlen == TCP_MAXOLEN) 17473a4018c4SAndre Oppermann break; 174802a1a643SAndre Oppermann switch (to->to_flags & mask) { 174902a1a643SAndre Oppermann case TOF_MSS: 175002a1a643SAndre Oppermann while (optlen % 4) { 175102a1a643SAndre Oppermann optlen += TCPOLEN_NOP; 175202a1a643SAndre Oppermann *optp++ = TCPOPT_NOP; 175302a1a643SAndre Oppermann } 17543a4018c4SAndre Oppermann if (TCP_MAXOLEN - optlen < TCPOLEN_MAXSEG) 17553a4018c4SAndre Oppermann continue; 175602a1a643SAndre Oppermann optlen += TCPOLEN_MAXSEG; 175702a1a643SAndre Oppermann *optp++ = TCPOPT_MAXSEG; 175802a1a643SAndre Oppermann *optp++ = TCPOLEN_MAXSEG; 175902a1a643SAndre Oppermann to->to_mss = htons(to->to_mss); 176002a1a643SAndre Oppermann bcopy((u_char *)&to->to_mss, optp, sizeof(to->to_mss)); 176102a1a643SAndre Oppermann optp += sizeof(to->to_mss); 176202a1a643SAndre Oppermann break; 176302a1a643SAndre Oppermann case TOF_SCALE: 176402a1a643SAndre Oppermann while (!optlen || optlen % 2 != 1) { 176502a1a643SAndre Oppermann optlen += TCPOLEN_NOP; 176602a1a643SAndre Oppermann *optp++ = TCPOPT_NOP; 176702a1a643SAndre Oppermann } 17683a4018c4SAndre Oppermann if (TCP_MAXOLEN - optlen < TCPOLEN_WINDOW) 17693a4018c4SAndre Oppermann continue; 177002a1a643SAndre Oppermann optlen += TCPOLEN_WINDOW; 177102a1a643SAndre Oppermann *optp++ = TCPOPT_WINDOW; 177202a1a643SAndre Oppermann *optp++ = TCPOLEN_WINDOW; 177302a1a643SAndre Oppermann *optp++ = to->to_wscale; 177402a1a643SAndre Oppermann break; 177502a1a643SAndre Oppermann case TOF_SACKPERM: 177602a1a643SAndre Oppermann while (optlen % 2) { 177702a1a643SAndre Oppermann optlen += TCPOLEN_NOP; 177802a1a643SAndre Oppermann *optp++ = TCPOPT_NOP; 177902a1a643SAndre Oppermann } 17803a4018c4SAndre Oppermann if (TCP_MAXOLEN - optlen < TCPOLEN_SACK_PERMITTED) 17813a4018c4SAndre Oppermann continue; 178202a1a643SAndre Oppermann optlen += TCPOLEN_SACK_PERMITTED; 178302a1a643SAndre Oppermann *optp++ = TCPOPT_SACK_PERMITTED; 178402a1a643SAndre Oppermann *optp++ = TCPOLEN_SACK_PERMITTED; 178502a1a643SAndre Oppermann break; 178602a1a643SAndre Oppermann case TOF_TS: 178702a1a643SAndre Oppermann while (!optlen || optlen % 4 != 2) { 178802a1a643SAndre Oppermann optlen += TCPOLEN_NOP; 178902a1a643SAndre Oppermann *optp++ = TCPOPT_NOP; 179002a1a643SAndre Oppermann } 17913a4018c4SAndre Oppermann if (TCP_MAXOLEN - optlen < TCPOLEN_TIMESTAMP) 17923a4018c4SAndre Oppermann continue; 179302a1a643SAndre Oppermann optlen += TCPOLEN_TIMESTAMP; 179402a1a643SAndre Oppermann *optp++ = TCPOPT_TIMESTAMP; 179502a1a643SAndre Oppermann *optp++ = TCPOLEN_TIMESTAMP; 179602a1a643SAndre Oppermann to->to_tsval = htonl(to->to_tsval); 179702a1a643SAndre Oppermann to->to_tsecr = htonl(to->to_tsecr); 179802a1a643SAndre Oppermann bcopy((u_char *)&to->to_tsval, optp, sizeof(to->to_tsval)); 179902a1a643SAndre Oppermann optp += sizeof(to->to_tsval); 180002a1a643SAndre Oppermann bcopy((u_char *)&to->to_tsecr, optp, sizeof(to->to_tsecr)); 180102a1a643SAndre Oppermann optp += sizeof(to->to_tsecr); 180202a1a643SAndre Oppermann break; 180302a1a643SAndre Oppermann case TOF_SIGNATURE: 180402a1a643SAndre Oppermann { 180502a1a643SAndre Oppermann int siglen = TCPOLEN_SIGNATURE - 2; 180602a1a643SAndre Oppermann 180702a1a643SAndre Oppermann while (!optlen || optlen % 4 != 2) { 
180802a1a643SAndre Oppermann optlen += TCPOLEN_NOP; 180902a1a643SAndre Oppermann *optp++ = TCPOPT_NOP; 181002a1a643SAndre Oppermann } 1811fcf59617SAndrey V. Elsukov if (TCP_MAXOLEN - optlen < TCPOLEN_SIGNATURE) { 1812fcf59617SAndrey V. Elsukov to->to_flags &= ~TOF_SIGNATURE; 181302a1a643SAndre Oppermann continue; 1814fcf59617SAndrey V. Elsukov } 181502a1a643SAndre Oppermann optlen += TCPOLEN_SIGNATURE; 181602a1a643SAndre Oppermann *optp++ = TCPOPT_SIGNATURE; 181702a1a643SAndre Oppermann *optp++ = TCPOLEN_SIGNATURE; 181802a1a643SAndre Oppermann to->to_signature = optp; 181902a1a643SAndre Oppermann while (siglen--) 182002a1a643SAndre Oppermann *optp++ = 0; 182102a1a643SAndre Oppermann break; 182202a1a643SAndre Oppermann } 182302a1a643SAndre Oppermann case TOF_SACK: 182402a1a643SAndre Oppermann { 182502a1a643SAndre Oppermann int sackblks = 0; 182602a1a643SAndre Oppermann struct sackblk *sack = (struct sackblk *)to->to_sacks; 182702a1a643SAndre Oppermann tcp_seq sack_seq; 182802a1a643SAndre Oppermann 182902a1a643SAndre Oppermann while (!optlen || optlen % 4 != 2) { 183002a1a643SAndre Oppermann optlen += TCPOLEN_NOP; 183102a1a643SAndre Oppermann *optp++ = TCPOPT_NOP; 183202a1a643SAndre Oppermann } 18333a4018c4SAndre Oppermann if (TCP_MAXOLEN - optlen < TCPOLEN_SACKHDR + TCPOLEN_SACK) 183402a1a643SAndre Oppermann continue; 183502a1a643SAndre Oppermann optlen += TCPOLEN_SACKHDR; 183602a1a643SAndre Oppermann *optp++ = TCPOPT_SACK; 183702a1a643SAndre Oppermann sackblks = min(to->to_nsacks, 18380d957bbaSAndre Oppermann (TCP_MAXOLEN - optlen) / TCPOLEN_SACK); 183902a1a643SAndre Oppermann *optp++ = TCPOLEN_SACKHDR + sackblks * TCPOLEN_SACK; 184002a1a643SAndre Oppermann while (sackblks--) { 184102a1a643SAndre Oppermann sack_seq = htonl(sack->start); 184202a1a643SAndre Oppermann bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq)); 184302a1a643SAndre Oppermann optp += sizeof(sack_seq); 184402a1a643SAndre Oppermann sack_seq = htonl(sack->end); 184502a1a643SAndre Oppermann bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq)); 184602a1a643SAndre Oppermann optp += sizeof(sack_seq); 184702a1a643SAndre Oppermann optlen += TCPOLEN_SACK; 184802a1a643SAndre Oppermann sack++; 184902a1a643SAndre Oppermann } 185078b50714SRobert Watson TCPSTAT_INC(tcps_sack_send_blocks); 185102a1a643SAndre Oppermann break; 185202a1a643SAndre Oppermann } 1853281a0fd4SPatrick Kelsey case TOF_FASTOPEN: 1854281a0fd4SPatrick Kelsey { 1855281a0fd4SPatrick Kelsey int total_len; 1856281a0fd4SPatrick Kelsey 1857281a0fd4SPatrick Kelsey /* XXX is there any point to aligning this option? 
*/ 1858281a0fd4SPatrick Kelsey total_len = TCPOLEN_FAST_OPEN_EMPTY + to->to_tfo_len; 1859c560df6fSPatrick Kelsey if (TCP_MAXOLEN - optlen < total_len) { 1860c560df6fSPatrick Kelsey to->to_flags &= ~TOF_FASTOPEN; 1861281a0fd4SPatrick Kelsey continue; 1862c560df6fSPatrick Kelsey } 1863281a0fd4SPatrick Kelsey *optp++ = TCPOPT_FAST_OPEN; 1864281a0fd4SPatrick Kelsey *optp++ = total_len; 1865281a0fd4SPatrick Kelsey if (to->to_tfo_len > 0) { 1866281a0fd4SPatrick Kelsey bcopy(to->to_tfo_cookie, optp, to->to_tfo_len); 1867281a0fd4SPatrick Kelsey optp += to->to_tfo_len; 1868281a0fd4SPatrick Kelsey } 1869281a0fd4SPatrick Kelsey optlen += total_len; 1870281a0fd4SPatrick Kelsey break; 1871281a0fd4SPatrick Kelsey } 187202a1a643SAndre Oppermann default: 187302a1a643SAndre Oppermann panic("%s: unknown TCP option type", __func__); 187402a1a643SAndre Oppermann break; 187502a1a643SAndre Oppermann } 187602a1a643SAndre Oppermann } 187702a1a643SAndre Oppermann 187802a1a643SAndre Oppermann /* Terminate and pad TCP options to a 4 byte boundary. */ 187902a1a643SAndre Oppermann if (optlen % 4) { 188002a1a643SAndre Oppermann optlen += TCPOLEN_EOL; 188102a1a643SAndre Oppermann *optp++ = TCPOPT_EOL; 188202a1a643SAndre Oppermann } 1883413deb12SBjoern A. Zeeb /* 1884413deb12SBjoern A. Zeeb * According to RFC 793 (STD0007): 1885413deb12SBjoern A. Zeeb * "The content of the header beyond the End-of-Option option 1886413deb12SBjoern A. Zeeb * must be header padding (i.e., zero)." 1887413deb12SBjoern A. Zeeb * and later: "The padding is composed of zeros." 1888413deb12SBjoern A. Zeeb */ 188902a1a643SAndre Oppermann while (optlen % 4) { 1890c343c524SAndre Oppermann optlen += TCPOLEN_PAD; 1891c343c524SAndre Oppermann *optp++ = TCPOPT_PAD; 189202a1a643SAndre Oppermann } 189302a1a643SAndre Oppermann 18940d957bbaSAndre Oppermann KASSERT(optlen <= TCP_MAXOLEN, ("%s: TCP options too long", __func__)); 189502a1a643SAndre Oppermann return (optlen); 189602a1a643SAndre Oppermann } 189766492feaSGleb Smirnoff 189889e560f4SRandall Stewart /* 189989e560f4SRandall Stewart * This is a copy of m_copym(), taking the TSO segment size/limit 190089e560f4SRandall Stewart * constraints into account, and advancing the sndptr as it goes. 
190189e560f4SRandall Stewart */ 190289e560f4SRandall Stewart struct mbuf * 190389e560f4SRandall Stewart tcp_m_copym(struct mbuf *m, int32_t off0, int32_t *plen, 1904b2e60773SJohn Baldwin int32_t seglimit, int32_t segsize, struct sockbuf *sb, bool hw_tls) 190589e560f4SRandall Stewart { 1906b2e60773SJohn Baldwin #ifdef KERN_TLS 1907b2e60773SJohn Baldwin struct ktls_session *tls, *ntls; 1908b2e60773SJohn Baldwin struct mbuf *start; 1909b2e60773SJohn Baldwin #endif 191089e560f4SRandall Stewart struct mbuf *n, **np; 191189e560f4SRandall Stewart struct mbuf *top; 191289e560f4SRandall Stewart int32_t off = off0; 191389e560f4SRandall Stewart int32_t len = *plen; 191489e560f4SRandall Stewart int32_t fragsize; 191589e560f4SRandall Stewart int32_t len_cp = 0; 191689e560f4SRandall Stewart int32_t *pkthdrlen; 191789e560f4SRandall Stewart uint32_t mlen, frags; 191889e560f4SRandall Stewart bool copyhdr; 191989e560f4SRandall Stewart 192089e560f4SRandall Stewart KASSERT(off >= 0, ("tcp_m_copym, negative off %d", off)); 192189e560f4SRandall Stewart KASSERT(len >= 0, ("tcp_m_copym, negative len %d", len)); 192289e560f4SRandall Stewart if (off == 0 && m->m_flags & M_PKTHDR) 192389e560f4SRandall Stewart copyhdr = true; 192489e560f4SRandall Stewart else 192589e560f4SRandall Stewart copyhdr = false; 192689e560f4SRandall Stewart while (off > 0) { 192789e560f4SRandall Stewart KASSERT(m != NULL, ("tcp_m_copym, offset > size of mbuf chain")); 192889e560f4SRandall Stewart if (off < m->m_len) 192989e560f4SRandall Stewart break; 193089e560f4SRandall Stewart off -= m->m_len; 193189e560f4SRandall Stewart if ((sb) && (m == sb->sb_sndptr)) { 193289e560f4SRandall Stewart sb->sb_sndptroff += m->m_len; 193389e560f4SRandall Stewart sb->sb_sndptr = m->m_next; 193489e560f4SRandall Stewart } 193589e560f4SRandall Stewart m = m->m_next; 193689e560f4SRandall Stewart } 193789e560f4SRandall Stewart np = ⊤ 193889e560f4SRandall Stewart top = NULL; 193989e560f4SRandall Stewart pkthdrlen = NULL; 1940b2e60773SJohn Baldwin #ifdef KERN_TLS 19416edfd179SGleb Smirnoff if (hw_tls && (m->m_flags & M_EXTPG)) 19427b6c99d0SGleb Smirnoff tls = m->m_epg_tls; 1943b2e60773SJohn Baldwin else 1944b2e60773SJohn Baldwin tls = NULL; 1945b2e60773SJohn Baldwin start = m; 1946b2e60773SJohn Baldwin #endif 194789e560f4SRandall Stewart while (len > 0) { 194889e560f4SRandall Stewart if (m == NULL) { 194989e560f4SRandall Stewart KASSERT(len == M_COPYALL, 195089e560f4SRandall Stewart ("tcp_m_copym, length > size of mbuf chain")); 195189e560f4SRandall Stewart *plen = len_cp; 195289e560f4SRandall Stewart if (pkthdrlen != NULL) 195389e560f4SRandall Stewart *pkthdrlen = len_cp; 195489e560f4SRandall Stewart break; 195589e560f4SRandall Stewart } 1956b2e60773SJohn Baldwin #ifdef KERN_TLS 1957b2e60773SJohn Baldwin if (hw_tls) { 19586edfd179SGleb Smirnoff if (m->m_flags & M_EXTPG) 19597b6c99d0SGleb Smirnoff ntls = m->m_epg_tls; 1960b2e60773SJohn Baldwin else 1961b2e60773SJohn Baldwin ntls = NULL; 1962b2e60773SJohn Baldwin 1963b2e60773SJohn Baldwin /* 1964b2e60773SJohn Baldwin * Avoid mixing TLS records with handshake 1965b2e60773SJohn Baldwin * data or TLS records from different 1966b2e60773SJohn Baldwin * sessions. 
1967b2e60773SJohn Baldwin */ 1968b2e60773SJohn Baldwin if (tls != ntls) { 1969b2e60773SJohn Baldwin MPASS(m != start); 1970b2e60773SJohn Baldwin *plen = len_cp; 1971b2e60773SJohn Baldwin if (pkthdrlen != NULL) 1972b2e60773SJohn Baldwin *pkthdrlen = len_cp; 1973b2e60773SJohn Baldwin break; 1974b2e60773SJohn Baldwin } 1975b2e60773SJohn Baldwin } 1976b2e60773SJohn Baldwin #endif 197789e560f4SRandall Stewart mlen = min(len, m->m_len - off); 197889e560f4SRandall Stewart if (seglimit) { 197989e560f4SRandall Stewart /* 19806edfd179SGleb Smirnoff * For M_EXTPG mbufs, add 3 segments 198189e560f4SRandall Stewart * + 1 in case we are crossing page boundaries 198289e560f4SRandall Stewart * + 2 in case the TLS hdr/trailer are used 198389e560f4SRandall Stewart * It is cheaper to just add the segments 198489e560f4SRandall Stewart * than it is to take the cache miss to look 198589e560f4SRandall Stewart * at the mbuf ext_pgs state in detail. 198689e560f4SRandall Stewart */ 19876edfd179SGleb Smirnoff if (m->m_flags & M_EXTPG) { 198889e560f4SRandall Stewart fragsize = min(segsize, PAGE_SIZE); 198989e560f4SRandall Stewart frags = 3; 199089e560f4SRandall Stewart } else { 199189e560f4SRandall Stewart fragsize = segsize; 199289e560f4SRandall Stewart frags = 0; 199389e560f4SRandall Stewart } 199489e560f4SRandall Stewart 199589e560f4SRandall Stewart /* Break if we really can't fit anymore. */ 199689e560f4SRandall Stewart if ((frags + 1) >= seglimit) { 199789e560f4SRandall Stewart *plen = len_cp; 199889e560f4SRandall Stewart if (pkthdrlen != NULL) 199989e560f4SRandall Stewart *pkthdrlen = len_cp; 200089e560f4SRandall Stewart break; 200189e560f4SRandall Stewart } 200289e560f4SRandall Stewart 200389e560f4SRandall Stewart /* 200489e560f4SRandall Stewart * Reduce size if you can't copy the whole 200589e560f4SRandall Stewart * mbuf. If we can't copy the whole mbuf, also 200689e560f4SRandall Stewart * adjust len so the loop will end after this 200789e560f4SRandall Stewart * mbuf. 
200889e560f4SRandall Stewart */ 200989e560f4SRandall Stewart if ((frags + howmany(mlen, fragsize)) >= seglimit) { 201089e560f4SRandall Stewart mlen = (seglimit - frags - 1) * fragsize; 201189e560f4SRandall Stewart len = mlen; 201289e560f4SRandall Stewart *plen = len_cp + len; 201389e560f4SRandall Stewart if (pkthdrlen != NULL) 201489e560f4SRandall Stewart *pkthdrlen = *plen; 201589e560f4SRandall Stewart } 201689e560f4SRandall Stewart frags += howmany(mlen, fragsize); 201789e560f4SRandall Stewart if (frags == 0) 201889e560f4SRandall Stewart frags++; 201989e560f4SRandall Stewart seglimit -= frags; 202089e560f4SRandall Stewart KASSERT(seglimit > 0, 202189e560f4SRandall Stewart ("%s: seglimit went too low", __func__)); 202289e560f4SRandall Stewart } 202389e560f4SRandall Stewart if (copyhdr) 202489e560f4SRandall Stewart n = m_gethdr(M_NOWAIT, m->m_type); 202589e560f4SRandall Stewart else 202689e560f4SRandall Stewart n = m_get(M_NOWAIT, m->m_type); 202789e560f4SRandall Stewart *np = n; 202889e560f4SRandall Stewart if (n == NULL) 202989e560f4SRandall Stewart goto nospace; 203089e560f4SRandall Stewart if (copyhdr) { 203189e560f4SRandall Stewart if (!m_dup_pkthdr(n, m, M_NOWAIT)) 203289e560f4SRandall Stewart goto nospace; 203389e560f4SRandall Stewart if (len == M_COPYALL) 203489e560f4SRandall Stewart n->m_pkthdr.len -= off0; 203589e560f4SRandall Stewart else 203689e560f4SRandall Stewart n->m_pkthdr.len = len; 203789e560f4SRandall Stewart pkthdrlen = &n->m_pkthdr.len; 203889e560f4SRandall Stewart copyhdr = false; 203989e560f4SRandall Stewart } 204089e560f4SRandall Stewart n->m_len = mlen; 204189e560f4SRandall Stewart len_cp += n->m_len; 204261664ee7SGleb Smirnoff if (m->m_flags & (M_EXT|M_EXTPG)) { 204389e560f4SRandall Stewart n->m_data = m->m_data + off; 204489e560f4SRandall Stewart mb_dupcl(n, m); 204589e560f4SRandall Stewart } else 204689e560f4SRandall Stewart bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 204789e560f4SRandall Stewart (u_int)n->m_len); 204889e560f4SRandall Stewart 204989e560f4SRandall Stewart if (sb && (sb->sb_sndptr == m) && 205089e560f4SRandall Stewart ((n->m_len + off) >= m->m_len) && m->m_next) { 205189e560f4SRandall Stewart sb->sb_sndptroff += m->m_len; 205289e560f4SRandall Stewart sb->sb_sndptr = m->m_next; 205389e560f4SRandall Stewart } 205489e560f4SRandall Stewart off = 0; 205589e560f4SRandall Stewart if (len != M_COPYALL) { 205689e560f4SRandall Stewart len -= n->m_len; 205789e560f4SRandall Stewart } 205889e560f4SRandall Stewart m = m->m_next; 205989e560f4SRandall Stewart np = &n->m_next; 206089e560f4SRandall Stewart } 206189e560f4SRandall Stewart return (top); 206289e560f4SRandall Stewart nospace: 206389e560f4SRandall Stewart m_freem(top); 206489e560f4SRandall Stewart return (NULL); 206589e560f4SRandall Stewart } 206689e560f4SRandall Stewart 206766492feaSGleb Smirnoff void 206866492feaSGleb Smirnoff tcp_sndbuf_autoscale(struct tcpcb *tp, struct socket *so, uint32_t sendwin) 206966492feaSGleb Smirnoff { 207066492feaSGleb Smirnoff 207166492feaSGleb Smirnoff /* 207266492feaSGleb Smirnoff * Automatic sizing of send socket buffer. Often the send buffer 207366492feaSGleb Smirnoff * size is not optimally adjusted to the actual network conditions 207466492feaSGleb Smirnoff * at hand (delay bandwidth product). Setting the buffer size too 207566492feaSGleb Smirnoff * small limits throughput on links with high bandwidth and high 207666492feaSGleb Smirnoff * delay (eg. trans-continental/oceanic links). 
Setting the 207766492feaSGleb Smirnoff * buffer size too big consumes too much real kernel memory, 207866492feaSGleb Smirnoff * especially with many connections on busy servers. 207966492feaSGleb Smirnoff * 208066492feaSGleb Smirnoff * The criteria to step up the send buffer one notch are: 208166492feaSGleb Smirnoff * 1. receive window of remote host is larger than send buffer 208266492feaSGleb Smirnoff * (with a fudge factor of 5/4th); 208366492feaSGleb Smirnoff * 2. send buffer is filled to 7/8th with data (so we actually 208466492feaSGleb Smirnoff * have data to make use of it); 208566492feaSGleb Smirnoff * 3. send buffer fill has not hit maximal automatic size; 208666492feaSGleb Smirnoff * 4. our send window (slow start and congestion controlled) is 208766492feaSGleb Smirnoff * larger than sent but unacknowledged data in send buffer. 208866492feaSGleb Smirnoff * 208966492feaSGleb Smirnoff * The remote host receive window scaling factor may limit the 209066492feaSGleb Smirnoff * growing of the send buffer before it reaches its allowed 209166492feaSGleb Smirnoff * maximum. 209266492feaSGleb Smirnoff * 209366492feaSGleb Smirnoff * It scales directly with slow start or congestion window 209466492feaSGleb Smirnoff * and does at most one step per received ACK. This fast 209566492feaSGleb Smirnoff * scaling has the drawback of growing the send buffer beyond 209666492feaSGleb Smirnoff * what is strictly necessary to make full use of a given 209766492feaSGleb Smirnoff * delay*bandwidth product. However, testing has shown this not 209866492feaSGleb Smirnoff * to be much of a problem. At worst we are trading wasting 209966492feaSGleb Smirnoff * of available bandwidth (the non-use of it) for wasting some 210066492feaSGleb Smirnoff * socket buffer memory. 210166492feaSGleb Smirnoff * 210266492feaSGleb Smirnoff * TODO: Shrink send buffer during idle periods together 210366492feaSGleb Smirnoff * with congestion window. Requires another timer. Has to 210466492feaSGleb Smirnoff * wait for upcoming tcp timer rewrite. 210566492feaSGleb Smirnoff * 210666492feaSGleb Smirnoff * XXXGL: should there be used sbused() or sbavail()? 210766492feaSGleb Smirnoff */ 210866492feaSGleb Smirnoff if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 210966492feaSGleb Smirnoff int lowat; 211066492feaSGleb Smirnoff 211166492feaSGleb Smirnoff lowat = V_tcp_sendbuf_auto_lowat ? so->so_snd.sb_lowat : 0; 211266492feaSGleb Smirnoff if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat - lowat && 211366492feaSGleb Smirnoff sbused(&so->so_snd) >= 211466492feaSGleb Smirnoff (so->so_snd.sb_hiwat / 8 * 7) - lowat && 211566492feaSGleb Smirnoff sbused(&so->so_snd) < V_tcp_autosndbuf_max && 211666492feaSGleb Smirnoff sendwin >= (sbused(&so->so_snd) - 211766492feaSGleb Smirnoff (tp->snd_nxt - tp->snd_una))) { 211866492feaSGleb Smirnoff if (!sbreserve_locked(&so->so_snd, 211966492feaSGleb Smirnoff min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc, 212066492feaSGleb Smirnoff V_tcp_autosndbuf_max), so, curthread)) 212166492feaSGleb Smirnoff so->so_snd.sb_flags &= ~SB_AUTOSIZE; 212266492feaSGleb Smirnoff } 212366492feaSGleb Smirnoff } 212466492feaSGleb Smirnoff } 2125
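/*
 * Illustrative sketch, not part of tcp_output.c: a small standalone
 * userland program that only restates the option-space arithmetic from
 * the comment above tcp_addoptions().  The EX_* constants and the
 * program are local to this example; the kernel equivalents are
 * TCP_MAXOLEN, TCPOLEN_SACKHDR and TCPOLEN_SACK from <netinet/tcp.h>.
 *
 * SYN/SYN-ACK packing: MSS (4) + NOP (1) + Window scale (3) +
 * SACK permitted (2) + Timestamp (10) + Signature (18) = 38 of 40 bytes.
 * Data segment carrying Timestamps (12 bytes, NOP-aligned) and an MD5
 * Signature (18 bytes): 40 - (12 + 18) = 10 bytes remain, which is the
 * 2-byte SACK option header plus a single 8-byte SACK block.  This is
 * why the SACK option is emitted last: it consumes whatever space the
 * fixed-size options have left over.
 */
#include <stdio.h>

#define EX_TCP_MAXOLEN	40	/* maximum TCP option space */
#define EX_TS_APPA	12	/* timestamp option, NOP-aligned */
#define EX_SIGLEN	18	/* TCP-MD5 signature option */
#define EX_SACKHDR	2	/* kind + length bytes of the SACK option */
#define EX_SACKBLK	8	/* one SACK block (start + end sequence) */

int
main(void)
{
	int syn_opts = 4 + 1 + 3 + 2 + 10 + 18;
	int left = EX_TCP_MAXOLEN - (EX_TS_APPA + EX_SIGLEN);
	int blocks = (left - EX_SACKHDR) / EX_SACKBLK;

	printf("SYN/SYN-ACK options: %d of %d bytes\n",
	    syn_opts, EX_TCP_MAXOLEN);
	printf("data segment: %d option bytes left, %d SACK block(s) fit\n",
	    left, blocks);
	return (0);
}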