/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/arb.h>
#include <sys/domain.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/kernel.h>
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/qmath.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/stats.h>

#include <net/if.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>
VNET_DEFINE(int, path_mtu_discovery) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(path_mtu_discovery), 1,
    "Enable Path MTU Discovery");

VNET_DEFINE(int, tcp_do_tso) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_tso), 0,
    "Enable TCP Segmentation Offload");

VNET_DEFINE(int, tcp_sendspace) = 1024*32;
#define	V_tcp_sendspace	VNET(tcp_sendspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_sendspace), 0, "Initial send socket buffer size");

VNET_DEFINE(int, tcp_do_autosndbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autosndbuf), 0,
    "Enable automatic send buffer sizing");

VNET_DEFINE(int, tcp_autosndbuf_inc) = 8*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autosndbuf_inc), 0,
127 "Incrementor step size of automatic send buffer");

VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autosndbuf_max), 0,
    "Max size of automatic send buffer");

VNET_DEFINE(int, tcp_sendbuf_auto_lowat) = 0;
#define	V_tcp_sendbuf_auto_lowat	VNET(tcp_sendbuf_auto_lowat)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto_lowat, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_sendbuf_auto_lowat), 0,
    "Modify threshold for auto send buffer growth to account for SO_SNDLOWAT");

/*
 * Make sure that either retransmit or persist timer is set for SYN, FIN and
 * non-ACK.
 */
#define	TCP_XMIT_TIMER_ASSERT(tp, len, th_flags)			\
	KASSERT(((len) == 0 && ((th_flags) & (TH_SYN | TH_FIN)) == 0) ||\
	    tcp_timer_active((tp), TT_REXMT) ||				\
	    tcp_timer_active((tp), TT_PERSIST),				\
	    ("neither rexmt nor persist timer is set"))

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established output helper hook.
 */
void
hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to, uint32_t len, int tso)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_OUT]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;
		hhook_data.len = len;
		hhook_data.tso = tso;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_OUT], &hhook_data,
		    &tp->t_osd);
	}
}
#endif

/*
 * CC wrapper hook functions
 */
void
cc_after_idle(struct tcpcb *tp)
{
	INP_WLOCK_ASSERT(tptoinpcb(tp));

	if (CC_ALGO(tp)->after_idle != NULL)
		CC_ALGO(tp)->after_idle(&tp->t_ccv);
}

/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_default_output(struct tcpcb *tp)
{
	struct socket *so = tptosocket(tp);
	struct inpcb *inp = tptoinpcb(tp);
	int32_t len;
	uint32_t recwin, sendwin;
	uint16_t flags;
	int off, error = 0;	/* Keep compiler happy */
	u_int if_hw_tsomaxsegcount = 0;
	u_int if_hw_tsomaxsegsize = 0;
	struct mbuf *m;
	struct ip *ip = NULL;
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned ipoptlen, optlen, hdrlen, ulen;
	unsigned ipsec_optlen = 0;
	int idle, sendalot, curticks;
	int sack_bytes_rxmt;
	struct sackhole *p;
	int tso, mtu;
	struct tcpopt to;
	struct udphdr *udp = NULL;
	struct tcp_log_buffer *lgb;
	unsigned int wanted_cookie = 0;
	unsigned int dont_sendalot = 0;
	bool sack_rxmit;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	const bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif
#ifdef KERN_TLS
	const bool hw_tls = tp->t_nic_ktls_xmit != 0;
#else
	const bool hw_tls = false;
#endif

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(inp);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return (tcp_offload_output(tp));
#endif

	/*
	 * For TFO connections in SYN_SENT or SYN_RECEIVED,
	 * only allow the initial SYN or SYN|ACK and those sent
	 * by the retransmit timer.
	 */
	if ((tp->t_flags & TF_FASTOPEN) &&
	    ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED)) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&	/* SYN or SYN|ACK sent */
	    (tp->snd_nxt != tp->snd_una))	/* not a retransmit */
		return (0);

	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && (((ticks - tp->t_rcvtime) >= tp->t_rxtcur) ||
	    (tp->t_sndtime && ((ticks - tp->t_sndtime) >= tp->t_rxtcur))))
		cc_after_idle(tp);
	tp->t_flags &= ~TF_LASTIDLE;
	if (idle) {
		if (tp->t_flags & TF_MORETOCOME) {
			tp->t_flags |= TF_LASTIDLE;
			idle = 0;
		}
	}
again:
	sendwin = 0;
	/*
	 * If we've recently taken a timeout, snd_max will be greater than
	 * snd_nxt.  There may be SACK information that allows us to avoid
	 * resending already delivered data.  Adjust snd_nxt accordingly.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    (tp->sackhint.nexthole != NULL) &&
	    !IN_FASTRECOVERY(tp->t_flags)) {
		sendwin = tcp_sack_adjust(tp);
	}
	sendalot = 0;
	tso = 0;
	mtu = 0;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd + sendwin);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Send any SACK-generated retransmissions.  If we're explicitly trying
	 * to send out new data (when sendalot is 1), bypass this function.
	 * If we retransmit in fast recovery mode, decrement snd_cwnd, since
	 * we're replacing a (future) new transmission with a retransmission
	 * now, and we previously incremented snd_cwnd in tcp_input().
	 */
	/*
	 * Still in sack recovery, reset rxmit flag to zero.
	 */
	sack_bytes_rxmt = 0;
	len = 0;
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    (IN_FASTRECOVERY(tp->t_flags) ||
	    (SEQ_LT(tp->snd_nxt, tp->snd_max) && (tp->t_dupacks >= tcprexmtthresh))) &&
	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
		int32_t cwin;

		if (IN_FASTRECOVERY(tp->t_flags)) {
			cwin = imax(sendwin - tcp_compute_pipe(tp), 0);
		} else {
			cwin = imax(sendwin - off, 0);
		}
		/* Do not retransmit SACK segments beyond snd_recover */
		if (SEQ_GT(p->end, tp->snd_recover)) {
			/*
			 * (At least) part of sack hole extends beyond
			 * snd_recover. Check to see if we can rexmit data
			 * for this hole.
			 */
			if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
				/*
				 * Can't rexmit any more data for this hole.
				 * That data will be rexmitted in the next
				 * sack recovery episode, when snd_recover
				 * moves past p->rxmit.
				 */
				p = NULL;
				sack_rxmit = false;
				goto after_sack_rexmit;
			} else {
				/* Can rexmit part of the current hole */
				len = SEQ_SUB(tp->snd_recover, p->rxmit);
				if (cwin <= len) {
					len = cwin;
				} else {
					sendalot = 1;
				}
			}
		} else {
			len = SEQ_SUB(p->end, p->rxmit);
			if (cwin <= len) {
				len = cwin;
			} else {
				sendalot = 1;
			}
		}
		/*
		 * We could have transmitted from the scoreboard, but
		 * sendwin (expected flightsize) minus pipe didn't allow
		 * any transmission.  Bypass recalculating the possible
		 * transmission length further down by setting sack_rxmit.
		 * We wouldn't be here if there had been nothing in the
		 * scoreboard to transmit.
		 */
		if (len > 0) {
			off = SEQ_SUB(p->rxmit, tp->snd_una);
			KASSERT(off >= 0, ("%s: sack block to the left of una: %d",
			    __func__, off));
		}
		sack_rxmit = true;
	} else {
		p = NULL;
		sack_rxmit = false;
	}
after_sack_rexmit:
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	SOCK_SENDBUF_LOCK(so);
	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCEDATA) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < sbused(&so->so_snd))
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_timer_activate(tp, TT_PERSIST, 0);
			tp->t_rxtshift = 0;
		}
	}

	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 *
	 * If sack_rxmit is true we are retransmitting from the scoreboard
	 * in which case len is already set.
	 */
	if (!sack_rxmit) {
		if ((sack_bytes_rxmt == 0) || SEQ_LT(tp->snd_nxt, tp->snd_max)) {
			len = imin(sbavail(&so->so_snd), sendwin) - off;
		} else {
			/*
			 * We are inside of a SACK recovery episode and are
			 * sending new data, having retransmitted all the
			 * data possible in the scoreboard.
			 */
			len = imax(
			    imin(sbavail(&so->so_snd), sendwin) -
			    imax(tcp_compute_pipe(tp), off), 0);
		}
	}
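	/*
	 * Illustrative example of the new-data arithmetic above (numbers
	 * are hypothetical): with 20000 bytes in so_snd, sendwin = 10000,
	 * off = 3000 and an estimated pipe of 4000, len becomes
	 * max(min(20000, 10000) - max(4000, 3000), 0) = 6000, i.e. new
	 * transmissions during recovery are budgeted against the estimated
	 * flightsize rather than just snd_nxt - snd_una.
	 */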

	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if segment contains data and if we don't
	 * know that foreign host supports TAO, suppress sending segment.
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		if (tp->t_state != TCPS_SYN_RECEIVED)
			flags &= ~TH_SYN;
		/*
		 * When sending additional segments following a TFO SYN|ACK,
		 * do not include the SYN bit.
		 */
		if ((tp->t_flags & TF_FASTOPEN) &&
		    (tp->t_state == TCPS_SYN_RECEIVED))
			flags &= ~TH_SYN;
		off--, len++;
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
		len = 0;
		flags &= ~TH_FIN;
	}

	/*
	 * On TFO sockets, ensure no data is sent in the following cases:
	 *
	 *  - When retransmitting SYN|ACK on a passively-created socket
	 *
	 *  - When retransmitting SYN on an actively created socket
	 *
	 *  - When sending a zero-length cookie (cookie request) on an
	 *    actively created socket
	 *
	 *  - When the socket is in the CLOSED state (RST is being sent)
	 */
	if ((tp->t_flags & TF_FASTOPEN) &&
	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
	    ((tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_tfo_client_cookie_len == 0)) ||
	    (flags & TH_RST)))
		len = 0;

	/* Without fast-open there should never be data sent on a SYN. */
	if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) {
		len = 0;
	}

	if (len <= 0) {
		/*
		 * If FIN has been sent but not acked,
		 * but we haven't been called to retransmit,
		 * len will be < 0.  Otherwise, window shrank
		 * after we sent into it.  If window shrank to 0,
		 * cancel pending retransmit, pull snd_nxt back
		 * to (closed) window, and set the persist timer
		 * if it isn't already going.  If the window didn't
		 * close completely, just wait for an ACK.
		 *
		 * We also do a general check here to ensure that
		 * we will set the persist timer when we have data
		 * to send, but a 0-byte window.  This makes sure
		 * the persist timer is set even if the packet
		 * hits one of the "goto send" lines below.
		 */
		len = 0;
		if ((sendwin == 0) && (TCPS_HAVEESTABLISHED(tp->t_state)) &&
		    (off < (int) sbavail(&so->so_snd)) &&
		    !tcp_timer_active(tp, TT_PERSIST)) {
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_timer_active(tp, TT_PERSIST))
				tcp_setpersist(tp);
		}
	}

	/* len will be >= 0 after this point. */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	tcp_sndbuf_autoscale(tp, so, sendwin);

	/*
	 * Decide if we can use TCP Segmentation Offloading (if supported by
	 * hardware).
	 *
	 * TSO may only be used if we are in a pure bulk sending state.  The
	 * presence of TCP-MD5, IP options (IPsec), and possibly SACK
	 * retransmits prevent using TSO.  With TSO the TCP header is the same
	 * (except for the sequence number) for all generated packets.  This
	 * makes it impossible to transmit any options which vary per generated
	 * segment or packet.
	 *
	 * IPv4 handling has a clear separation of ip options and ip header
	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
	 * the right thing below to provide length of just ip options and thus
	 * checking for ipoptlen is enough to decide if ip options are present.
	 */
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/*
	 * Pre-calculate here as we save another lookup into the darknesses
	 * of IPsec that way and can actually decide if TSO is ok.
	 */
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6))
		ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4))
		ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
#endif /* INET */
#endif /* IPSEC */
#ifdef INET6
	if (isipv6)
		ipoptlen = ip6_optlen(inp);
	else
#endif
	if (inp->inp_options)
		ipoptlen = inp->inp_options->m_len -
		    offsetof(struct ipoption, ipopt_list);
	else
		ipoptlen = 0;
	ipoptlen += ipsec_optlen;

	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg &&
	    (tp->t_port == 0) &&
	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
	    (!sack_rxmit || V_tcp_sack_tso) &&
	    (ipoptlen == 0 || (ipoptlen == ipsec_optlen &&
	    (tp->t_flags2 & TF2_IPSEC_TSO) != 0)) &&
	    !(flags & TH_SYN))
		tso = 1;

	if (SEQ_LT((sack_rxmit ? p->rxmit : tp->snd_nxt) + len,
	    tp->snd_una + sbused(&so->so_snd))) {
		flags &= ~TH_FIN;
	}

	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);

	/*
	 * Sender silly window avoidance.  We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- We have a full segment (or more with TSO)
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we've timed out (e.g. persist timer)
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (receiver may be limiting the window size)
	 *	- we need to retransmit
	 */
	if (len) {
		if (len >= tp->t_maxseg)
			goto send;
		/*
		 * As the TCP header options are now
		 * considered when setting up the initial
		 * window, we would not send the last segment
		 * if we skip considering the option length here.
		 * Note: this may not work when tcp headers change
		 * very dynamically in the future.
		 */
		if ((((tp->t_flags & TF_SIGNATURE) ?
		    PADTCPOLEN(TCPOLEN_SIGNATURE) : 0) +
		    ((tp->t_flags & TF_RCVD_TSTMP) ?
		    PADTCPOLEN(TCPOLEN_TIMESTAMP) : 0) +
		    len) >= tp->t_maxseg)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome.  XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    (uint32_t)len + (uint32_t)off >= sbavail(&so->so_snd) &&
		    (tp->t_flags & TF_NOPUSH) == 0) {
			goto send;
		}
		if (tp->t_flags & TF_FORCEDATA)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
		if (sack_rxmit)
			goto send;
	}
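	/*
	 * Illustrative example of the checks above (numbers are
	 * hypothetical): with t_maxseg = 1460 and 1000 bytes queued on an
	 * idle connection, the full-segment test fails but the "last
	 * buffer" test sends anyway, since len + off covers everything in
	 * so_snd.  If 4000 bytes were queued but only 1000 were sendable,
	 * nothing would go out until a full segment accumulates or one of
	 * the timeout/retransmit conditions applies.
	 */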

	/*
	 * Sending of standalone window updates.
	 *
	 * Window updates are important when we close our window due to a
	 * full socket buffer and are opening it again after the application
	 * reads data from it.  Once the window has opened again and the
	 * remote end starts to send again the ACK clock takes over and
	 * provides the most current window information.
	 *
	 * We must avoid the silly window syndrome where every read
	 * from the receive buffer, no matter how small, causes a window
	 * update to be sent.  We also should avoid sending a flurry of
	 * window updates when the socket buffer had queued a lot of data
	 * and the application is doing small reads.
	 *
	 * Prevent a flurry of pointless window updates by only sending
	 * an update when we can increase the advertised window by more
	 * than 1/4th of the socket buffer capacity.  When the buffer is
	 * getting full or is very small be more aggressive and send an
	 * update whenever we can increase by two MSS sized segments.
	 * In all other situations the ACK's to new incoming data will
	 * carry further window increases.
	 *
	 * Don't send an independent window update if a delayed
	 * ACK is pending (it will get piggy-backed on it) or the
	 * remote side already has done a half-close and won't send
	 * more data.
	 */
	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
	    !(tp->t_flags & TF_DELACK) &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * "adv" is the amount we could increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		int32_t adv;
		int oldwin;

		adv = recwin;
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
			oldwin = (tp->rcv_adv - tp->rcv_nxt);
			if (adv > oldwin)
				adv -= oldwin;
			else
				adv = 0;
		} else
			oldwin = 0;

		/*
		 * If the new window size ends up being the same as or less
		 * than the old size when it is scaled, then don't force
		 * a window update.
		 */
		if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
			goto dontupdate;

		if (adv >= (int32_t)(2 * tp->t_maxseg) &&
		    (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
		    recwin <= (so->so_rcv.sb_hiwat / 8) ||
		    so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg ||
		    adv >= TCP_MAXWIN << tp->rcv_scale))
			goto send;
		if (2 * adv >= (int32_t)so->so_rcv.sb_hiwat)
			goto send;
	}
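	/*
	 * Illustrative example of the thresholds above (numbers are
	 * hypothetical): with sb_hiwat = 65536 and t_maxseg = 1460, a
	 * standalone update goes out once the window can grow by 16384
	 * bytes (1/4th of the buffer), or by 2 * 1460 = 2920 bytes when
	 * the advertisable window has shrunk to 8192 bytes (1/8th) or
	 * less; smaller increases ride along on ACKs for incoming data.
	 */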
dontupdate:

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if (flags & TH_FIN &&
	    ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
		goto send;
	/*
	 * In SACK, it is possible for tcp_output to fail to send a segment
	 * after the retransmission timer has been turned off.  Make sure
	 * that the retransmission timer is set.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&
	    !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp));
		goto just_return;
	}
	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_timer_active(tp, TT_PERSIST)
	 *	is true when we are in persist state.
	 * (tp->t_flags & TF_FORCEDATA)
	 *	is set when we are called to send a persist packet.
	 * tcp_timer_active(tp, TT_REXMT)
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise force out a byte.
	 */
	if (sbavail(&so->so_snd) && !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
just_return:
	SOCK_SENDBUF_UNLOCK(so);
	return (0);

send:
	SOCK_SENDBUF_LOCK_ASSERT(so);
	if (len > 0) {
		if (len >= tp->t_maxseg)
			tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
	}
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
#ifdef INET6
	if (isipv6)
		hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	else
#endif
		hdrlen = sizeof (struct tcpiphdr);

	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
	}

	/*
	 * Compute options for segment.
	 * We only have to care about SYN and established connection
	 * segments.  Options for SYN-ACK segments are handled in TCP
	 * syncache.
	 */
	to.to_flags = 0;
	if ((tp->t_flags & TF_NOOPT) == 0) {
		/* Maximum segment size. */
		if (flags & TH_SYN) {
			to.to_mss = tcp_mssopt(&inp->inp_inc);
			if (tp->t_port)
				to.to_mss -= V_tcp_udp_tunneling_overhead;
			to.to_flags |= TOF_MSS;

			/*
			 * On SYN or SYN|ACK transmits on TFO connections,
			 * only include the TFO option if it is not a
			 * retransmit, as the presence of the TFO option may
			 * have caused the original SYN or SYN|ACK to have
			 * been dropped by a middlebox.
			 */
			if ((tp->t_flags & TF_FASTOPEN) &&
			    (tp->t_rxtshift == 0)) {
				if (tp->t_state == TCPS_SYN_RECEIVED) {
					to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
					to.to_tfo_cookie =
					    (u_int8_t *)&tp->t_tfo_cookie.server;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
				} else if (tp->t_state == TCPS_SYN_SENT) {
					to.to_tfo_len =
					    tp->t_tfo_client_cookie_len;
					to.to_tfo_cookie =
					    tp->t_tfo_cookie.client;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
					/*
					 * If we wind up having more data to
					 * send with the SYN than can fit in
					 * one segment, don't send any more
					 * until the SYN|ACK comes back from
					 * the other end.
					 */
					dont_sendalot = 1;
				}
			}
		}
		/* Window scaling. */
		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
			to.to_wscale = tp->request_r_scale;
			to.to_flags |= TOF_SCALE;
		}
		/* Timestamps. */
		if ((tp->t_flags & TF_RCVD_TSTMP) ||
		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
			curticks = tcp_ts_getticks();
			to.to_tsval = curticks + tp->ts_offset;
			to.to_tsecr = tp->ts_recent;
			to.to_flags |= TOF_TS;
			if (tp->t_rxtshift == 1)
				tp->t_badrxtwin = curticks;
		}

		/* Set receive buffer autosizing timestamp. */
		if (tp->rfbuf_ts == 0 &&
		    (so->so_rcv.sb_flags & SB_AUTOSIZE))
			tp->rfbuf_ts = tcp_ts_getticks();

		/* Selective ACK's. */
		if (tp->t_flags & TF_SACK_PERMIT) {
			if (flags & TH_SYN)
				to.to_flags |= TOF_SACKPERM;
			else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
			    tp->rcv_numsacks > 0) {
				to.to_flags |= TOF_SACK;
				to.to_nsacks = tp->rcv_numsacks;
				to.to_sacks = (u_char *)tp->sackblks;
			}
		}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/* TCP-MD5 (RFC2385). */
		/*
		 * Check that TCP_MD5SIG is enabled in tcpcb to
		 * account for the size needed to set this TCP option.
		 */
		if (tp->t_flags & TF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif /* TCP_SIGNATURE */

		/* Processing the options. */
		hdrlen += optlen = tcp_addoptions(&to, opt);
		/*
		 * If we wanted a TFO option to be added, but it was unable
		 * to fit, ensure no data is sent.
		 */
		if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie &&
		    !(to.to_flags & TOF_FASTOPEN))
			len = 0;
	}
	if (tp->t_port) {
		if (V_tcp_udp_tunneling_port == 0) {
			/* The port was removed?? */
			SOCK_SENDBUF_UNLOCK(so);
			return (EHOSTUNREACH);
		}
		hdrlen += sizeof(struct udphdr);
	}
	/*
	 * Adjust data length if insertion of options will
	 * bump the packet length beyond the t_maxseg length.
	 * Clear the FIN bit because we cut off the tail of
	 * the segment.
	 */
	if (len + optlen + ipoptlen > tp->t_maxseg) {
		flags &= ~TH_FIN;

		if (tso) {
			u_int if_hw_tsomax;
			u_int moff;
			int max_len;

			/* extract TSO information */
			if_hw_tsomax = tp->t_tsomax;
			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;

			/*
			 * Limit a TSO burst to prevent it from
			 * overflowing or exceeding the maximum length
			 * allowed by the network interface:
			 */
			KASSERT(ipoptlen == ipsec_optlen,
			    ("%s: TSO can't do IP options", __func__));

			/*
			 * Check if we should limit by maximum payload
			 * length:
			 */
			if (if_hw_tsomax != 0) {
				/* compute maximum TSO length */
				max_len = if_hw_tsomax - hdrlen -
				    ipsec_optlen - max_linkhdr;
				if (max_len <= 0) {
					len = 0;
				} else if (len > max_len) {
					sendalot = 1;
					len = max_len;
				}
			}

			/*
			 * Prevent the last segment from being
			 * fractional unless the send sockbuf can be
			 * emptied:
			 */
			max_len = tp->t_maxseg - optlen - ipsec_optlen;
			if (((uint32_t)off + (uint32_t)len) <
			    sbavail(&so->so_snd)) {
				moff = len % max_len;
				if (moff != 0) {
					len -= moff;
					sendalot = 1;
				}
			}

			/*
			 * In case there are too many small fragments
			 * don't use TSO:
			 */
			if (len <= max_len) {
				len = max_len;
				sendalot = 1;
				tso = 0;
			}

			/*
			 * Send the FIN in a separate segment
			 * after the bulk sending is done.
			 * We don't trust the TSO implementations
			 * to clear the FIN flag on all but the
			 * last segment.
			 */
			if (tp->t_flags & TF_NEEDFIN)
				sendalot = 1;
		} else {
			if (optlen + ipoptlen >= tp->t_maxseg) {
				/*
				 * Since we don't have enough space to put
				 * the IP header chain and the TCP header in
				 * one packet as required by RFC 7112, don't
				 * send it.  Also ensure that at least one
				 * byte of the payload can be put into the
				 * TCP segment.
				 */
				SOCK_SENDBUF_UNLOCK(so);
				error = EMSGSIZE;
				sack_rxmit = false;
				goto out;
			}
			len = tp->t_maxseg - optlen - ipoptlen;
			sendalot = 1;
			if (dont_sendalot)
				sendalot = 0;
		}
	} else
		tso = 0;
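	/*
	 * Illustrative example of the TSO clamping above (numbers are
	 * hypothetical): with if_hw_tsomax = 65536, hdrlen = 52,
	 * max_linkhdr = 16 and no IPsec, a 100000 byte send is first
	 * clamped to 65468 bytes; with t_maxseg = 1460 and optlen = 12
	 * the length is then trimmed to a multiple of 1448 (to 65160),
	 * so only the final burst may carry a short segment.
	 */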

	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
	    ("%s: len > IP_MAXPACKET", __func__));

/*#ifdef DIAGNOSTIC*/
#ifdef INET6
	if (max_linkhdr + hdrlen > MCLBYTES)
#else
	if (max_linkhdr + hdrlen > MHLEN)
#endif
		panic("tcphdr too big");
/*#endif*/

	/*
	 * This KASSERT is here to catch edge cases at a well defined place.
	 * Before, those had triggered (random) panic conditions further down.
	 */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		struct mbuf *mb;
		struct sockbuf *msb;
		u_int moff;

		if ((tp->t_flags & TF_FORCEDATA) && len == 1) {
			TCPSTAT_INC(tcps_sndprobe);
#ifdef STATS
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				stats_voi_update_abs_u32(tp->t_stats,
				    VOI_TCP_RETXPB, len);
			else
				stats_voi_update_abs_u64(tp->t_stats,
				    VOI_TCP_TXPB, len);
#endif /* STATS */
		} else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
			tp->t_sndrexmitpack++;
			TCPSTAT_INC(tcps_sndrexmitpack);
			TCPSTAT_ADD(tcps_sndrexmitbyte, len);
			if (sack_rxmit) {
				TCPSTAT_INC(tcps_sack_rexmits);
				if (tso) {
					TCPSTAT_INC(tcps_sack_rexmits_tso);
				}
				TCPSTAT_ADD(tcps_sack_rexmit_bytes, len);
			}
#ifdef STATS
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
			    len);
#endif /* STATS */
		} else {
			TCPSTAT_INC(tcps_sndpack);
			TCPSTAT_ADD(tcps_sndbyte, len);
#ifdef STATS
			stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
			    len);
#endif /* STATS */
		}
#ifdef INET6
		if (MHLEN < hdrlen + max_linkhdr)
			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		else
#endif
			m = m_gethdr(M_NOWAIT, MT_DATA);

		if (m == NULL) {
			SOCK_SENDBUF_UNLOCK(so);
			error = ENOBUFS;
			sack_rxmit = false;
			goto out;
		}

		m->m_data += max_linkhdr;
		m->m_len = hdrlen;

		/*
		 * Start the m_copy functions from the closest mbuf
		 * to the offset in the socket buffer chain.
		 */
		mb = sbsndptr_noadv(&so->so_snd, off, &moff);
		if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
			m_copydata(mb, moff, len,
			    mtod(m, caddr_t) + hdrlen);
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				sbsndptr_adv(&so->so_snd, mb, len);
			m->m_len += len;
		} else {
			int32_t old_len;

			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				msb = NULL;
			else
				msb = &so->so_snd;
			old_len = len;
			m->m_next = tcp_m_copym(mb, moff,
			    &len, if_hw_tsomaxsegcount,
			    if_hw_tsomaxsegsize, msb, hw_tls);
			if (old_len != len)
				flags &= ~TH_FIN;
			if (len <= (tp->t_maxseg - optlen)) {
				/*
				 * Must have run out of mbufs for the copy;
				 * shorten it so TSO is no longer needed.
				 * Don't set sendalot since we are low on
				 * mbufs.
				 */
				tso = 0;
			}
			if (m->m_next == NULL) {
				SOCK_SENDBUF_UNLOCK(so);
				(void) m_free(m);
				error = ENOBUFS;
				sack_rxmit = false;
				goto out;
			}
		}

		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (((uint32_t)off + (uint32_t)len == sbused(&so->so_snd)) &&
		    !(flags & TH_SYN))
			flags |= TH_PUSH;
		SOCK_SENDBUF_UNLOCK(so);
	} else {
		SOCK_SENDBUF_UNLOCK(so);
		if (tp->t_flags & TF_ACKNOW)
			TCPSTAT_INC(tcps_sndacks);
		else if (flags & (TH_SYN|TH_FIN|TH_RST))
			TCPSTAT_INC(tcps_sndctrl);
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			TCPSTAT_INC(tcps_sndurg);
		else
			TCPSTAT_INC(tcps_sndwinup);

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOBUFS;
			sack_rxmit = false;
			goto out;
		}
#ifdef INET6
		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
		    MHLEN >= hdrlen) {
			M_ALIGN(m, hdrlen);
		} else
#endif
			m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	SOCK_SENDBUF_UNLOCK_ASSERT(so);
	m->m_pkthdr.rcvif = (struct ifnet *)0;
#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif
#ifdef INET6
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		if (tp->t_port) {
			udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tp->t_port;
			ulen = hdrlen + len - sizeof(struct ip6_hdr);
			udp->uh_ulen = htons(ulen);
			th = (struct tcphdr *)(udp + 1);
		} else {
			th = (struct tcphdr *)(ip6 + 1);
		}
		tcpip_fillheaders(inp, tp->t_port, ip6, th);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		if (tp->t_port) {
			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tp->t_port;
			ulen = hdrlen + len - sizeof(struct ip);
			udp->uh_ulen = htons(ulen);
			th = (struct tcphdr *)(udp + 1);
		} else
			th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, tp->t_port, ip, th);
	}

	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;
	/*
	 * If we are starting a connection, send ECN setup
	 * SYN packet. If we are on a retransmit, we may
	 * resend those bits a number of times as per
	 * RFC 3168.
	 */
	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
		flags |= tcp_ecn_output_syn_sent(tp);
	}
	/* Also handle parallel SYN for ECN */
	if ((TCPS_HAVERCVDSYN(tp->t_state)) &&
	    (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
		int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit);
		if ((tp->t_state == TCPS_SYN_RECEIVED) &&
		    (tp->t_flags2 & TF2_ECN_SND_ECE))
			tp->t_flags2 &= ~TF2_ECN_SND_ECE;
#ifdef INET6
		if (isipv6) {
			ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << IPV6_FLOWLABEL_LEN);
			ip6->ip6_flow |= htonl(ect << IPV6_FLOWLABEL_LEN);
		}
		else
#endif
		{
			ip->ip_tos &= ~IPTOS_ECN_MASK;
			ip->ip_tos |= ect;
		}
	}

	/*
	 * If we are doing retransmissions, then snd_nxt will
	 * not reflect the first unsent octet.  For ACK only
	 * packets, we do not want the sequence number of the
	 * retransmitted packet, we want the sequence number
	 * of the next unsent octet.  So, if there is no data
	 * (and no SYN or FIN), use snd_max instead of snd_nxt
	 * when filling in ti_seq.  But if we are in persist
	 * state, snd_max might reflect one byte beyond the
	 * right edge of the window, so use snd_nxt in that
	 * case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 */
	if (!sack_rxmit) {
		if (len || (flags & (TH_SYN|TH_FIN)) ||
		    tcp_timer_active(tp, TT_PERSIST))
			th->th_seq = htonl(tp->snd_nxt);
		else
			th->th_seq = htonl(tp->snd_max);
	} else {
		th->th_seq = htonl(p->rxmit);
		p->rxmit += len;
		/*
		 * Lost Retransmission Detection
		 * trigger resending of a (then
		 * still existing) hole, when
		 * fack acks recoverypoint.
		 */
		if ((tp->t_flags & TF_LRD) && SEQ_GEQ(p->rxmit, p->end))
			p->rxmit = tp->snd_recover;
		tp->sackhint.sack_bytes_rexmit += len;
	}
	if (IN_RECOVERY(tp->t_flags)) {
		/*
		 * Account all bytes transmitted while
		 * IN_RECOVERY, simplifying PRR and
		 * Lost Retransmit Detection
		 */
		tp->sackhint.prr_out += len;
	}
	th->th_ack = htonl(tp->rcv_nxt);
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	}
	/*
	 * Calculate receive window.  Don't shrink window,
	 * but avoid silly window syndrome.
	 * If a RST segment is sent, advertise a window of zero.
	 */
	if (flags & TH_RST) {
		recwin = 0;
	} else {
		if (recwin < (so->so_rcv.sb_hiwat / 4) &&
		    recwin < tp->t_maxseg)
			recwin = 0;
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
		    recwin < (tp->rcv_adv - tp->rcv_nxt))
			recwin = (tp->rcv_adv - tp->rcv_nxt);
	}
	/*
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.  The <SYN,ACK>
	 * case is handled in syncache.
	 */
	if (flags & TH_SYN)
		th->th_win = htons((u_short)
		    (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
	else {
		/* Avoid shrinking window with window scaling. */
		recwin = roundup2(recwin, 1 << tp->rcv_scale);
		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
	}
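	/*
	 * Example of the scaling above: with rcv_scale = 7 and
	 * recwin = 100000, roundup2() lifts recwin to the next multiple
	 * of 128 (100096), so the truncation in the shifted 16-bit field
	 * (100096 >> 7 = 782) never advertises a smaller window than the
	 * previous segment did.
	 */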

	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
	 * a 0 window.  This may cause the remote transmitter to stall.  This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer.  This can occur if the receiver is attempting
	 * to read more data than can be buffered prior to transmitting on
	 * the connection.
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		flags |= TH_URG;
	} else {
		/*
		 * If no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;	/* drag it along */
	}
	tcp_set_flags(th, flags);

	/*
	 * Put TCP length in extended header, and then
	 * checksum extended header and data.
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		/*
		 * Calculate MD5 signature and put it into the place
		 * determined before.
		 * NOTE: since TCP options buffer doesn't point into
		 * mbuf's data, calculate offset and use it.
		 */
		if (!TCPMD5_ENABLED() || (error = TCPMD5_OUTPUT(m, th,
		    (u_char *)(th + 1) + (to.to_signature - opt))) != 0) {
			/*
			 * Do not send segment if the calculation of MD5
			 * digest has failed.
			 */
			m_freem(m);
			goto out;
		}
	}
#endif
#ifdef INET6
	if (isipv6) {
		/*
		 * There is no need to fill in ip6_plen right now.
		 * It will be filled later by ip6_output.
		 */
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in6_cksum_pseudo(ip6,
			    sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
			    0);
		}
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
			    IPPROTO_TCP + len + optlen));
		}

		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	}
#endif

	/*
	 * Enable TSO and specify the size of the segments.
	 * The TCP pseudo header checksum is always provided.
	 */
	if (tso) {
		KASSERT(len > tp->t_maxseg - optlen - ipsec_optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen - ipsec_optlen;
	}

	KASSERT(len + hdrlen == m_length(m, NULL),
	    ("%s: mbuf chain shorter than expected: %d + %u != %u",
	    __func__, len, hdrlen, m_length(m, NULL)));

#ifdef TCP_HHOOK
	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);
#endif

	TCP_PROBE3(debug__output, tp, th, m);

	/* We're getting ready to send; log now. */
	/* XXXMT: We are not honoring verbose logging. */

	if (tcp_bblogging_on(tp))
		lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd,
		    TCP_LOG_OUT, ERRNO_UNK, len, NULL, false, NULL, NULL, 0,
		    NULL);
	else
		lgb = NULL;

	/*
	 * Fill in IP length and desired time to live and
	 * send to IP level.  There should be a better way
	 * to handle ttl and tos; we could keep them in
	 * the template, but need a way to checksum without them.
	 */
	/*
	 * m->m_pkthdr.len should have been set before checksum calculation,
	 * because in6_cksum() needs it.
	 */
#ifdef INET6
	if (isipv6) {
		/*
		 * we separately set hoplimit for every segment, since the
		 * user might want to change the value via setsockopt.
		 * Also, desired default hop limit might be changed via
		 * Neighbor Discovery.
		 */
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);

		/*
		 * Set the packet size here for the benefit of DTrace probes.
		 * ip6_output() will set it properly; it's supposed to include
		 * the option header lengths as well.
		 */
		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));

		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);

		TCP_PROBE5(send, NULL, tp, ip6, tp, th);

#ifdef TCPPCAP
		/* Save packet, if requested. */
		tcp_pcap_add(th, m, &(tp->t_outpkts));
#endif

		/* TODO: IPv6 IP6TOS_ECT bit on */
		error = ip6_output(m, inp->in6p_outputopts, &inp->inp_route6,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, NULL, inp);

		if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
			mtu = inp->inp_route6.ro_nh->nh_mtu;
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(inp, NULL);
#endif /* INET6 */
		/*
		 * If we do path MTU discovery, then we set DF on every packet.
		 * This might not be the best thing to do according to RFC3390
		 * Section 2.  However the tcp hostcache mitigates the problem
		 * so it affects only the first tcp connection with a host.
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe fallback.
		 */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
			if (tp->t_port == 0 || len < V_tcp_minmss) {
				ip->ip_off |= htons(IP_DF);
			}
		} else {
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		}

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);

		TCP_PROBE5(send, NULL, tp, ip, tp, th);

#ifdef TCPPCAP
		/* Save packet, if requested. */
		tcp_pcap_add(th, m, &(tp->t_outpkts));
#endif

		error = ip_output(m, inp->inp_options, &inp->inp_route,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, inp);

		if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
			mtu = inp->inp_route.ro_nh->nh_mtu;
	}
#endif /* INET */

	if (lgb != NULL) {
		lgb->tlb_errno = error;
		lgb = NULL;
	}
out:
	if (error == 0)
		tcp_account_for_send(tp, len, (tp->snd_nxt != tp->snd_max), 0, hw_tls);
	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.  In a closed
	 * state just return.
	 */
	if (flags & TH_RST) {
		TCPSTAT_INC(tcps_sndtotal);
		return (0);
	} else if ((tp->t_flags & TF_FORCEDATA) == 0 ||
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN|TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		if (sack_rxmit)
			goto timer;
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			/*
			 * Update "made progress" indication if we just
			 * added new data to an empty socket buffer.
			 */
			if (tp->snd_una == tp->snd_max)
				tp->t_acktime = ticks;
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			tp->t_sndtime = ticks;
			if (tp->t_rtttime == 0) {
				tp->t_rtttime = ticks;
				tp->t_rtseq = startseq;
				TCPSTAT_INC(tcps_segstimed);
			}
#ifdef STATS
			if (!(tp->t_flags & TF_GPUTINPROG) && len) {
				tp->t_flags |= TF_GPUTINPROG;
				tp->gput_seq = startseq;
				tp->gput_ack = startseq +
				    ulmin(sbavail(&so->so_snd) - off, sendwin);
				tp->gput_ts = tcp_ts_getticks();
			}
#endif /* STATS */
		}

		/*
		 * Set retransmit timer if not currently set,
		 * and not doing a pure ack or a keep-alive probe.
		 * Initial value for retransmit timer is smoothed
		 * round-trip time + 2 * round-trip time variance.
		 * Initialize shift counter which is used for backoff
		 * of retransmit time.
		 */
timer:
		if (!tcp_timer_active(tp, TT_REXMT) &&
		    ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
		    (tp->snd_nxt != tp->snd_una))) {
			if (tcp_timer_active(tp, TT_PERSIST)) {
				tcp_timer_activate(tp, TT_PERSIST, 0);
				tp->t_rxtshift = 0;
			}
			tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp));
		} else if (len == 0 && sbavail(&so->so_snd) &&
		    !tcp_timer_active(tp, TT_REXMT) &&
		    !tcp_timer_active(tp, TT_PERSIST)) {
			/*
			 * Avoid a situation where we do not set persist timer
			 * after a zero window condition. For example:
			 * 1) A -> B: packet with enough data to fill the window
			 * 2) B -> A: ACK for #1 + new data (0 window
			 *    advertisement)
			 * 3) A -> B: ACK for #2, 0 len packet
			 *
			 * In this case, A will not activate the persist timer,
			 * because it chose to send a packet. Unless tcp_output
			 * is called for some other reason (delayed ack timer,
			 * another input packet from B, socket syscall), A will
			 * not send zero window probes.
			 *
			 * So, if you send a 0-length packet, but there is data
			 * in the socket buffer, and neither the rexmt or
			 * persist timer is already set, then activate the
			 * persist timer.
			 */
			tp->t_rxtshift = 0;
			tcp_setpersist(tp);
		}
	} else {
		/*
		 * Persist case, update snd_max but since we are in
		 * persist mode (no window) we do not update snd_nxt.
		 */
		int xlen = len;
		if (flags & TH_SYN)
			++xlen;
		if (flags & TH_FIN) {
			++xlen;
			tp->t_flags |= TF_SENTFIN;
		}
		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
			tp->snd_max = tp->snd_nxt + xlen;
	}
	if ((error == 0) &&
	    (tp->rcv_numsacks > 0) &&
	    TCPS_HAVEESTABLISHED(tp->t_state) &&
	    (tp->t_flags & TF_SACK_PERMIT)) {
		/* Clean up any DSACK's sent */
		tcp_clean_dsack_blocks(tp);
	}
	if ((error == 0) &&
	    sack_rxmit &&
	    SEQ_LT(tp->snd_nxt, SEQ_MIN(p->rxmit, p->end))) {
		/*
		 * When transmitting from SACK scoreboard
		 * after an RTO, pull snd_nxt along.
		 */
		tp->snd_nxt = SEQ_MIN(p->rxmit, p->end);
	}
	if (error) {
		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 *
		 * If the error is EPERM the packet got blocked by the
		 * local firewall.  Normally we should terminate the
		 * connection but the blocking may have been spurious
		 * due to a firewall reconfiguration cycle.  So we treat
		 * it like a packet loss and let the retransmit timer and
		 * timeouts do their work over time.
		 * XXX: It is a POLA question whether calling tcp_drop right
		 * away would be the really correct behavior instead.
		 */
		if (((tp->t_flags & TF_FORCEDATA) == 0 ||
		    !tcp_timer_active(tp, TT_PERSIST)) &&
		    ((flags & TH_SYN) == 0) &&
		    (error != EPERM)) {
			if (sack_rxmit) {
				p->rxmit = SEQ_MIN(p->end, p->rxmit) - len;
				tp->sackhint.sack_bytes_rexmit -= len;
				KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
				    ("sackhint bytes rtx >= 0"));
				KASSERT((flags & TH_FIN) == 0,
				    ("error while FIN with SACK rxmit"));
			} else {
				tp->snd_nxt -= len;
				if (flags & TH_FIN)
					tp->snd_nxt--;
			}
			if (IN_RECOVERY(tp->t_flags))
				tp->sackhint.prr_out -= len;
		}
		SOCK_SENDBUF_UNLOCK_ASSERT(so);	/* Check gotos. */
		switch (error) {
		case EACCES:
		case EPERM:
			tp->t_softerror = error;
			return (error);
		case ENOBUFS:
			TCP_XMIT_TIMER_ASSERT(tp, len, flags);
			tp->snd_cwnd = tcp_maxseg(tp);
			return (0);
		case EMSGSIZE:
			/*
			 * For some reason the interface we used initially
			 * to send segments changed to another or lowered
			 * its MTU.
			 * If TSO was active we either got an interface
			 * without TSO capabilities or TSO was turned off.
			 * If we obtained mtu from ip_output() then update
			 * it and try again.
			 */
			if (tso)
				tp->t_flags &= ~TF_TSO;
			if (mtu != 0) {
				tcp_mss_update(tp, -1, mtu, NULL, NULL);
				goto again;
			}
			return (error);
		case EHOSTDOWN:
		case EHOSTUNREACH:
		case ENETDOWN:
		case ENETUNREACH:
			if (TCPS_HAVERCVDSYN(tp->t_state)) {
				tp->t_softerror = error;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			return (error);
		}
	}
	TCPSTAT_INC(tcps_sndtotal);

	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	if (SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + recwin;
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
	if (tcp_timer_active(tp, TT_DELACK))
		tcp_timer_activate(tp, TT_DELACK, 0);
	if (sendalot)
		goto again;
	return (0);
}

void
tcp_setpersist(struct tcpcb *tp)
{
	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
	int tt;
	int maxunacktime;

	tp->t_flags &= ~TF_PREVVALID;
	if (tcp_timer_active(tp, TT_REXMT))
		panic("tcp_setpersist: retransmit pending");
	/*
	 * If the state is already closed, don't bother.
	 */
	if (tp->t_state == TCPS_CLOSED)
		return;

	/*
	 * Start/restart persistence timer.
	 */
	TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
	    tcp_persmin, tcp_persmax);
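	/*
	 * Sketch of the arithmetic above (fixed-point scale factors
	 * aside): t is proportional to srtt + 2 * rttvar, and the probe
	 * timeout is t * tcp_backoff[t_rxtshift] clamped to the range
	 * [tcp_persmin, tcp_persmax], so successive unanswered probes
	 * back off per the backoff table until tcp_persmax caps them.
	 */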
	if (TP_MAXUNACKTIME(tp) && tp->t_acktime) {
		maxunacktime = tp->t_acktime + TP_MAXUNACKTIME(tp) - ticks;
		if (maxunacktime < 1)
			maxunacktime = 1;
		if (maxunacktime < tt)
			tt = maxunacktime;
	}
	tcp_timer_activate(tp, TT_PERSIST, tt);
	if (tp->t_rxtshift < V_tcp_retries)
		tp->t_rxtshift++;
}
1787
/*
 * Insert TCP options according to the supplied parameters at the
 * location optp in a consistent way.  Can handle unaligned destinations.
 *
 * The order of option processing is crucial for optimal packing and
 * alignment of the scarce option space.
 *
 * The optimal order for a SYN/SYN-ACK segment is:
 *   MSS (4) + NOP (1) + Window scale (3) + SACK permitted (2) +
 *   Timestamp (10) + Signature (18) = 38 bytes out of a maximum of 40.
 *
 * The SACK options should be last.  SACK blocks consume 8*n+2 bytes.
 * So a full size SACK blocks option is 34 bytes (with 4 SACK blocks).
 * At minimum we need 10 bytes (to generate 1 SACK block).  If both
 * TCP Timestamps (12 bytes) and TCP Signatures (18 bytes) are present,
 * we only have 10 bytes for SACK options (40 - (12 + 18)).
 */
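/*
 * A worked example (illustrative, assuming the common case of a SYN
 * carrying MSS, window scale, SACK permitted and timestamps, which
 * matches what the loop below emits for those four flags):
 *
 *	offset  0: kind 2, len  4	MSS
 *	offset  4: kind 1		NOP (alignment)
 *	offset  5: kind 3, len  3	window scale
 *	offset  8: kind 4, len  2	SACK permitted
 *	offset 10: kind 8, len 10	timestamps (TSval, TSecr)
 *
 * for a total of 20 option bytes, leaving the header length a multiple
 * of four with no trailing padding required.
 */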
int
tcp_addoptions(struct tcpopt *to, u_char *optp)
{
	u_int32_t mask, optlen = 0;

	for (mask = 1; mask < TOF_MAXOPT; mask <<= 1) {
		if ((to->to_flags & mask) != mask)
			continue;
		if (optlen == TCP_MAXOLEN)
			break;
		switch (to->to_flags & mask) {
		case TOF_MSS:
			while (optlen % 4) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_MAXSEG)
				continue;
			optlen += TCPOLEN_MAXSEG;
			*optp++ = TCPOPT_MAXSEG;
			*optp++ = TCPOLEN_MAXSEG;
			to->to_mss = htons(to->to_mss);
			bcopy((u_char *)&to->to_mss, optp, sizeof(to->to_mss));
			optp += sizeof(to->to_mss);
			break;
		case TOF_SCALE:
			while (!optlen || optlen % 2 != 1) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_WINDOW)
				continue;
			optlen += TCPOLEN_WINDOW;
			*optp++ = TCPOPT_WINDOW;
			*optp++ = TCPOLEN_WINDOW;
			*optp++ = to->to_wscale;
			break;
		case TOF_SACKPERM:
			while (optlen % 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SACK_PERMITTED)
				continue;
			optlen += TCPOLEN_SACK_PERMITTED;
			*optp++ = TCPOPT_SACK_PERMITTED;
			*optp++ = TCPOLEN_SACK_PERMITTED;
			break;
		case TOF_TS:
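			/*
			 * Align so that the 32-bit TSval and TSecr that
			 * follow the two kind/length bytes land on 4-byte
			 * boundaries.
			 */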
			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_TIMESTAMP)
				continue;
			optlen += TCPOLEN_TIMESTAMP;
			*optp++ = TCPOPT_TIMESTAMP;
			*optp++ = TCPOLEN_TIMESTAMP;
			to->to_tsval = htonl(to->to_tsval);
			to->to_tsecr = htonl(to->to_tsecr);
			bcopy((u_char *)&to->to_tsval, optp, sizeof(to->to_tsval));
			optp += sizeof(to->to_tsval);
			bcopy((u_char *)&to->to_tsecr, optp, sizeof(to->to_tsecr));
			optp += sizeof(to->to_tsecr);
			break;
		case TOF_SIGNATURE:
			{
			int siglen = TCPOLEN_SIGNATURE - 2;

			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SIGNATURE) {
				to->to_flags &= ~TOF_SIGNATURE;
				continue;
			}
			optlen += TCPOLEN_SIGNATURE;
			*optp++ = TCPOPT_SIGNATURE;
			*optp++ = TCPOLEN_SIGNATURE;
			to->to_signature = optp;
			while (siglen--)
				*optp++ = 0;
			break;
			}
		case TOF_SACK:
			{
			int sackblks = 0;
			struct sackblk *sack = (struct sackblk *)to->to_sacks;
			tcp_seq sack_seq;

			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SACKHDR + TCPOLEN_SACK)
				continue;
			optlen += TCPOLEN_SACKHDR;
			*optp++ = TCPOPT_SACK;
			sackblks = min(to->to_nsacks,
			    (TCP_MAXOLEN - optlen) / TCPOLEN_SACK);
			*optp++ = TCPOLEN_SACKHDR + sackblks * TCPOLEN_SACK;
			while (sackblks--) {
				sack_seq = htonl(sack->start);
				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
				optp += sizeof(sack_seq);
				sack_seq = htonl(sack->end);
				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
				optp += sizeof(sack_seq);
				optlen += TCPOLEN_SACK;
				sack++;
			}
			TCPSTAT_INC(tcps_sack_send_blocks);
			break;
			}
		case TOF_FASTOPEN:
			{
			int total_len;

			/* XXX is there any point to aligning this option? */
			total_len = TCPOLEN_FAST_OPEN_EMPTY + to->to_tfo_len;
			if (TCP_MAXOLEN - optlen < total_len) {
				to->to_flags &= ~TOF_FASTOPEN;
				continue;
			}
			*optp++ = TCPOPT_FAST_OPEN;
			*optp++ = total_len;
			if (to->to_tfo_len > 0) {
				bcopy(to->to_tfo_cookie, optp, to->to_tfo_len);
				optp += to->to_tfo_len;
			}
			optlen += total_len;
			break;
			}
		default:
			panic("%s: unknown TCP option type", __func__);
			break;
		}
	}

	/* Terminate and pad TCP options to a 4 byte boundary. */
	if (optlen % 4) {
		optlen += TCPOLEN_EOL;
		*optp++ = TCPOPT_EOL;
	}
	/*
	 * According to RFC 793 (STD0007):
	 *   "The content of the header beyond the End-of-Option option
	 *    must be header padding (i.e., zero)."
	 * and later: "The padding is composed of zeros."
	 */
	while (optlen % 4) {
		optlen += TCPOLEN_PAD;
		*optp++ = TCPOPT_PAD;
	}

	KASSERT(optlen <= TCP_MAXOLEN, ("%s: TCP options too long", __func__));
	return (optlen);
}

/*
 * This is a copy of m_copym(), taking the TSO segment size/limit
 * constraints into account, and advancing the sndptr as it goes.
 */
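/*
 * Illustrative call sketch (hypothetical variable names, mirroring the
 * way tcp_output() is expected to drive this helper): copy len bytes
 * starting moff bytes into the send buffer chain mb, limited to maxsegs
 * TSO segments of segsz bytes each:
 *
 *	m = tcp_m_copym(mb, moff, &len, maxsegs, segsz, &so->so_snd,
 *	    hw_tls);
 *
 * On return, len may have been trimmed to what actually fit within the
 * segment limits, and a NULL result indicates mbuf allocation failure.
 */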
struct mbuf *
tcp_m_copym(struct mbuf *m, int32_t off0, int32_t *plen,
    int32_t seglimit, int32_t segsize, struct sockbuf *sb, bool hw_tls)
{
#ifdef KERN_TLS
	struct ktls_session *tls, *ntls;
	struct mbuf *start __diagused;
#endif
	struct mbuf *n, **np;
	struct mbuf *top;
	int32_t off = off0;
	int32_t len = *plen;
	int32_t fragsize;
	int32_t len_cp = 0;
	int32_t *pkthdrlen;
	uint32_t mlen, frags;
	bool copyhdr;

	KASSERT(off >= 0, ("tcp_m_copym, negative off %d", off));
	KASSERT(len >= 0, ("tcp_m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = true;
	else
		copyhdr = false;
	while (off > 0) {
		KASSERT(m != NULL, ("tcp_m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		if ((sb) && (m == sb->sb_sndptr)) {
			sb->sb_sndptroff += m->m_len;
			sb->sb_sndptr = m->m_next;
		}
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	pkthdrlen = NULL;
#ifdef KERN_TLS
	if (hw_tls && (m->m_flags & M_EXTPG))
		tls = m->m_epg_tls;
	else
		tls = NULL;
	start = m;
#endif
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("tcp_m_copym, length > size of mbuf chain"));
			*plen = len_cp;
			if (pkthdrlen != NULL)
				*pkthdrlen = len_cp;
			break;
		}
#ifdef KERN_TLS
		if (hw_tls) {
			if (m->m_flags & M_EXTPG)
				ntls = m->m_epg_tls;
			else
				ntls = NULL;

			/*
			 * Avoid mixing TLS records with handshake
			 * data or TLS records from different
			 * sessions.
			 */
			if (tls != ntls) {
				MPASS(m != start);
				*plen = len_cp;
				if (pkthdrlen != NULL)
					*pkthdrlen = len_cp;
				break;
			}
		}
#endif
		mlen = min(len, m->m_len - off);
		if (seglimit) {
			/*
			 * For M_EXTPG mbufs, add 3 segments
			 * + 1 in case we are crossing page boundaries
			 * + 2 in case the TLS hdr/trailer are used
			 * It is cheaper to just add the segments
			 * than it is to take the cache miss to look
			 * at the mbuf ext_pgs state in detail.
			 */
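			/*
			 * Worked example (illustrative numbers): with
			 * segsize 1448 and a 16k unmapped mbuf, fragsize
			 * is min(1448, PAGE_SIZE) = 1448, howmany(16384,
			 * 1448) = 12 fragments, plus the 3 slop segments
			 * above, so 15 segments are charged against
			 * seglimit.
			 */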
			if (m->m_flags & M_EXTPG) {
				fragsize = min(segsize, PAGE_SIZE);
				frags = 3;
			} else {
				fragsize = segsize;
				frags = 0;
			}

			/* Break if we really can't fit anymore. */
			if ((frags + 1) >= seglimit) {
				*plen = len_cp;
				if (pkthdrlen != NULL)
					*pkthdrlen = len_cp;
				break;
			}

			/*
			 * Reduce size if you can't copy the whole
			 * mbuf. If we can't copy the whole mbuf, also
			 * adjust len so the loop will end after this
			 * mbuf.
			 */
			if ((frags + howmany(mlen, fragsize)) >= seglimit) {
				mlen = (seglimit - frags - 1) * fragsize;
				len = mlen;
				*plen = len_cp + len;
				if (pkthdrlen != NULL)
					*pkthdrlen = *plen;
			}
			frags += howmany(mlen, fragsize);
			if (frags == 0)
				frags++;
			seglimit -= frags;
			KASSERT(seglimit > 0,
			    ("%s: seglimit went too low", __func__));
		}
		if (copyhdr)
			n = m_gethdr(M_NOWAIT, m->m_type);
		else
			n = m_get(M_NOWAIT, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, M_NOWAIT))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			pkthdrlen = &n->m_pkthdr.len;
			copyhdr = false;
		}
		n->m_len = mlen;
		len_cp += n->m_len;
		if (m->m_flags & (M_EXT | M_EXTPG)) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);

		if (sb && (sb->sb_sndptr == m) &&
		    ((n->m_len + off) >= m->m_len) && m->m_next) {
			sb->sb_sndptroff += m->m_len;
			sb->sb_sndptr = m->m_next;
		}
		off = 0;
		if (len != M_COPYALL) {
			len -= n->m_len;
		}
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (NULL);
}

void
tcp_sndbuf_autoscale(struct tcpcb *tp, struct socket *so, uint32_t sendwin)
{

	/*
	 * Automatic sizing of send socket buffer.  Often the send buffer
	 * size is not optimally adjusted to the actual network conditions
	 * at hand (delay bandwidth product).  Setting the buffer size too
	 * small limits throughput on links with high bandwidth and high
	 * delay (e.g. trans-continental/oceanic links).  Setting the
	 * buffer size too big consumes too much real kernel memory,
	 * especially with many connections on busy servers.
	 *
	 * The criteria to step up the send buffer one notch are:
	 *  1. receive window of remote host is larger than send buffer
	 *     (with a fudge factor of 5/4th);
	 *  2. send buffer is filled to 7/8th with data (so we actually
	 *     have data to make use of it);
	 *  3. send buffer fill has not hit maximal automatic size;
	 *  4. our send window (slow start and congestion controlled) is
	 *     larger than sent but unacknowledged data in send buffer.
	 *
	 * The remote host receive window scaling factor may limit the
	 * growing of the send buffer before it reaches its allowed
	 * maximum.
	 *
	 * It scales directly with slow start or congestion window
	 * and does at most one step per received ACK.  This fast
	 * scaling has the drawback of growing the send buffer beyond
	 * what is strictly necessary to make full use of a given
	 * delay*bandwidth product.  However testing has shown this not
	 * to be much of a problem.  At worst we are trading the waste
	 * of available bandwidth (by not using it) for the waste of
	 * some socket buffer memory.
	 *
	 * TODO: Shrink send buffer during idle periods together
	 * with congestion window.  Requires another timer.  Has to
	 * wait for upcoming tcp timer rewrite.
	 *
	 * XXXGL: should sbused() or sbavail() be used here?
	 */
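	/*
	 * Worked example (illustrative numbers, not part of the original
	 * comment): with sb_hiwat at 64k and lowat at 0, criterion 1 is
	 * met once the peer advertises a window of at least 52k
	 * (52k * 5/4 >= 64k) and criterion 2 once at least 56k (7/8 of
	 * 64k) of the buffer is in use; the buffer is then grown by
	 * V_tcp_autosndbuf_inc, capped at V_tcp_autosndbuf_max.
	 */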
	if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
		int lowat;

		lowat = V_tcp_sendbuf_auto_lowat ? so->so_snd.sb_lowat : 0;
		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat - lowat &&
		    sbused(&so->so_snd) >=
		    (so->so_snd.sb_hiwat / 8 * 7) - lowat &&
		    sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
		    sendwin >= (sbused(&so->so_snd) -
		    (tp->snd_nxt - tp->snd_una))) {
			if (!sbreserve_locked(so, SO_SND,
			    min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max), curthread))
				so->so_snd.sb_flags &= ~SB_AUTOSIZE;
		}
	}
}