/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/arb.h>
#include <sys/domain.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/kernel.h>
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/qmath.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/stats.h>

#include <net/if.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, path_mtu_discovery) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(path_mtu_discovery), 1,
	"Enable Path MTU Discovery");

VNET_DEFINE(int, tcp_do_tso) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_do_tso), 0,
	"Enable TCP Segmentation Offload");

VNET_DEFINE(int, tcp_sendspace) = 1024*32;
#define	V_tcp_sendspace	VNET(tcp_sendspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_sendspace), 0, "Initial send socket buffer size");

VNET_DEFINE(int, tcp_do_autosndbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_do_autosndbuf), 0,
	"Enable automatic send buffer sizing");

VNET_DEFINE(int, tcp_autosndbuf_inc) = 8*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_autosndbuf_inc), 0,
	"Incrementor step size of automatic send buffer");

VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_autosndbuf_max), 0,
	"Max size of automatic send buffer");

VNET_DEFINE(int, tcp_sendbuf_auto_lowat) = 0;
#define	V_tcp_sendbuf_auto_lowat	VNET(tcp_sendbuf_auto_lowat)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto_lowat, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_sendbuf_auto_lowat), 0,
	"Modify threshold for auto send buffer growth to account for SO_SNDLOWAT");

/*
 * Make sure that either retransmit or persist timer is set for SYN, FIN and
 * non-ACK.
 */
#define	TCP_XMIT_TIMER_ASSERT(tp, len, th_flags)			\
	KASSERT(((len) == 0 && ((th_flags) & (TH_SYN | TH_FIN)) == 0) ||\
	    tcp_timer_active((tp), TT_REXMT) ||				\
	    tcp_timer_active((tp), TT_PERSIST),				\
	    ("neither rexmt nor persist timer is set"))

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established output helper hook.
 */
void
hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to, uint32_t len, int tso)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_OUT]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;
		hhook_data.len = len;
		hhook_data.tso = tso;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_OUT], &hhook_data,
		    &tp->t_osd);
	}
}
#endif

/*
 * CC wrapper hook functions
 */
void
cc_after_idle(struct tcpcb *tp)
{
	INP_WLOCK_ASSERT(tptoinpcb(tp));

	if (CC_ALGO(tp)->after_idle != NULL)
		CC_ALGO(tp)->after_idle(&tp->t_ccv);
}

/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_default_output(struct tcpcb *tp)
{
	struct socket *so = tptosocket(tp);
	struct inpcb *inp = tptoinpcb(tp);
	int32_t len;
	uint32_t recwin, sendwin;
	uint16_t flags;
	int off, error = 0;	/* Keep compiler happy */
	u_int if_hw_tsomaxsegcount = 0;
	u_int if_hw_tsomaxsegsize = 0;
	struct mbuf *m;
	struct ip *ip = NULL;
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned ipoptlen, optlen, hdrlen, ulen;
	unsigned ipsec_optlen = 0;
	int idle, sendalot, curticks;
	int sack_bytes_rxmt;
	struct sackhole *p;
	int tso, mtu;
	struct tcpopt to;
	struct udphdr *udp = NULL;
	struct tcp_log_buffer *lgb;
	unsigned int wanted_cookie = 0;
	unsigned int dont_sendalot = 0;
	bool sack_rxmit;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	const bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif
#ifdef KERN_TLS
	const bool hw_tls = tp->t_nic_ktls_xmit != 0;
#else
	const bool hw_tls = false;
#endif

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(inp);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return (tcp_offload_output(tp));
#endif

	/*
	 * For TFO connections in SYN_SENT or SYN_RECEIVED,
	 * only allow the initial SYN or SYN|ACK and those sent
	 * by the retransmit timer.
	 */
	if ((tp->t_flags & TF_FASTOPEN) &&
	    ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED)) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&	/* SYN or SYN|ACK sent */
	    (tp->snd_nxt != tp->snd_una))	/* not a retransmit */
		return (0);
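
	/*
	 * Decoding the guard above (illustrative note, not from the
	 * original): SEQ_GT(snd_max, snd_una) means the initial SYN or
	 * SYN|ACK has already left, while snd_nxt != snd_una means snd_nxt
	 * was not pulled back by the retransmit timer, i.e. this call is
	 * not a timer-driven retransmission and must therefore be
	 * suppressed.
	 */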

	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && (((ticks - tp->t_rcvtime) >= tp->t_rxtcur) ||
	    (tp->t_sndtime && ((ticks - tp->t_sndtime) >= tp->t_rxtcur))))
		cc_after_idle(tp);
	tp->t_flags &= ~TF_LASTIDLE;
	if (idle) {
		if (tp->t_flags & TF_MORETOCOME) {
			tp->t_flags |= TF_LASTIDLE;
			idle = 0;
		}
	}
again:
	sendwin = 0;
	/*
	 * If we've recently taken a timeout, snd_max will be greater than
	 * snd_nxt. There may be SACK information that allows us to avoid
	 * resending already delivered data. Adjust snd_nxt accordingly.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    (tp->sackhint.nexthole != NULL) &&
	    !IN_FASTRECOVERY(tp->t_flags)) {
		sendwin = tcp_sack_adjust(tp);
	}
	sendalot = 0;
	tso = 0;
	mtu = 0;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd + sendwin);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Send any SACK-generated retransmissions. If we're explicitly trying
	 * to send out new data (when sendalot is 1), bypass this function.
	 * If we retransmit in fast recovery mode, decrement snd_cwnd, since
	 * we're replacing a (future) new transmission with a retransmission
	 * now, and we previously incremented snd_cwnd in tcp_input().
	 */
	/*
	 * Still in SACK recovery; reset the rxmit flag to zero.
	 */
	sack_bytes_rxmt = 0;
	len = 0;
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    (IN_FASTRECOVERY(tp->t_flags) ||
	    (SEQ_LT(tp->snd_nxt, tp->snd_max) && (tp->t_dupacks >= tcprexmtthresh))) &&
	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
		int32_t cwin;

		if (IN_FASTRECOVERY(tp->t_flags)) {
			cwin = imax(sendwin - tcp_compute_pipe(tp), 0);
		} else {
			cwin = imax(sendwin - off, 0);
		}
		/* Do not retransmit SACK segments beyond snd_recover */
		if (SEQ_GT(p->end, tp->snd_recover)) {
			/*
			 * (At least) part of sack hole extends beyond
			 * snd_recover. Check to see if we can rexmit data
			 * for this hole.
			 */
			if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
				/*
				 * Can't rexmit any more data for this hole.
				 * That data will be rexmitted in the next
				 * sack recovery episode, when snd_recover
				 * moves past p->rxmit.
				 */
				p = NULL;
				sack_rxmit = false;
				goto after_sack_rexmit;
			} else {
				/* Can rexmit part of the current hole */
				len = SEQ_SUB(tp->snd_recover, p->rxmit);
				if (cwin <= len) {
					len = cwin;
				} else {
					sendalot = 1;
				}
			}
		} else {
			len = SEQ_SUB(p->end, p->rxmit);
			if (cwin <= len) {
				len = cwin;
			} else {
				sendalot = 1;
			}
		}
		/*
		 * We could have transmitted from the scoreboard, but
		 * sendwin (the expected flightsize) minus pipe didn't
		 * allow any transmission. Bypass recalculating the
		 * possible transmission length further down by setting
		 * sack_rxmit. We wouldn't be here if there had been
		 * nothing in the scoreboard to transmit.
		 */
		if (len > 0) {
			off = SEQ_SUB(p->rxmit, tp->snd_una);
			KASSERT(off >= 0, ("%s: sack block to the left of una : %d",
			    __func__, off));
		}
		sack_rxmit = true;
	} else {
		p = NULL;
		sack_rxmit = false;
	}
after_sack_rexmit:
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	SOCK_SENDBUF_LOCK(so);
	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCEDATA) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit. Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data. However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < sbused(&so->so_snd))
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_timer_activate(tp, TT_PERSIST, 0);
			tp->t_rxtshift = 0;
		}
	}

	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
	 * a negative length. This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 *
	 * If sack_rxmit is true we are retransmitting from the scoreboard
	 * in which case len is already set.
	 */
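	/*
	 * Worked example (illustrative): with a FIN sent but unacked and
	 * the send buffer drained, snd_nxt == snd_una + 1 and sbavail()
	 * == 0, so off == 1 and len == min(0, sendwin) - 1 == -1; the
	 * len <= 0 branch below clamps this back to zero.
	 */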
	if (!sack_rxmit) {
		if ((sack_bytes_rxmt == 0) || SEQ_LT(tp->snd_nxt, tp->snd_max)) {
			len = imin(sbavail(&so->so_snd), sendwin) - off;
		} else {
			/*
			 * We are inside of a SACK recovery episode and are
			 * sending new data, having retransmitted all the
			 * data possible in the scoreboard.
			 */
			len = imax(
			    imin(sbavail(&so->so_snd), sendwin) -
			    imax(tcp_compute_pipe(tp), off), 0);
		}
	}

	/*
	 * Lop off SYN bit if it has already been sent. However, if this
	 * is SYN-SENT state and if segment contains data and if we don't
	 * know that foreign host supports TAO, suppress sending segment.
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		if (tp->t_state != TCPS_SYN_RECEIVED)
			flags &= ~TH_SYN;
		/*
		 * When sending additional segments following a TFO SYN|ACK,
		 * do not include the SYN bit.
		 */
		if ((tp->t_flags & TF_FASTOPEN) &&
		    (tp->t_state == TCPS_SYN_RECEIVED))
			flags &= ~TH_SYN;
		off--, len++;
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
		len = 0;
		flags &= ~TH_FIN;
	}

	/*
	 * On TFO sockets, ensure no data is sent in the following cases:
	 *
	 *  - When retransmitting SYN|ACK on a passively-created socket
	 *
	 *  - When retransmitting SYN on an actively created socket
	 *
	 *  - When sending a zero-length cookie (cookie request) on an
	 *    actively created socket
	 *
	 *  - When the socket is in the CLOSED state (RST is being sent)
	 */
	if ((tp->t_flags & TF_FASTOPEN) &&
	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
	    ((tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_tfo_client_cookie_len == 0)) ||
	    (flags & TH_RST)))
		len = 0;

	/* Without fast-open there should never be data sent on a SYN. */
	if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) {
		len = 0;
	}

	if (len <= 0) {
		/*
		 * If FIN has been sent but not acked,
		 * but we haven't been called to retransmit,
		 * len will be < 0. Otherwise, window shrank
		 * after we sent into it. If window shrank to 0,
		 * cancel pending retransmit, pull snd_nxt back
		 * to (closed) window, and set the persist timer
		 * if it isn't already going. If the window didn't
		 * close completely, just wait for an ACK.
		 *
		 * We also do a general check here to ensure that
		 * we will set the persist timer when we have data
		 * to send, but a 0-byte window. This makes sure
		 * the persist timer is set even if the packet
		 * hits one of the "goto send" lines below.
		 */
		len = 0;
		if ((sendwin == 0) && (TCPS_HAVEESTABLISHED(tp->t_state)) &&
		    (off < (int) sbavail(&so->so_snd)) &&
		    !tcp_timer_active(tp, TT_PERSIST)) {
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_timer_active(tp, TT_PERSIST))
				tcp_setpersist(tp);
		}
	}

	/* len will be >= 0 after this point. */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	tcp_sndbuf_autoscale(tp, so, sendwin);

	/*
	 * Decide if we can use TCP Segmentation Offloading (if supported by
	 * hardware).
	 *
	 * TSO may only be used if we are in a pure bulk sending state. The
	 * presence of TCP-MD5, IP options (IPsec), and possibly SACK
	 * retransmits prevent using TSO. With TSO the TCP header is the same
	 * (except for the sequence number) for all generated packets. This
	 * makes it impossible to transmit any options which vary per generated
	 * segment or packet.
	 *
	 * IPv4 handling has a clear separation of ip options and ip header
	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
	 * the right thing below to provide length of just ip options and thus
	 * checking for ipoptlen is enough to decide if ip options are present.
	 */
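	/*
	 * Illustration (not from the original): with if_hw_tsomax at 64k
	 * and a 1448-byte MSS, one TSO burst handed to the NIC is cut into
	 * roughly 45 wire segments that all share a single header
	 * template, which is why options that vary per segment rule out
	 * TSO.
	 */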
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/*
	 * Pre-calculate here as we save another lookup into the darknesses
	 * of IPsec that way and can actually decide if TSO is ok.
	 */
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6))
		ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4))
		ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
#endif /* INET */
#endif /* IPSEC */
#ifdef INET6
	if (isipv6)
		ipoptlen = ip6_optlen(inp);
	else
#endif
	if (inp->inp_options)
		ipoptlen = inp->inp_options->m_len -
		    offsetof(struct ipoption, ipopt_list);
	else
		ipoptlen = 0;
	ipoptlen += ipsec_optlen;

	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg &&
	    (tp->t_port == 0) &&
	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
	    (!sack_rxmit || V_tcp_sack_tso) &&
	    (ipoptlen == 0 || (ipoptlen == ipsec_optlen &&
	    (tp->t_flags2 & TF2_IPSEC_TSO) != 0)) &&
	    !(flags & TH_SYN))
		tso = 1;

	if (SEQ_LT((sack_rxmit ? p->rxmit : tp->snd_nxt) + len,
	    tp->snd_una + sbused(&so->so_snd))) {
		flags &= ~TH_FIN;
	}

	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);

	/*
	 * Sender silly window avoidance. We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- We have a full segment (or more with TSO)
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we've timed out (e.g. persist timer)
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (the receiver may be limiting the window size)
	 *	- we need to retransmit
	 */
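	/*
	 * Worked example (illustrative): with t_maxseg = 1460 and 3000
	 * bytes queued, two full segments go out immediately via the
	 * first test below; the 80-byte remainder is sent only if it is
	 * the tail of the write and we are idle or running NODELAY, if
	 * TF_FORCEDATA is set, if it is at least half the peer's largest
	 * window, or if it is a retransmission.
	 */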
	if (len) {
		if (len >= tp->t_maxseg)
			goto send;
		/*
		 * As the TCP header options are now
		 * considered when setting up the initial
		 * window, we would not send the last segment
		 * if we skip considering the option length here.
		 * Note: this may not work when tcp headers change
		 * very dynamically in the future.
		 */
		if ((((tp->t_flags & TF_SIGNATURE) ?
			PADTCPOLEN(TCPOLEN_SIGNATURE) : 0) +
		    ((tp->t_flags & TF_RCVD_TSTMP) ?
			PADTCPOLEN(TCPOLEN_TIMESTAMP) : 0) +
		    len) >= tp->t_maxseg)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome. XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    (uint32_t)len + (uint32_t)off >= sbavail(&so->so_snd) &&
		    (tp->t_flags & TF_NOPUSH) == 0) {
			goto send;
		}
		if (tp->t_flags & TF_FORCEDATA)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
		if (sack_rxmit)
			goto send;
	}

	/*
	 * Sending of standalone window updates.
	 *
	 * Window updates are important when we close our window due to a
	 * full socket buffer and are opening it again after the application
	 * reads data from it. Once the window has opened again and the
	 * remote end starts to send again the ACK clock takes over and
	 * provides the most current window information.
	 *
	 * We must avoid the silly window syndrome where every read
	 * from the receive buffer, no matter how small, causes a window
	 * update to be sent. We also should avoid sending a flurry of
	 * window updates when the socket buffer had queued a lot of data
	 * and the application is doing small reads.
	 *
	 * Prevent a flurry of pointless window updates by only sending
	 * an update when we can increase the advertised window by more
	 * than 1/4th of the socket buffer capacity. When the buffer is
	 * getting full or is very small be more aggressive and send an
	 * update whenever we can increase by two mss sized segments.
	 * In all other situations the ACK's to new incoming data will
	 * carry further window increases.
	 *
	 * Don't send an independent window update if a delayed
	 * ACK is pending (it will get piggy-backed on it) or the
	 * remote side already has done a half-close and won't send
	 * more data.
	 */
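	/*
	 * Worked example (illustrative): with sb_hiwat = 65536 and
	 * t_maxseg = 1460, a standalone update goes out once the window
	 * can grow by at least 16384 bytes (sb_hiwat / 4); when the
	 * buffer is nearly full (recwin <= 8192, i.e. sb_hiwat / 8), a
	 * growth of just 2920 bytes (2 * t_maxseg) already qualifies.
	 */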
	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
	    !(tp->t_flags & TF_DELACK) &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * "adv" is the amount we could increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		int32_t adv;
		int oldwin;

		adv = recwin;
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
			oldwin = (tp->rcv_adv - tp->rcv_nxt);
			if (adv > oldwin)
				adv -= oldwin;
			else
				adv = 0;
		} else
			oldwin = 0;

		/*
		 * If the new window size ends up being the same as or less
		 * than the old size when it is scaled, then don't force
		 * a window update.
		 */
		if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
			goto dontupdate;

		if (adv >= (int32_t)(2 * tp->t_maxseg) &&
		    (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
		    recwin <= (so->so_rcv.sb_hiwat / 8) ||
		    so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg ||
		    adv >= TCP_MAXWIN << tp->rcv_scale))
			goto send;
		if (2 * adv >= (int32_t)so->so_rcv.sb_hiwat)
			goto send;
	}
dontupdate:

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if (flags & TH_FIN &&
	    ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
		goto send;
	/*
	 * In SACK, it is possible for tcp_output to fail to send a segment
	 * after the retransmission timer has been turned off. Make sure
	 * that the retransmission timer is set.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&
	    !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp));
		goto just_return;
	}
	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates. The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_timer_active(tp, TT_PERSIST)
	 *	is true when we are in persist state.
	 * (tp->t_flags & TF_FORCEDATA)
	 *	is set when we are called to send a persist packet.
	 * tcp_timer_active(tp, TT_REXMT)
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 *	if window is nonzero, transmit what we can,
	 *	otherwise force out a byte.
	 */
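	/*
	 * Illustrative timeline (not from the original): a peer that
	 * advertises a zero window leaves this side persisting; the
	 * persist timer armed below re-enters tcp_output() on each expiry
	 * with TF_FORCEDATA set, pushing out a 1-byte probe.
	 */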
	if (sbavail(&so->so_snd) && !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
just_return:
	SOCK_SENDBUF_UNLOCK(so);
	return (0);

send:
	SOCK_SENDBUF_LOCK_ASSERT(so);
	if (len > 0) {
		if (len >= tp->t_maxseg)
			tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
	}
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
	 */
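	/*
	 * Sanity arithmetic (illustrative, typical values): a 40-byte
	 * tcpiphdr plus at most TCP_MAXOLEN (40) bytes of options and the
	 * link header keeps the whole header around a hundred bytes, far
	 * below MCLBYTES.
	 */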
	optlen = 0;
#ifdef INET6
	if (isipv6)
		hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	else
#endif
		hdrlen = sizeof (struct tcpiphdr);

	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
	}

	/*
	 * Compute options for segment.
	 * We only have to care about SYN and established connection
	 * segments. Options for SYN-ACK segments are handled in TCP
	 * syncache.
	 */
	to.to_flags = 0;
	if ((tp->t_flags & TF_NOOPT) == 0) {
		/* Maximum segment size. */
		if (flags & TH_SYN) {
			to.to_mss = tcp_mssopt(&inp->inp_inc);
			if (tp->t_port)
				to.to_mss -= V_tcp_udp_tunneling_overhead;
			to.to_flags |= TOF_MSS;

			/*
			 * On SYN or SYN|ACK transmits on TFO connections,
			 * only include the TFO option if it is not a
			 * retransmit, as the presence of the TFO option may
			 * have caused the original SYN or SYN|ACK to have
			 * been dropped by a middlebox.
			 */
			if ((tp->t_flags & TF_FASTOPEN) &&
			    (tp->t_rxtshift == 0)) {
				if (tp->t_state == TCPS_SYN_RECEIVED) {
					to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
					to.to_tfo_cookie =
					    (u_int8_t *)&tp->t_tfo_cookie.server;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
				} else if (tp->t_state == TCPS_SYN_SENT) {
					to.to_tfo_len =
					    tp->t_tfo_client_cookie_len;
					to.to_tfo_cookie =
					    tp->t_tfo_cookie.client;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
					/*
					 * If we wind up having more data to
					 * send with the SYN than can fit in
					 * one segment, don't send any more
					 * until the SYN|ACK comes back from
					 * the other end.
					 */
					dont_sendalot = 1;
				}
			}
		}
		/* Window scaling. */
		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
			to.to_wscale = tp->request_r_scale;
			to.to_flags |= TOF_SCALE;
		}
		/* Timestamps. */
		if ((tp->t_flags & TF_RCVD_TSTMP) ||
		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
			curticks = tcp_ts_getticks();
			to.to_tsval = curticks + tp->ts_offset;
			to.to_tsecr = tp->ts_recent;
			to.to_flags |= TOF_TS;
			if (tp->t_rxtshift == 1)
				tp->t_badrxtwin = curticks;
		}

		/* Set receive buffer autosizing timestamp. */
		if (tp->rfbuf_ts == 0 &&
		    (so->so_rcv.sb_flags & SB_AUTOSIZE))
			tp->rfbuf_ts = tcp_ts_getticks();

		/* Selective ACK's. */
		if (tp->t_flags & TF_SACK_PERMIT) {
			if (flags & TH_SYN)
				to.to_flags |= TOF_SACKPERM;
			else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
			    tp->rcv_numsacks > 0) {
				to.to_flags |= TOF_SACK;
				to.to_nsacks = tp->rcv_numsacks;
				to.to_sacks = (u_char *)tp->sackblks;
			}
		}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/* TCP-MD5 (RFC2385). */
		/*
		 * Check that TCP_MD5SIG is enabled in tcpcb to
		 * account for the size needed to set this TCP option.
		 */
		if (tp->t_flags & TF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif /* TCP_SIGNATURE */

		/* Processing the options. */
		hdrlen += optlen = tcp_addoptions(&to, opt);
		/*
		 * If we wanted a TFO option to be added, but it was unable
		 * to fit, ensure no data is sent.
		 */
		if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie &&
		    !(to.to_flags & TOF_FASTOPEN))
			len = 0;
	}
	if (tp->t_port) {
		if (V_tcp_udp_tunneling_port == 0) {
			/* The port was removed?? */
			SOCK_SENDBUF_UNLOCK(so);
			return (EHOSTUNREACH);
		}
		hdrlen += sizeof(struct udphdr);
	}
	/*
	 * Adjust data length if insertion of options will
	 * bump the packet length beyond the t_maxseg length.
	 * Clear the FIN bit because we cut off the tail of
	 * the segment.
	 */
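	/*
	 * E.g. (illustrative): with t_maxseg = 1460 and 12 bytes of
	 * timestamp options, the payload is capped at 1448 bytes below so
	 * that payload plus options still fit one maximum-sized segment.
	 */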
	if (len + optlen + ipoptlen > tp->t_maxseg) {
		flags &= ~TH_FIN;

		if (tso) {
			u_int if_hw_tsomax;
			u_int moff;
			int max_len;

			/* extract TSO information */
			if_hw_tsomax = tp->t_tsomax;
			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;

			/*
			 * Limit a TSO burst to prevent it from
			 * overflowing or exceeding the maximum length
			 * allowed by the network interface:
			 */
			KASSERT(ipoptlen == ipsec_optlen,
			    ("%s: TSO can't do IP options", __func__));

			/*
			 * Check if we should limit by maximum payload
			 * length:
			 */
			if (if_hw_tsomax != 0) {
				/* compute maximum TSO length */
				max_len = if_hw_tsomax - hdrlen -
				    ipsec_optlen - max_linkhdr;
				if (max_len <= 0) {
					len = 0;
				} else if (len > max_len) {
					sendalot = 1;
					len = max_len;
				}
			}

			/*
			 * Prevent the last segment from being
			 * fractional unless the send sockbuf can be
			 * emptied:
			 */
			max_len = tp->t_maxseg - optlen - ipsec_optlen;
			if (((uint32_t)off + (uint32_t)len) <
			    sbavail(&so->so_snd)) {
				moff = len % max_len;
				if (moff != 0) {
					len -= moff;
					sendalot = 1;
				}
			}

			/*
			 * In case there are too many small fragments
			 * don't use TSO:
			 */
			if (len <= max_len) {
				len = max_len;
				sendalot = 1;
				tso = 0;
			}

			/*
			 * Send the FIN in a separate segment
			 * after the bulk sending is done.
			 * We don't trust the TSO implementations
			 * to clear the FIN flag on all but the
			 * last segment.
			 */
			if (tp->t_flags & TF_NEEDFIN)
				sendalot = 1;
		} else {
			if (optlen + ipoptlen >= tp->t_maxseg) {
				/*
				 * Since we don't have enough space to put
				 * the IP header chain and the TCP header in
				 * one packet as required by RFC 7112, don't
				 * send it. Also ensure that at least one
				 * byte of the payload can be put into the
				 * TCP segment.
				 */
				SOCK_SENDBUF_UNLOCK(so);
				error = EMSGSIZE;
				sack_rxmit = false;
				goto out;
			}
			len = tp->t_maxseg - optlen - ipoptlen;
			sendalot = 1;
			if (dont_sendalot)
				sendalot = 0;
		}
	} else
		tso = 0;

	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
	    ("%s: len > IP_MAXPACKET", __func__));

/*#ifdef DIAGNOSTIC*/
#ifdef INET6
	if (max_linkhdr + hdrlen > MCLBYTES)
#else
	if (max_linkhdr + hdrlen > MHLEN)
#endif
		panic("tcphdr too big");
/*#endif*/

	/*
	 * This KASSERT is here to catch edge cases at a well defined place.
	 * Before, those had triggered (random) panic conditions further down.
	 */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		struct mbuf *mb;
		struct sockbuf *msb;
		u_int moff;

		if ((tp->t_flags & TF_FORCEDATA) && len == 1) {
			TCPSTAT_INC(tcps_sndprobe);
#ifdef STATS
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				stats_voi_update_abs_u32(tp->t_stats,
				    VOI_TCP_RETXPB, len);
			else
				stats_voi_update_abs_u64(tp->t_stats,
				    VOI_TCP_TXPB, len);
#endif /* STATS */
		} else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
			tp->t_sndrexmitpack++;
			TCPSTAT_INC(tcps_sndrexmitpack);
			TCPSTAT_ADD(tcps_sndrexmitbyte, len);
			if (sack_rxmit) {
				TCPSTAT_INC(tcps_sack_rexmits);
				if (tso) {
					TCPSTAT_INC(tcps_sack_rexmits_tso);
				}
				TCPSTAT_ADD(tcps_sack_rexmit_bytes, len);
			}
#ifdef STATS
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
			    len);
#endif /* STATS */
		} else {
			TCPSTAT_INC(tcps_sndpack);
			TCPSTAT_ADD(tcps_sndbyte, len);
#ifdef STATS
			stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
			    len);
#endif /* STATS */
		}
#ifdef INET6
		if (MHLEN < hdrlen + max_linkhdr)
			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		else
#endif
			m = m_gethdr(M_NOWAIT, MT_DATA);

		if (m == NULL) {
			SOCK_SENDBUF_UNLOCK(so);
			error = ENOBUFS;
			sack_rxmit = false;
			goto out;
		}

		m->m_data += max_linkhdr;
		m->m_len = hdrlen;

		/*
		 * Start the m_copy functions from the closest mbuf
		 * to the offset in the socket buffer chain.
		 */
		mb = sbsndptr_noadv(&so->so_snd, off, &moff);
		if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
			m_copydata(mb, moff, len,
			    mtod(m, caddr_t) + hdrlen);
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				sbsndptr_adv(&so->so_snd, mb, len);
			m->m_len += len;
		} else {
			int32_t old_len;

			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				msb = NULL;
			else
				msb = &so->so_snd;
			old_len = len;
			m->m_next = tcp_m_copym(mb, moff,
			    &len, if_hw_tsomaxsegcount,
			    if_hw_tsomaxsegsize, msb, hw_tls);
			if (old_len != len)
				flags &= ~TH_FIN;
			if (len <= (tp->t_maxseg - optlen)) {
				/*
				 * We must have run out of mbufs for the
				 * copy, so the send was shortened and TSO
				 * is no longer needed. Don't set sendalot,
				 * since we are low on mbufs.
				 */
				tso = 0;
			}
			if (m->m_next == NULL) {
				SOCK_SENDBUF_UNLOCK(so);
				(void) m_free(m);
				error = ENOBUFS;
				sack_rxmit = false;
				goto out;
			}
		}

		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (((uint32_t)off + (uint32_t)len == sbused(&so->so_snd)) &&
		    !(flags & TH_SYN))
			flags |= TH_PUSH;
		SOCK_SENDBUF_UNLOCK(so);
	} else {
		SOCK_SENDBUF_UNLOCK(so);
		if (tp->t_flags & TF_ACKNOW)
			TCPSTAT_INC(tcps_sndacks);
		else if (flags & (TH_SYN|TH_FIN|TH_RST))
			TCPSTAT_INC(tcps_sndctrl);
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			TCPSTAT_INC(tcps_sndurg);
		else
			TCPSTAT_INC(tcps_sndwinup);

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOBUFS;
			sack_rxmit = false;
			goto out;
		}
#ifdef INET6
		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
		    MHLEN >= hdrlen) {
			M_ALIGN(m, hdrlen);
		} else
#endif
		m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	SOCK_SENDBUF_UNLOCK_ASSERT(so);
	m->m_pkthdr.rcvif = (struct ifnet *)0;
#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif
#ifdef INET6
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		if (tp->t_port) {
			udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tp->t_port;
			ulen = hdrlen + len - sizeof(struct ip6_hdr);
			udp->uh_ulen = htons(ulen);
			th = (struct tcphdr *)(udp + 1);
		} else {
			th = (struct tcphdr *)(ip6 + 1);
		}
		tcpip_fillheaders(inp, tp->t_port, ip6, th);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		if (tp->t_port) {
			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tp->t_port;
			ulen = hdrlen + len - sizeof(struct ip);
			udp->uh_ulen = htons(ulen);
			th = (struct tcphdr *)(udp + 1);
		} else
			th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, tp->t_port, ip, th);
	}

	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;
	/*
	 * If we are starting a connection, send ECN setup
	 * SYN packet. If we are on a retransmit, we may
	 * resend those bits a number of times as per
	 * RFC 3168.
	 */
	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
		flags |= tcp_ecn_output_syn_sent(tp);
	}
	/* Also handle parallel SYN for ECN */
	if ((TCPS_HAVERCVDSYN(tp->t_state)) &&
	    (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
		int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit);
		if ((tp->t_state == TCPS_SYN_RECEIVED) &&
		    (tp->t_flags2 & TF2_ECN_SND_ECE))
			tp->t_flags2 &= ~TF2_ECN_SND_ECE;
#ifdef INET6
		if (isipv6) {
			ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << IPV6_FLOWLABEL_LEN);
			ip6->ip6_flow |= htonl(ect << IPV6_FLOWLABEL_LEN);
		}
		else
#endif
		{
			ip->ip_tos &= ~IPTOS_ECN_MASK;
			ip->ip_tos |= ect;
		}
	}

	/*
	 * If we are doing retransmissions, then snd_nxt will
	 * not reflect the first unsent octet. For ACK only
	 * packets, we do not want the sequence number of the
	 * retransmitted packet, we want the sequence number
	 * of the next unsent octet. So, if there is no data
	 * (and no SYN or FIN), use snd_max instead of snd_nxt
	 * when filling in ti_seq. But if we are in persist
	 * state, snd_max might reflect one byte beyond the
	 * right edge of the window, so use snd_nxt in that
	 * case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 */
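	/*
	 * E.g. (illustrative): a pure ACK sent while earlier data is
	 * being retransmitted carries snd_max, the next unsent octet,
	 * rather than the lagging snd_nxt of the retransmission.
	 */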
	if (!sack_rxmit) {
		if (len || (flags & (TH_SYN|TH_FIN)) ||
		    tcp_timer_active(tp, TT_PERSIST))
			th->th_seq = htonl(tp->snd_nxt);
		else
			th->th_seq = htonl(tp->snd_max);
	} else {
		th->th_seq = htonl(p->rxmit);
		p->rxmit += len;
		/*
		 * Lost Retransmission Detection
		 * trigger resending of a (then
		 * still existing) hole, when
		 * fack acks recoverypoint.
		 */
		if ((tp->t_flags & TF_LRD) && SEQ_GEQ(p->rxmit, p->end))
			p->rxmit = tp->snd_recover;
		tp->sackhint.sack_bytes_rexmit += len;
	}
	if (IN_RECOVERY(tp->t_flags)) {
		/*
		 * Account all bytes transmitted while
		 * IN_RECOVERY, simplifying PRR and
		 * Lost Retransmit Detection
		 */
		tp->sackhint.prr_out += len;
	}
	th->th_ack = htonl(tp->rcv_nxt);
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	}
	/*
	 * Calculate receive window. Don't shrink window,
	 * but avoid silly window syndrome.
	 * If a RST segment is sent, advertise a window of zero.
	 */
	if (flags & TH_RST) {
		recwin = 0;
	} else {
		if (recwin < (so->so_rcv.sb_hiwat / 4) &&
		    recwin < tp->t_maxseg)
			recwin = 0;
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
		    recwin < (tp->rcv_adv - tp->rcv_nxt))
			recwin = (tp->rcv_adv - tp->rcv_nxt);
	}
	/*
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled. The <SYN,ACK>
	 * case is handled in syncache.
	 */
	if (flags & TH_SYN)
		th->th_win = htons((u_short)
		    (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
	else {
		/* Avoid shrinking window with window scaling. */
		recwin = roundup2(recwin, 1 << tp->rcv_scale);
		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
	}

	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
	 * a 0 window. This may cause the remote transmitter to stall. This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer. This can occur if the receiver is attempting
	 * to read more data than can be buffered prior to transmitting on
	 * the connection.
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		flags |= TH_URG;
	} else {
		/*
		 * If no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;	/* drag it along */
	}
	tcp_set_flags(th, flags);

	/*
	 * Put TCP length in extended header, and then
	 * checksum extended header and data.
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		/*
		 * Calculate MD5 signature and put it into the place
		 * determined before.
		 * NOTE: since TCP options buffer doesn't point into
		 * mbuf's data, calculate offset and use it.
		 */
		if (!TCPMD5_ENABLED() || (error = TCPMD5_OUTPUT(m, th,
		    (u_char *)(th + 1) + (to.to_signature - opt))) != 0) {
			/*
			 * Do not send segment if the calculation of MD5
			 * digest has failed.
			 */
			m_freem(m);
			goto out;
		}
	}
#endif
#ifdef INET6
	if (isipv6) {
		/*
		 * There is no need to fill in ip6_plen right now.
		 * It will be filled later by ip6_output.
		 */
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in6_cksum_pseudo(ip6,
			    sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
			    0);
		}
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
			    IPPROTO_TCP + len + optlen));
		}

		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	}
#endif

	/*
	 * Enable TSO and specify the size of the segments.
	 * The TCP pseudo header checksum is always provided.
	 */
	if (tso) {
		KASSERT(len > tp->t_maxseg - optlen - ipsec_optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen - ipsec_optlen;
	}

	KASSERT(len + hdrlen == m_length(m, NULL),
	    ("%s: mbuf chain shorter than expected: %d + %u != %u",
	    __func__, len, hdrlen, m_length(m, NULL)));

#ifdef TCP_HHOOK
	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);
#endif

	TCP_PROBE3(debug__output, tp, th, m);

	/* We're getting ready to send; log now. */
	/* XXXMT: We are not honoring verbose logging. */

	if (tcp_bblogging_on(tp))
		lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd,
		    TCP_LOG_OUT, ERRNO_UNK, len, NULL, false, NULL, NULL, 0,
		    NULL);
	else
		lgb = NULL;

	/*
	 * Fill in IP length and desired time to live and
	 * send to IP level. There should be a better way
	 * to handle ttl and tos; we could keep them in
	 * the template, but need a way to checksum without them.
	 */
	/*
	 * m->m_pkthdr.len should have been set before checksum calculation,
	 * because in6_cksum() needs it.
	 */
#ifdef INET6
	if (isipv6) {
		/*
		 * we separately set hoplimit for every segment, since the
		 * user might want to change the value via setsockopt.
		 * Also, desired default hop limit might be changed via
		 * Neighbor Discovery.
		 */
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);

		/*
		 * Set the packet size here for the benefit of DTrace probes.
		 * ip6_output() will set it properly; it's supposed to include
		 * the option header lengths as well.
		 */
		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));

		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);

		TCP_PROBE5(send, NULL, tp, ip6, tp, th);

		/* TODO: IPv6 IP6TOS_ECT bit on */
		error = ip6_output(m, inp->in6p_outputopts, &inp->inp_route6,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, NULL, inp);

		if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
			mtu = inp->inp_route6.ro_nh->nh_mtu;
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(inp, NULL);
#endif /* INET6 */
		/*
		 * If we do path MTU discovery, then we set DF on every packet.
		 * This might not be the best thing to do according to RFC3390
		 * Section 2. However the tcp hostcache mitigates the problem
		 * so it affects only the first tcp connection with a host.
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe fallback.
		 */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
			if (tp->t_port == 0 || len < V_tcp_minmss) {
				ip->ip_off |= htons(IP_DF);
			}
		} else {
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		}

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);

		TCP_PROBE5(send, NULL, tp, ip, tp, th);

		error = ip_output(m, inp->inp_options, &inp->inp_route,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, inp);

		if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
			mtu = inp->inp_route.ro_nh->nh_mtu;
	}
#endif /* INET */

	if (lgb != NULL) {
		lgb->tlb_errno = error;
		lgb = NULL;
	}
out:
	if (error == 0)
		tcp_account_for_send(tp, len, (tp->snd_nxt != tp->snd_max), 0, hw_tls);
	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit. In persist state, just set snd_max. In a closed
	 * state just return.
	 */
	if (flags & TH_RST) {
		TCPSTAT_INC(tcps_sndtotal);
		return (0);
	} else if ((tp->t_flags & TF_FORCEDATA) == 0 ||
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN|TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		if (sack_rxmit)
			goto timer;
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			/*
			 * Update "made progress" indication if we just
			 * added new data to an empty socket buffer.
			 */
			if (tp->snd_una == tp->snd_max)
				tp->t_acktime = ticks;
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			tp->t_sndtime = ticks;
			if (tp->t_rtttime == 0) {
				tp->t_rtttime = ticks;
				tp->t_rtseq = startseq;
				TCPSTAT_INC(tcps_segstimed);
			}
#ifdef STATS
			if (!(tp->t_flags & TF_GPUTINPROG) && len) {
				tp->t_flags |= TF_GPUTINPROG;
				tp->gput_seq = startseq;
				tp->gput_ack = startseq +
				    ulmin(sbavail(&so->so_snd) - off, sendwin);
				tp->gput_ts = tcp_ts_getticks();
			}
#endif /* STATS */
		}

		/*
		 * Set retransmit timer if not currently set,
		 * and not doing a pure ack or a keep-alive probe.
		 * Initial value for retransmit timer is smoothed
		 * round-trip time + 2 * round-trip time variance.
		 * Initialize shift counter which is used for backoff
		 * of retransmit time.
		 */
timer:
		if (!tcp_timer_active(tp, TT_REXMT) &&
		    ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
		    (tp->snd_nxt != tp->snd_una))) {
			if (tcp_timer_active(tp, TT_PERSIST)) {
				tcp_timer_activate(tp, TT_PERSIST, 0);
				tp->t_rxtshift = 0;
			}
			tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp));
		} else if (len == 0 && sbavail(&so->so_snd) &&
		    !tcp_timer_active(tp, TT_REXMT) &&
		    !tcp_timer_active(tp, TT_PERSIST)) {
			/*
			 * Avoid a situation where we do not set persist timer
			 * after a zero window condition. For example:
			 * 1) A -> B: packet with enough data to fill the window
			 * 2) B -> A: ACK for #1 + new data (0 window
			 *    advertisement)
			 * 3) A -> B: ACK for #2, 0 len packet
			 *
			 * In this case, A will not activate the persist timer,
			 * because it chose to send a packet. Unless tcp_output
			 * is called for some other reason (delayed ack timer,
			 * another input packet from B, socket syscall), A will
			 * not send zero window probes.
			 *
			 * So, if you send a 0-length packet, but there is data
			 * in the socket buffer, and neither the rexmt or
			 * persist timer is already set, then activate the
			 * persist timer.
			 */
			tp->t_rxtshift = 0;
			tcp_setpersist(tp);
		}
	} else {
		/*
		 * Persist case, update snd_max but since we are in
		 * persist mode (no window) we do not update snd_nxt.
		 */
		int xlen = len;
		if (flags & TH_SYN)
			++xlen;
		if (flags & TH_FIN) {
			++xlen;
			tp->t_flags |= TF_SENTFIN;
		}
		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
			tp->snd_max = tp->snd_nxt + xlen;
	}
	if ((error == 0) &&
	    (tp->rcv_numsacks > 0) &&
	    TCPS_HAVEESTABLISHED(tp->t_state) &&
	    (tp->t_flags & TF_SACK_PERMIT)) {
		/* Clean up any DSACK's sent */
		tcp_clean_dsack_blocks(tp);
	}
	if ((error == 0) &&
	    sack_rxmit &&
	    SEQ_LT(tp->snd_nxt, SEQ_MIN(p->rxmit, p->end))) {
		/*
		 * When transmitting from SACK scoreboard
		 * after an RTO, pull snd_nxt along.
		 */
		tp->snd_nxt = SEQ_MIN(p->rxmit, p->end);
	}
	if (error) {
		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 *
		 * If the error is EPERM the packet got blocked by the
		 * local firewall. Normally we should terminate the
		 * connection but the blocking may have been spurious
		 * due to a firewall reconfiguration cycle. So we treat
		 * it like a packet loss and let the retransmit timer and
		 * timeouts do their work over time.
		 * XXX: It is a POLA question whether calling tcp_drop right
		 * away would be the really correct behavior instead.
		 */
		if (((tp->t_flags & TF_FORCEDATA) == 0 ||
		    !tcp_timer_active(tp, TT_PERSIST)) &&
		    ((flags & TH_SYN) == 0) &&
		    (error != EPERM)) {
			if (sack_rxmit) {
				p->rxmit = SEQ_MIN(p->end, p->rxmit) - len;
				tp->sackhint.sack_bytes_rexmit -= len;
				KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
				    ("sackhint bytes rtx >= 0"));
				KASSERT((flags & TH_FIN) == 0,
				    ("error while FIN with SACK rxmit"));
			} else {
				tp->snd_nxt -= len;
				if (flags & TH_FIN)
					tp->snd_nxt--;
			}
			if (IN_RECOVERY(tp->t_flags))
				tp->sackhint.prr_out -= len;
		}
		SOCK_SENDBUF_UNLOCK_ASSERT(so);	/* Check gotos. */
		switch (error) {
		case EACCES:
		case EPERM:
			tp->t_softerror = error;
			return (error);
		case ENOBUFS:
			TCP_XMIT_TIMER_ASSERT(tp, len, flags);
			tp->snd_cwnd = tcp_maxseg(tp);
			return (0);
		case EMSGSIZE:
			/*
			 * For some reason the interface we used initially
			 * to send segments changed to another one or
			 * lowered its MTU.
			 * If TSO was active we either got an interface
			 * without TSO capabilities or TSO was turned off.
			 * If we obtained mtu from ip_output() then update
			 * it and try again.
			 */
			if (tso)
				tp->t_flags &= ~TF_TSO;
			if (mtu != 0) {
				tcp_mss_update(tp, -1, mtu, NULL, NULL);
				goto again;
			}
			return (error);
		case EHOSTDOWN:
		case EHOSTUNREACH:
		case ENETDOWN:
		case ENETUNREACH:
			if (TCPS_HAVERCVDSYN(tp->t_state)) {
				tp->t_softerror = error;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			return (error);
		}
	}
	TCPSTAT_INC(tcps_sndtotal);

	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	if (SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + recwin;
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
	if (tcp_timer_active(tp, TT_DELACK))
		tcp_timer_activate(tp, TT_DELACK, 0);
	if (sendalot)
		goto again;
	return (0);
}

void
tcp_setpersist(struct tcpcb *tp)
{
	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
	int tt;
	int maxunacktime;

	tp->t_flags &= ~TF_PREVVALID;
	if (tcp_timer_active(tp, TT_REXMT))
		panic("tcp_setpersist: retransmit pending");
	/*
	 * If the state is already closed, don't bother.
	 */
	if (tp->t_state == TCPS_CLOSED)
		return;

	/*
	 * Start/restart persistence timer.
	 */
	TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
	    tcp_persmin, tcp_persmax);
	if (TP_MAXUNACKTIME(tp) && tp->t_acktime) {
		maxunacktime = tp->t_acktime + TP_MAXUNACKTIME(tp) - ticks;
		if (maxunacktime < 1)
			maxunacktime = 1;
		if (maxunacktime < tt)
			tt = maxunacktime;
	}
	tcp_timer_activate(tp, TT_PERSIST, tt);
	if (tp->t_rxtshift < V_tcp_retries)
		tp->t_rxtshift++;
}

/*
 * Insert TCP options according to the supplied parameters at the location
 * optp in a consistent way. Can handle unaligned destinations.
 *
 * The order of the option processing is crucial for optimal packing and
 * alignment for the scarce option space.
 *
 * The optimal order for a SYN/SYN-ACK segment is:
 *   MSS (4) + NOP (1) + Window scale (3) + SACK permitted (2) +
 *   Timestamp (10) + Signature (18) = 38 bytes out of a maximum of 40.
 *
 * The SACK options should be last. SACK blocks consume 8*n+2 bytes.
 * So a full size SACK blocks option is 34 bytes (with 4 SACK blocks).
 * At minimum we need 10 bytes (to generate 1 SACK block). If both
 * TCP Timestamps (12 bytes) and TCP Signatures (18 bytes) are present,
 * we only have 10 bytes for SACK options (40 - (12 + 18)).
 */
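/*
 * Worked layout example (illustrative): on an established connection with
 * timestamps, the block is NOP NOP TIMESTAMP (12 bytes), leaving 28 bytes;
 * after two more NOPs and the 2-byte SACK header, at most three 8-byte
 * SACK blocks fit, filling the 40-byte option space exactly.
 */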
int
tcp_addoptions(struct tcpopt *to, u_char *optp)
{
	u_int32_t mask, optlen = 0;

	for (mask = 1; mask < TOF_MAXOPT; mask <<= 1) {
		if ((to->to_flags & mask) != mask)
			continue;
		if (optlen == TCP_MAXOLEN)
			break;
		switch (to->to_flags & mask) {
		case TOF_MSS:
			while (optlen % 4) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_MAXSEG)
				continue;
			optlen += TCPOLEN_MAXSEG;
			*optp++ = TCPOPT_MAXSEG;
			*optp++ = TCPOLEN_MAXSEG;
			to->to_mss = htons(to->to_mss);
			bcopy((u_char *)&to->to_mss, optp, sizeof(to->to_mss));
			optp += sizeof(to->to_mss);
			break;
		case TOF_SCALE:
			while (!optlen || optlen % 2 != 1) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_WINDOW)
				continue;
			optlen += TCPOLEN_WINDOW;
			*optp++ = TCPOPT_WINDOW;
			*optp++ = TCPOLEN_WINDOW;
			*optp++ = to->to_wscale;
			break;
		case TOF_SACKPERM:
			while (optlen % 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SACK_PERMITTED)
				continue;
			optlen += TCPOLEN_SACK_PERMITTED;
			*optp++ = TCPOPT_SACK_PERMITTED;
			*optp++ = TCPOLEN_SACK_PERMITTED;
			break;
		case TOF_TS:
			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_TIMESTAMP)
				continue;
			optlen += TCPOLEN_TIMESTAMP;
			*optp++ = TCPOPT_TIMESTAMP;
			*optp++ = TCPOLEN_TIMESTAMP;
			to->to_tsval = htonl(to->to_tsval);
			to->to_tsecr = htonl(to->to_tsecr);
			bcopy((u_char *)&to->to_tsval, optp, sizeof(to->to_tsval));
			optp += sizeof(to->to_tsval);
			bcopy((u_char *)&to->to_tsecr, optp, sizeof(to->to_tsecr));
			optp += sizeof(to->to_tsecr);
			break;
		case TOF_SIGNATURE:
			{
			int siglen = TCPOLEN_SIGNATURE - 2;

			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SIGNATURE) {
				to->to_flags &= ~TOF_SIGNATURE;
				continue;
			}
			optlen += TCPOLEN_SIGNATURE;
			*optp++ = TCPOPT_SIGNATURE;
			*optp++ = TCPOLEN_SIGNATURE;
			to->to_signature = optp;
			while (siglen--)
				*optp++ = 0;
			break;
			}
		case TOF_SACK:
			{
			int sackblks = 0;
			struct sackblk *sack = (struct sackblk *)to->to_sacks;
			tcp_seq sack_seq;

			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SACKHDR + TCPOLEN_SACK)
				continue;
			optlen += TCPOLEN_SACKHDR;
			*optp++ = TCPOPT_SACK;
			sackblks = min(to->to_nsacks,
			    (TCP_MAXOLEN - optlen) / TCPOLEN_SACK);
			*optp++ = TCPOLEN_SACKHDR + sackblks * TCPOLEN_SACK;
			while (sackblks--) {
				sack_seq = htonl(sack->start);
				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
				optp += sizeof(sack_seq);
				sack_seq = htonl(sack->end);
				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
				optp += sizeof(sack_seq);
				optlen += TCPOLEN_SACK;
				sack++;
			}
			TCPSTAT_INC(tcps_sack_send_blocks);
			break;
			}
		case TOF_FASTOPEN:
			{
			int total_len;

			/* XXX is there any point to aligning this option? */
			total_len = TCPOLEN_FAST_OPEN_EMPTY + to->to_tfo_len;
			if (TCP_MAXOLEN - optlen < total_len) {
				to->to_flags &= ~TOF_FASTOPEN;
				continue;
			}
			*optp++ = TCPOPT_FAST_OPEN;
			*optp++ = total_len;
			if (to->to_tfo_len > 0) {
				bcopy(to->to_tfo_cookie, optp, to->to_tfo_len);
1922 optp += to->to_tfo_len;
1923 }
1924 optlen += total_len;
1925 break;
1926 }
1927 default:
1928 panic("%s: unknown TCP option type", __func__);
1929 break;
1930 }
1931 }
1932
1933 /* Terminate and pad TCP options to a 4 byte boundary. */
1934 if (optlen % 4) {
1935 optlen += TCPOLEN_EOL;
1936 *optp++ = TCPOPT_EOL;
1937 }
1938 /*
1939 * According to RFC 793 (STD0007):
1940 * "The content of the header beyond the End-of-Option option
1941 * must be header padding (i.e., zero)."
1942 * and later: "The padding is composed of zeros."
1943 */
1944 while (optlen % 4) {
1945 optlen += TCPOLEN_PAD;
1946 *optp++ = TCPOPT_PAD;
1947 }
1948
1949 KASSERT(optlen <= TCP_MAXOLEN, ("%s: TCP options too long", __func__));
1950 return (optlen);
1951 }
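
/*
 * Usage sketch (a hypothetical caller; the variables are illustrative):
 *
 *	struct tcpopt to = { .to_flags = TOF_MSS | TOF_SCALE };
 *	u_char opt[TCP_MAXOLEN];
 *	u_int optlen;
 *
 *	to.to_mss = 1460;
 *	to.to_wscale = 7;
 *	optlen = tcp_addoptions(&to, opt);
 *
 * The returned length is already padded to a multiple of 4 and can be
 * added directly to the TCP header length.
 */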

/*
 * This is a copy of m_copym(), taking the TSO segment size/limit
 * constraints into account, and advancing the sndptr as it goes.
 */
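/*
 * Summary of the arguments, as derived from the code below: off0 is
 * the starting offset into the chain; *plen is the requested length,
 * updated on return with the number of bytes actually copied;
 * seglimit and segsize bound the number and size of TSO segments
 * (a seglimit of 0 disables the check); sb, if non-NULL, is the send
 * buffer whose sb_sndptr is advanced past fully consumed mbufs; and
 * hw_tls makes the copy stop at TLS session boundaries.
 */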
struct mbuf *
tcp_m_copym(struct mbuf *m, int32_t off0, int32_t *plen,
    int32_t seglimit, int32_t segsize, struct sockbuf *sb, bool hw_tls)
{
#ifdef KERN_TLS
	struct ktls_session *tls, *ntls;
	struct mbuf *start __diagused;
#endif
	struct mbuf *n, **np;
	struct mbuf *top;
	int32_t off = off0;
	int32_t len = *plen;
	int32_t fragsize;
	int32_t len_cp = 0;
	int32_t *pkthdrlen;
	uint32_t mlen, frags;
	bool copyhdr;

	KASSERT(off >= 0, ("tcp_m_copym, negative off %d", off));
	KASSERT(len >= 0, ("tcp_m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = true;
	else
		copyhdr = false;
	while (off > 0) {
		KASSERT(m != NULL, ("tcp_m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		if ((sb) && (m == sb->sb_sndptr)) {
			sb->sb_sndptroff += m->m_len;
			sb->sb_sndptr = m->m_next;
		}
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	pkthdrlen = NULL;
#ifdef KERN_TLS
	if (hw_tls && (m->m_flags & M_EXTPG))
		tls = m->m_epg_tls;
	else
		tls = NULL;
	start = m;
#endif
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("tcp_m_copym, length > size of mbuf chain"));
			*plen = len_cp;
			if (pkthdrlen != NULL)
				*pkthdrlen = len_cp;
			break;
		}
#ifdef KERN_TLS
		if (hw_tls) {
			if (m->m_flags & M_EXTPG)
				ntls = m->m_epg_tls;
			else
				ntls = NULL;

			/*
			 * Avoid mixing TLS records with handshake
			 * data or TLS records from different
			 * sessions.
			 */
			if (tls != ntls) {
				MPASS(m != start);
				*plen = len_cp;
				if (pkthdrlen != NULL)
					*pkthdrlen = len_cp;
				break;
			}
		}
#endif
		mlen = min(len, m->m_len - off);
		if (seglimit) {
			/*
			 * For M_EXTPG mbufs, add 3 segments
			 * + 1 in case we are crossing page boundaries
			 * + 2 in case the TLS hdr/trailer are used
			 * It is cheaper to just add the segments
			 * than it is to take the cache miss to look
			 * at the mbuf ext_pgs state in detail.
			 */
			if (m->m_flags & M_EXTPG) {
				fragsize = min(segsize, PAGE_SIZE);
				frags = 3;
			} else {
				fragsize = segsize;
				frags = 0;
			}
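
			/*
			 * Worked example with hypothetical numbers: a
			 * 9000-byte M_EXTPG mbuf with segsize = 1448
			 * yields fragsize = 1448 and, below,
			 * frags = 3 + howmany(9000, 1448) = 10 segments
			 * charged against seglimit.
			 */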

			/* Break if we really can't fit anymore. */
			if ((frags + 1) >= seglimit) {
				*plen = len_cp;
				if (pkthdrlen != NULL)
					*pkthdrlen = len_cp;
				break;
			}

			/*
			 * If we can't copy the whole mbuf, reduce the
			 * copied size and also adjust len so the loop
			 * will end after this mbuf.
			 */
			if ((frags + howmany(mlen, fragsize)) >= seglimit) {
				mlen = (seglimit - frags - 1) * fragsize;
				len = mlen;
				*plen = len_cp + len;
				if (pkthdrlen != NULL)
					*pkthdrlen = *plen;
			}
			frags += howmany(mlen, fragsize);
			if (frags == 0)
				frags++;
			seglimit -= frags;
			KASSERT(seglimit > 0,
			    ("%s: seglimit went too low", __func__));
		}
		if (copyhdr)
			n = m_gethdr(M_NOWAIT, m->m_type);
		else
			n = m_get(M_NOWAIT, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, M_NOWAIT))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			pkthdrlen = &n->m_pkthdr.len;
			copyhdr = false;
		}
		n->m_len = mlen;
		len_cp += n->m_len;
		if (m->m_flags & (M_EXT | M_EXTPG)) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);

		if (sb && (sb->sb_sndptr == m) &&
		    ((n->m_len + off) >= m->m_len) && m->m_next) {
			sb->sb_sndptroff += m->m_len;
			sb->sb_sndptr = m->m_next;
		}
		off = 0;
		if (len != M_COPYALL) {
			len -= n->m_len;
		}
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
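
/*
 * Usage sketch (a hypothetical caller, loosely modeled on the TSO path
 * in tcp_output(); the variable names are illustrative):
 *
 *	len = tso_len;
 *	m = tcp_m_copym(sb_mb, moff, &len, if_hw_tsomaxsegcount,
 *	    if_hw_tsomaxsegsize, &so->so_snd, hw_tls);
 *
 * On return, len holds the number of bytes actually copied, which may
 * be less than requested if the segment limit or a TLS record boundary
 * was hit.
 */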

void
tcp_sndbuf_autoscale(struct tcpcb *tp, struct socket *so, uint32_t sendwin)
{

	/*
	 * Automatic sizing of send socket buffer.  Often the send buffer
	 * size is not optimally adjusted to the actual network conditions
	 * at hand (delay bandwidth product).  Setting the buffer size too
	 * small limits throughput on links with high bandwidth and high
	 * delay (e.g. trans-continental/oceanic links).  Setting the
	 * buffer size too big consumes too much real kernel memory,
	 * especially with many connections on busy servers.
	 *
	 * The criteria to step up the send buffer one notch are:
	 *  1. receive window of remote host is larger than send buffer
	 *     (with a fudge factor of 5/4th);
	 *  2. send buffer is filled to 7/8th with data (so we actually
	 *     have data to make use of it);
	 *  3. send buffer fill has not hit maximal automatic size;
	 *  4. our send window (slow start and congestion controlled) is
	 *     larger than sent but unacknowledged data in send buffer.
	 *
	 * The remote host receive window scaling factor may limit the
	 * growing of the send buffer before it reaches its allowed
	 * maximum.
	 *
	 * It scales directly with slow start or congestion window
	 * and does at most one step per received ACK.  This fast
	 * scaling has the drawback of growing the send buffer beyond
	 * what is strictly necessary to make full use of a given
	 * delay*bandwidth product.  However, testing has shown this not
	 * to be much of a problem.  At worst we are trading wasting
	 * of available bandwidth (the non-use of it) for wasting some
	 * socket buffer memory.
	 *
	 * TODO: Shrink send buffer during idle periods together
	 * with congestion window.  Requires another timer.  Has to
	 * wait for upcoming tcp timer rewrite.
	 *
	 * XXXGL: should sbused() or sbavail() be used here?
	 */
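	/*
	 * Worked example with hypothetical numbers, assuming lowat is 0
	 * and an autosndbuf increment of 8k: with sb_hiwat = 64k,
	 * snd_wnd = sendwin = 128k, 60k of queued data and no
	 * unacknowledged bytes, criterion 1 holds
	 * (128k / 4 * 5 = 160k >= 64k), criterion 2 holds
	 * (60k >= 64k / 8 * 7 = 56k), criteria 3 and 4 hold as well,
	 * so sbreserve_locked() grows the buffer to 72k, capped at
	 * V_tcp_autosndbuf_max.
	 */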
	if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
		int lowat;

		lowat = V_tcp_sendbuf_auto_lowat ? so->so_snd.sb_lowat : 0;
		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat - lowat &&
		    sbused(&so->so_snd) >=
		    (so->so_snd.sb_hiwat / 8 * 7) - lowat &&
		    sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
		    sendwin >= (sbused(&so->so_snd) -
		    (tp->snd_nxt - tp->snd_una))) {
			if (!sbreserve_locked(so, SO_SND,
			    min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max), curthread))
				so->so_snd.sb_flags &= ~SB_AUTOSIZE;
		}
	}
}